From 45cf3a64de4c0cb9e30b247cb99ba18bdb0e49dc Mon Sep 17 00:00:00 2001
From: Ivan Orlov
Date: Fri, 28 Aug 2015 15:54:15 -0700
Subject: [PATCH] HUE-2943 [core] Add boto library

---
 desktop/core/ext-py/boto-2.38.0/.gitignore | 16 +
 desktop/core/ext-py/boto-2.38.0/MANIFEST.in | 12 +
 desktop/core/ext-py/boto-2.38.0/PKG-INFO | 230 +
 desktop/core/ext-py/boto-2.38.0/README.rst | 209 +
 desktop/core/ext-py/boto-2.38.0/bin/asadmin | 290 ++
 desktop/core/ext-py/boto-2.38.0/bin/bundle_image | 27 +
 desktop/core/ext-py/boto-2.38.0/bin/cfadmin | 108 +
 desktop/core/ext-py/boto-2.38.0/bin/cq | 92 +
 desktop/core/ext-py/boto-2.38.0/bin/cwutil | 140 +
 desktop/core/ext-py/boto-2.38.0/bin/dynamodb_dump | 75 +
 desktop/core/ext-py/boto-2.38.0/bin/dynamodb_load | 109 +
 desktop/core/ext-py/boto-2.38.0/bin/elbadmin | 301 ++
 desktop/core/ext-py/boto-2.38.0/bin/fetch_file | 46 +
 desktop/core/ext-py/boto-2.38.0/bin/glacier | 161 +
 .../core/ext-py/boto-2.38.0/bin/instance_events | 145 +
 desktop/core/ext-py/boto-2.38.0/bin/kill_instance | 35 +
 .../core/ext-py/boto-2.38.0/bin/launch_instance | 252 ++
 desktop/core/ext-py/boto-2.38.0/bin/list_instances | 90 +
 desktop/core/ext-py/boto-2.38.0/bin/lss3 | 113 +
 desktop/core/ext-py/boto-2.38.0/bin/mturk | 514 +++
 desktop/core/ext-py/boto-2.38.0/bin/pyami_sendmail | 52 +
 desktop/core/ext-py/boto-2.38.0/bin/route53 | 205 +
 desktop/core/ext-py/boto-2.38.0/bin/s3put | 438 ++
 desktop/core/ext-py/boto-2.38.0/bin/sdbadmin | 194 +
 desktop/core/ext-py/boto-2.38.0/bin/taskadmin | 116 +
 desktop/core/ext-py/boto-2.38.0/boto/__init__.py | 1216 ++++++
 desktop/core/ext-py/boto-2.38.0/boto/auth.py | 1040 +++++
 .../core/ext-py/boto-2.38.0/boto/auth_handler.py | 60 +
 .../ext-py/boto-2.38.0/boto/awslambda/__init__.py | 40 +
 .../boto-2.38.0/boto/awslambda/exceptions.py | 38 +
 .../ext-py/boto-2.38.0/boto/awslambda/layer1.py | 517 +++
 .../ext-py/boto-2.38.0/boto/beanstalk/__init__.py | 44 +
 .../ext-py/boto-2.38.0/boto/beanstalk/exception.py | 63 +
 .../ext-py/boto-2.38.0/boto/beanstalk/layer1.py | 1201 ++++++
 .../ext-py/boto-2.38.0/boto/beanstalk/response.py | 704 +++
 .../ext-py/boto-2.38.0/boto/beanstalk/wrapper.py | 29 +
 .../ext-py/boto-2.38.0/boto/cacerts/__init__.py | 22 +
 .../ext-py/boto-2.38.0/boto/cacerts/cacerts.txt | 3869 +++++++++++++++++
 .../boto-2.38.0/boto/cloudformation/__init__.py | 56 +
 .../boto-2.38.0/boto/cloudformation/connection.py | 922 ++++
 .../boto-2.38.0/boto/cloudformation/stack.py | 415 ++
 .../boto-2.38.0/boto/cloudformation/template.py | 51 +
 .../ext-py/boto-2.38.0/boto/cloudfront/__init__.py | 326 ++
 .../boto-2.38.0/boto/cloudfront/distribution.py | 757 ++++
 .../boto-2.38.0/boto/cloudfront/exception.py | 26 +
 .../ext-py/boto-2.38.0/boto/cloudfront/identity.py | 121 +
 .../boto-2.38.0/boto/cloudfront/invalidation.py | 216 +
 .../ext-py/boto-2.38.0/boto/cloudfront/logging.py | 38 +
 .../ext-py/boto-2.38.0/boto/cloudfront/object.py | 48 +
 .../ext-py/boto-2.38.0/boto/cloudfront/origin.py | 150 +
 .../ext-py/boto-2.38.0/boto/cloudfront/signers.py | 59 +
 .../ext-py/boto-2.38.0/boto/cloudhsm/__init__.py | 41 +
 .../ext-py/boto-2.38.0/boto/cloudhsm/exceptions.py | 35 +
 .../ext-py/boto-2.38.0/boto/cloudhsm/layer1.py | 448 ++
 .../boto-2.38.0/boto/cloudsearch/__init__.py | 45 +
 .../boto-2.38.0/boto/cloudsearch/document.py | 271 ++
 .../ext-py/boto-2.38.0/boto/cloudsearch/domain.py | 394 ++
 .../ext-py/boto-2.38.0/boto/cloudsearch/layer1.py | 747 ++++
 .../ext-py/boto-2.38.0/boto/cloudsearch/layer2.py | 75 +
 .../boto-2.38.0/boto/cloudsearch/optionstatus.py | 248 ++
 .../ext-py/boto-2.38.0/boto/cloudsearch/search.py | 377 ++
 .../boto/cloudsearch/sourceattribute.py | 74 +
 .../boto-2.38.0/boto/cloudsearch2/__init__.py | 42 +
 .../boto-2.38.0/boto/cloudsearch2/document.py | 315 ++
 .../ext-py/boto-2.38.0/boto/cloudsearch2/domain.py | 542 +++
 .../boto-2.38.0/boto/cloudsearch2/exceptions.py | 46 +
 .../ext-py/boto-2.38.0/boto/cloudsearch2/layer1.py | 783 ++++
 .../ext-py/boto-2.38.0/boto/cloudsearch2/layer2.py | 94 +
 .../boto-2.38.0/boto/cloudsearch2/optionstatus.py | 233 +
 .../ext-py/boto-2.38.0/boto/cloudsearch2/search.py | 452 ++
 .../boto-2.38.0/boto/cloudsearchdomain/__init__.py | 41 +
 .../boto/cloudsearchdomain/exceptions.py | 30 +
 .../boto-2.38.0/boto/cloudsearchdomain/layer1.py | 540 +++
 .../ext-py/boto-2.38.0/boto/cloudtrail/__init__.py | 41 +
 .../boto-2.38.0/boto/cloudtrail/exceptions.py | 118 +
 .../ext-py/boto-2.38.0/boto/cloudtrail/layer1.py | 374 ++
 .../ext-py/boto-2.38.0/boto/codedeploy/__init__.py | 40 +
 .../boto-2.38.0/boto/codedeploy/exceptions.py | 199 +
 .../ext-py/boto-2.38.0/boto/codedeploy/layer1.py | 899 ++++
 .../ext-py/boto-2.38.0/boto/cognito/__init__.py | 21 +
 .../boto-2.38.0/boto/cognito/identity/__init__.py | 42 +
 .../boto/cognito/identity/exceptions.py | 44 +
 .../boto-2.38.0/boto/cognito/identity/layer1.py | 549 +++
 .../boto-2.38.0/boto/cognito/sync/__init__.py | 41 +
 .../boto-2.38.0/boto/cognito/sync/exceptions.py | 54 +
 .../ext-py/boto-2.38.0/boto/cognito/sync/layer1.py | 494 +++
 desktop/core/ext-py/boto-2.38.0/boto/compat.py | 67 +
 .../boto-2.38.0/boto/configservice/__init__.py | 41 +
 .../boto-2.38.0/boto/configservice/exceptions.py | 103 +
 .../boto-2.38.0/boto/configservice/layer1.py | 381 ++
 desktop/core/ext-py/boto-2.38.0/boto/connection.py | 1227 ++++++
 .../ext-py/boto-2.38.0/boto/contrib/__init__.py | 21 +
 .../ext-py/boto-2.38.0/boto/contrib/ymlmessage.py | 53 +
 .../boto-2.38.0/boto/datapipeline/__init__.py | 41 +
 .../boto-2.38.0/boto/datapipeline/exceptions.py | 42 +
 .../ext-py/boto-2.38.0/boto/datapipeline/layer1.py | 639 +++
 .../boto-2.38.0/boto/directconnect/__init__.py | 41 +
 .../boto-2.38.0/boto/directconnect/exceptions.py | 29 +
 .../boto-2.38.0/boto/directconnect/layer1.py | 627 +++
 .../ext-py/boto-2.38.0/boto/dynamodb/__init__.py | 42 +
 .../core/ext-py/boto-2.38.0/boto/dynamodb/batch.py | 261 ++
 .../ext-py/boto-2.38.0/boto/dynamodb/condition.py | 170 +
 .../ext-py/boto-2.38.0/boto/dynamodb/exceptions.py | 64 +
 .../core/ext-py/boto-2.38.0/boto/dynamodb/item.py | 202 +
 .../ext-py/boto-2.38.0/boto/dynamodb/layer1.py | 577 +++
 .../ext-py/boto-2.38.0/boto/dynamodb/layer2.py | 806 ++++
 .../ext-py/boto-2.38.0/boto/dynamodb/schema.py | 112 +
 .../core/ext-py/boto-2.38.0/boto/dynamodb/table.py | 546 +++
 .../core/ext-py/boto-2.38.0/boto/dynamodb/types.py | 410 ++
 .../ext-py/boto-2.38.0/boto/dynamodb2/__init__.py | 42 +
 .../boto-2.38.0/boto/dynamodb2/exceptions.py | 78 +
 .../ext-py/boto-2.38.0/boto/dynamodb2/fields.py | 337 ++
 .../ext-py/boto-2.38.0/boto/dynamodb2/items.py | 473 ++
 .../ext-py/boto-2.38.0/boto/dynamodb2/layer1.py | 2904 +++++++++++++
 .../ext-py/boto-2.38.0/boto/dynamodb2/results.py | 204 +
 .../ext-py/boto-2.38.0/boto/dynamodb2/table.py | 1722 ++++++++
 .../ext-py/boto-2.38.0/boto/dynamodb2/types.py | 44 +
 .../core/ext-py/boto-2.38.0/boto/ec2/__init__.py | 86 +
 .../core/ext-py/boto-2.38.0/boto/ec2/address.py | 130 +
 .../core/ext-py/boto-2.38.0/boto/ec2/attributes.py | 71 +
 .../boto-2.38.0/boto/ec2/autoscale/__init__.py | 895 ++++
 .../boto-2.38.0/boto/ec2/autoscale/activity.py | 73 +
 .../ext-py/boto-2.38.0/boto/ec2/autoscale/group.py | 361 ++
 .../boto-2.38.0/boto/ec2/autoscale/instance.py | 59 +
 .../boto-2.38.0/boto/ec2/autoscale/launchconfig.py | 270 ++
 .../boto-2.38.0/boto/ec2/autoscale/limits.py | 44 +
 .../boto-2.38.0/boto/ec2/autoscale/policy.py | 181 +
 .../boto-2.38.0/boto/ec2/autoscale/request.py | 38 +
 .../boto-2.38.0/boto/ec2/autoscale/scheduled.py | 77 +
 .../ext-py/boto-2.38.0/boto/ec2/autoscale/tag.py | 84 +
 .../boto-2.38.0/boto/ec2/blockdevicemapping.py | 165 +
 .../ext-py/boto-2.38.0/boto/ec2/bundleinstance.py | 78 +
 .../ext-py/boto-2.38.0/boto/ec2/buyreservation.py | 85 +
 .../boto-2.38.0/boto/ec2/cloudwatch/__init__.py | 593 +++
 .../boto-2.38.0/boto/ec2/cloudwatch/alarm.py | 323 ++
 .../boto-2.38.0/boto/ec2/cloudwatch/datapoint.py | 40 +
 .../boto-2.38.0/boto/ec2/cloudwatch/dimension.py | 38 +
 .../boto-2.38.0/boto/ec2/cloudwatch/listelement.py | 30 +
 .../boto-2.38.0/boto/ec2/cloudwatch/metric.py | 168 +
 .../core/ext-py/boto-2.38.0/boto/ec2/connection.py | 4506 ++++++++++++++++++++
 .../core/ext-py/boto-2.38.0/boto/ec2/ec2object.py | 144 +
 .../ext-py/boto-2.38.0/boto/ec2/elb/__init__.py | 758 ++++
 .../ext-py/boto-2.38.0/boto/ec2/elb/attributes.py | 154 +
 .../ext-py/boto-2.38.0/boto/ec2/elb/healthcheck.py | 89 +
 .../boto-2.38.0/boto/ec2/elb/instancestate.py | 63 +
 .../ext-py/boto-2.38.0/boto/ec2/elb/listelement.py | 36 +
 .../ext-py/boto-2.38.0/boto/ec2/elb/listener.py | 87 +
 .../boto-2.38.0/boto/ec2/elb/loadbalancer.py | 419 ++
 .../ext-py/boto-2.38.0/boto/ec2/elb/policies.py | 108 +
 .../boto-2.38.0/boto/ec2/elb/securitygroup.py | 38 +
 desktop/core/ext-py/boto-2.38.0/boto/ec2/group.py | 38 +
 desktop/core/ext-py/boto-2.38.0/boto/ec2/image.py | 440 ++
 .../core/ext-py/boto-2.38.0/boto/ec2/instance.py | 678 +++
 .../ext-py/boto-2.38.0/boto/ec2/instanceinfo.py | 49 +
 .../ext-py/boto-2.38.0/boto/ec2/instancestatus.py | 212 +
 .../ext-py/boto-2.38.0/boto/ec2/instancetype.py | 59 +
 .../core/ext-py/boto-2.38.0/boto/ec2/keypair.py | 111 +
 .../boto-2.38.0/boto/ec2/launchspecification.py | 105 +
 .../boto-2.38.0/boto/ec2/networkinterface.py | 351 ++
 .../ext-py/boto-2.38.0/boto/ec2/placementgroup.py | 53 +
 .../core/ext-py/boto-2.38.0/boto/ec2/regioninfo.py | 36 +
 .../boto-2.38.0/boto/ec2/reservedinstance.py | 352 ++
 .../ext-py/boto-2.38.0/boto/ec2/securitygroup.py | 392 ++
 .../core/ext-py/boto-2.38.0/boto/ec2/snapshot.py | 187 +
 .../boto/ec2/spotdatafeedsubscription.py | 65 +
 .../boto-2.38.0/boto/ec2/spotinstancerequest.py | 192 +
 .../boto-2.38.0/boto/ec2/spotpricehistory.py | 54 +
 desktop/core/ext-py/boto-2.38.0/boto/ec2/tag.py | 84 +
 desktop/core/ext-py/boto-2.38.0/boto/ec2/volume.py | 315 ++
 .../ext-py/boto-2.38.0/boto/ec2/volumestatus.py | 205 +
 desktop/core/ext-py/boto-2.38.0/boto/ec2/zone.py | 78 +
 .../boto/ec2containerservice/__init__.py | 41 +
 .../boto/ec2containerservice/exceptions.py | 31 +
 .../boto-2.38.0/boto/ec2containerservice/layer1.py | 748 ++++
 .../core/ext-py/boto-2.38.0/boto/ecs/__init__.py | 105 +
 desktop/core/ext-py/boto-2.38.0/boto/ecs/item.py | 164 +
 .../boto-2.38.0/boto/elasticache/__init__.py | 41 +
 .../ext-py/boto-2.38.0/boto/elasticache/layer1.py | 1664 ++++++++
 .../boto-2.38.0/boto/elastictranscoder/__init__.py | 45 +
 .../boto/elastictranscoder/exceptions.py | 50 +
 .../boto-2.38.0/boto/elastictranscoder/layer1.py | 932 ++++
 .../core/ext-py/boto-2.38.0/boto/emr/__init__.py | 49 +
 .../boto-2.38.0/boto/emr/bootstrap_action.py | 46 +
 .../core/ext-py/boto-2.38.0/boto/emr/connection.py | 754 ++++
 .../core/ext-py/boto-2.38.0/boto/emr/emrobject.py | 511 +++
 .../ext-py/boto-2.38.0/boto/emr/instance_group.py | 43 +
 desktop/core/ext-py/boto-2.38.0/boto/emr/step.py | 283 ++
 .../core/ext-py/boto-2.38.0/boto/endpoints.json | 419 ++
 desktop/core/ext-py/boto-2.38.0/boto/exception.py | 573 +++
 desktop/core/ext-py/boto-2.38.0/boto/file/README | 49 +
 .../core/ext-py/boto-2.38.0/boto/file/__init__.py | 28 +
 .../core/ext-py/boto-2.38.0/boto/file/bucket.py | 112 +
 .../ext-py/boto-2.38.0/boto/file/connection.py | 33 +
 desktop/core/ext-py/boto-2.38.0/boto/file/key.py | 201 +
 .../boto-2.38.0/boto/file/simpleresultset.py | 30 +
 .../core/ext-py/boto-2.38.0/boto/fps/__init__.py | 21 +
 .../core/ext-py/boto-2.38.0/boto/fps/connection.py | 395 ++
 .../core/ext-py/boto-2.38.0/boto/fps/exception.py | 344 ++
 .../core/ext-py/boto-2.38.0/boto/fps/response.py | 207 +
 .../ext-py/boto-2.38.0/boto/glacier/__init__.py | 42 +
 .../ext-py/boto-2.38.0/boto/glacier/concurrent.py | 425 ++
 .../ext-py/boto-2.38.0/boto/glacier/exceptions.py | 58 +
 .../core/ext-py/boto-2.38.0/boto/glacier/job.py | 177 +
 .../core/ext-py/boto-2.38.0/boto/glacier/layer1.py | 1279 ++++++
 .../core/ext-py/boto-2.38.0/boto/glacier/layer2.py | 101 +
 .../ext-py/boto-2.38.0/boto/glacier/response.py | 49 +
 .../core/ext-py/boto-2.38.0/boto/glacier/utils.py | 175 +
 .../core/ext-py/boto-2.38.0/boto/glacier/vault.py | 450 ++
 .../core/ext-py/boto-2.38.0/boto/glacier/writer.py | 262 ++
 .../core/ext-py/boto-2.38.0/boto/gs/__init__.py | 22 +
 desktop/core/ext-py/boto-2.38.0/boto/gs/acl.py | 308 ++
 desktop/core/ext-py/boto-2.38.0/boto/gs/bucket.py | 989 +++++
 .../boto-2.38.0/boto/gs/bucketlistresultset.py | 64 +
 .../core/ext-py/boto-2.38.0/boto/gs/connection.py | 129 +
 desktop/core/ext-py/boto-2.38.0/boto/gs/cors.py | 169 +
 desktop/core/ext-py/boto-2.38.0/boto/gs/key.py | 946 ++++
 .../core/ext-py/boto-2.38.0/boto/gs/lifecycle.py | 227 +
 .../boto/gs/resumable_upload_handler.py | 679 +++
 desktop/core/ext-py/boto-2.38.0/boto/gs/user.py | 54 +
 desktop/core/ext-py/boto-2.38.0/boto/handler.py | 60 +
 .../ext-py/boto-2.38.0/boto/https_connection.py | 138 +
 .../core/ext-py/boto-2.38.0/boto/iam/__init__.py | 86 +
 .../core/ext-py/boto-2.38.0/boto/iam/connection.py | 1642 +++++++
 .../core/ext-py/boto-2.38.0/boto/iam/summarymap.py | 42 +
 .../core/ext-py/boto-2.38.0/boto/jsonresponse.py | 168 +
 .../ext-py/boto-2.38.0/boto/kinesis/__init__.py | 41 +
 .../ext-py/boto-2.38.0/boto/kinesis/exceptions.py | 51 +
 .../core/ext-py/boto-2.38.0/boto/kinesis/layer1.py | 875 ++++
 .../core/ext-py/boto-2.38.0/boto/kms/__init__.py | 41 +
 .../core/ext-py/boto-2.38.0/boto/kms/exceptions.py | 72 +
 desktop/core/ext-py/boto-2.38.0/boto/kms/layer1.py | 821 ++++
 .../core/ext-py/boto-2.38.0/boto/logs/__init__.py | 41 +
 .../ext-py/boto-2.38.0/boto/logs/exceptions.py | 59 +
 .../core/ext-py/boto-2.38.0/boto/logs/layer1.py | 576 +++
 .../boto-2.38.0/boto/machinelearning/__init__.py | 42 +
 .../boto-2.38.0/boto/machinelearning/exceptions.py | 51 +
 .../boto-2.38.0/boto/machinelearning/layer1.py | 1408 ++++++
 .../ext-py/boto-2.38.0/boto/manage/__init__.py | 23 +
 .../ext-py/boto-2.38.0/boto/manage/cmdshell.py | 407 ++
 .../core/ext-py/boto-2.38.0/boto/manage/propget.py | 63 +
 .../core/ext-py/boto-2.38.0/boto/manage/server.py | 556 +++
 .../core/ext-py/boto-2.38.0/boto/manage/task.py | 176 +
 .../ext-py/boto-2.38.0/boto/manage/test_manage.py | 34 +
 .../core/ext-py/boto-2.38.0/boto/manage/volume.py | 420 ++
 .../ext-py/boto-2.38.0/boto/mashups/__init__.py | 23 +
 .../ext-py/boto-2.38.0/boto/mashups/interactive.py | 97 +
 .../ext-py/boto-2.38.0/boto/mashups/iobject.py | 114 +
 .../core/ext-py/boto-2.38.0/boto/mashups/order.py | 211 +
 .../core/ext-py/boto-2.38.0/boto/mashups/server.py | 395 ++
 .../core/ext-py/boto-2.38.0/boto/mturk/__init__.py | 23 +
 .../ext-py/boto-2.38.0/boto/mturk/connection.py | 1052 +++++
 .../ext-py/boto-2.38.0/boto/mturk/layoutparam.py | 55 +
 .../ext-py/boto-2.38.0/boto/mturk/notification.py | 103 +
 .../core/ext-py/boto-2.38.0/boto/mturk/price.py | 48 +
 .../ext-py/boto-2.38.0/boto/mturk/qualification.py | 137 +
 .../core/ext-py/boto-2.38.0/boto/mturk/question.py | 455 ++
 .../core/ext-py/boto-2.38.0/boto/mws/__init__.py | 21 +
 .../core/ext-py/boto-2.38.0/boto/mws/connection.py | 1168 +++++
 .../core/ext-py/boto-2.38.0/boto/mws/exception.py | 70 +
 .../core/ext-py/boto-2.38.0/boto/mws/response.py | 787 ++++
 .../ext-py/boto-2.38.0/boto/opsworks/__init__.py | 41 +
 .../ext-py/boto-2.38.0/boto/opsworks/exceptions.py | 30 +
 .../ext-py/boto-2.38.0/boto/opsworks/layer1.py | 3094 ++++++++++++++
 desktop/core/ext-py/boto-2.38.0/boto/plugin.py | 93 +
 desktop/core/ext-py/boto-2.38.0/boto/provider.py | 452 +
 .../core/ext-py/boto-2.38.0/boto/pyami/__init__.py | 22 +
 .../ext-py/boto-2.38.0/boto/pyami/bootstrap.py | 134 +
 .../core/ext-py/boto-2.38.0/boto/pyami/config.py | 225 +
 .../core/ext-py/boto-2.38.0/boto/pyami/copybot.cfg | 60 +
 .../core/ext-py/boto-2.38.0/boto/pyami/copybot.py | 96 +
 .../ext-py/boto-2.38.0/boto/pyami/helloworld.py | 27 +
 .../boto-2.38.0/boto/pyami/installers/__init__.py | 63 +
 .../boto/pyami/installers/ubuntu/__init__.py | 22 +
 .../boto/pyami/installers/ubuntu/apache.py | 43 +
 .../boto/pyami/installers/ubuntu/ebs.py | 238 ++
 .../boto/pyami/installers/ubuntu/installer.py | 94 +
 .../boto/pyami/installers/ubuntu/mysql.py | 108 +
 .../boto/pyami/installers/ubuntu/trac.py | 139 +
 .../ext-py/boto-2.38.0/boto/pyami/launch_ami.py | 177 +
 .../ext-py/boto-2.38.0/boto/pyami/scriptbase.py | 43 +
 .../core/ext-py/boto-2.38.0/boto/pyami/startup.py | 60 +
 .../core/ext-py/boto-2.38.0/boto/rds/__init__.py | 1623 +++++++
 .../core/ext-py/boto-2.38.0/boto/rds/dbinstance.py | 416 ++
 .../ext-py/boto-2.38.0/boto/rds/dbsecuritygroup.py | 186 +
 .../core/ext-py/boto-2.38.0/boto/rds/dbsnapshot.py | 138 +
 .../ext-py/boto-2.38.0/boto/rds/dbsubnetgroup.py | 69 +
 desktop/core/ext-py/boto-2.38.0/boto/rds/event.py | 49 +
 .../core/ext-py/boto-2.38.0/boto/rds/logfile.py | 68 +
 .../ext-py/boto-2.38.0/boto/rds/optiongroup.py | 404 ++
 .../ext-py/boto-2.38.0/boto/rds/parametergroup.py | 201 +
 .../core/ext-py/boto-2.38.0/boto/rds/regioninfo.py | 33 +
 .../core/ext-py/boto-2.38.0/boto/rds/statusinfo.py | 54 +
 .../boto/rds/vpcsecuritygroupmembership.py | 85 +
 .../core/ext-py/boto-2.38.0/boto/rds2/__init__.py | 53 +
 .../ext-py/boto-2.38.0/boto/rds2/exceptions.py | 234 +
 .../core/ext-py/boto-2.38.0/boto/rds2/layer1.py | 3770 ++++++++++++++++
 .../ext-py/boto-2.38.0/boto/redshift/__init__.py | 41 +
 .../ext-py/boto-2.38.0/boto/redshift/exceptions.py | 459 ++
 .../ext-py/boto-2.38.0/boto/redshift/layer1.py | 3097 ++++++++++++++
 desktop/core/ext-py/boto-2.38.0/boto/regioninfo.py | 187 +
 desktop/core/ext-py/boto-2.38.0/boto/requestlog.py | 39 +
 desktop/core/ext-py/boto-2.38.0/boto/resultset.py | 176 +
 .../ext-py/boto-2.38.0/boto/roboto/__init__.py | 1 +
 .../boto-2.38.0/boto/roboto/awsqueryrequest.py | 503 +++
 .../boto-2.38.0/boto/roboto/awsqueryservice.py | 122 +
 .../core/ext-py/boto-2.38.0/boto/roboto/param.py | 147 +
 .../ext-py/boto-2.38.0/boto/route53/__init__.py | 87 +
 .../ext-py/boto-2.38.0/boto/route53/connection.py | 608 +++
 .../boto-2.38.0/boto/route53/domains/__init__.py | 40 +
 .../boto-2.38.0/boto/route53/domains/exceptions.py | 46 +
 .../boto-2.38.0/boto/route53/domains/layer1.py | 868 ++++
 .../ext-py/boto-2.38.0/boto/route53/exception.py | 27 +
 .../ext-py/boto-2.38.0/boto/route53/healthcheck.py | 146 +
 .../ext-py/boto-2.38.0/boto/route53/hostedzone.py | 51 +
 .../core/ext-py/boto-2.38.0/boto/route53/record.py | 374 ++
 .../core/ext-py/boto-2.38.0/boto/route53/status.py | 42 +
 .../core/ext-py/boto-2.38.0/boto/route53/zone.py | 419 ++
 .../core/ext-py/boto-2.38.0/boto/s3/__init__.py | 74 +
 desktop/core/ext-py/boto-2.38.0/boto/s3/acl.py | 171 +
 desktop/core/ext-py/boto-2.38.0/boto/s3/bucket.py | 1876 ++++++++
 .../boto-2.38.0/boto/s3/bucketlistresultset.py | 156 +
 .../ext-py/boto-2.38.0/boto/s3/bucketlogging.py | 83 +
 .../core/ext-py/boto-2.38.0/boto/s3/connection.py | 665 +++
 desktop/core/ext-py/boto-2.38.0/boto/s3/cors.py | 210 +
 .../ext-py/boto-2.38.0/boto/s3/deletemarker.py | 55 +
 desktop/core/ext-py/boto-2.38.0/boto/s3/key.py | 1921 +++++++++
 desktop/core/ext-py/boto-2.38.0/boto/s3/keyfile.py | 134 +
 .../core/ext-py/boto-2.38.0/boto/s3/lifecycle.py | 236 +
 .../core/ext-py/boto-2.38.0/boto/s3/multidelete.py | 138 +
 .../core/ext-py/boto-2.38.0/boto/s3/multipart.py | 330 ++
 desktop/core/ext-py/boto-2.38.0/boto/s3/prefix.py | 42 +
 .../boto/s3/resumable_download_handler.py | 352 ++
 desktop/core/ext-py/boto-2.38.0/boto/s3/tagging.py | 71 +
 desktop/core/ext-py/boto-2.38.0/boto/s3/user.py | 49 +
 desktop/core/ext-py/boto-2.38.0/boto/s3/website.py | 293 ++
 .../core/ext-py/boto-2.38.0/boto/sdb/__init__.py | 55 +
 .../core/ext-py/boto-2.38.0/boto/sdb/connection.py | 618 +++
 .../ext-py/boto-2.38.0/boto/sdb/db/__init__.py | 20 +
 .../core/ext-py/boto-2.38.0/boto/sdb/db/blob.py | 76 +
 desktop/core/ext-py/boto-2.38.0/boto/sdb/db/key.py | 59 +
 .../boto-2.38.0/boto/sdb/db/manager/__init__.py | 85 +
 .../boto-2.38.0/boto/sdb/db/manager/sdbmanager.py | 738 ++++
 .../boto-2.38.0/boto/sdb/db/manager/xmlmanager.py | 517 +++
 .../core/ext-py/boto-2.38.0/boto/sdb/db/model.py | 296 ++
 .../ext-py/boto-2.38.0/boto/sdb/db/property.py | 704 +++
 .../core/ext-py/boto-2.38.0/boto/sdb/db/query.py | 86 +
 .../ext-py/boto-2.38.0/boto/sdb/db/sequence.py | 224 +
 .../core/ext-py/boto-2.38.0/boto/sdb/db/test_db.py | 231 +
 desktop/core/ext-py/boto-2.38.0/boto/sdb/domain.py | 380 ++
 desktop/core/ext-py/boto-2.38.0/boto/sdb/item.py | 177 +
 .../ext-py/boto-2.38.0/boto/sdb/queryresultset.py | 93 +
 .../core/ext-py/boto-2.38.0/boto/sdb/regioninfo.py | 33 +
 .../ext-py/boto-2.38.0/boto/services/__init__.py | 23 +
 .../core/ext-py/boto-2.38.0/boto/services/bs.py | 180 +
 .../ext-py/boto-2.38.0/boto/services/message.py | 58 +
 .../ext-py/boto-2.38.0/boto/services/result.py | 135 +
 .../ext-py/boto-2.38.0/boto/services/service.py | 161 +
 .../ext-py/boto-2.38.0/boto/services/servicedef.py | 91 +
 .../ext-py/boto-2.38.0/boto/services/sonofmmm.cfg | 43 +
 .../ext-py/boto-2.38.0/boto/services/sonofmmm.py | 81 +
 .../ext-py/boto-2.38.0/boto/services/submit.py | 87 +
 .../core/ext-py/boto-2.38.0/boto/ses/__init__.py | 52 +
 .../core/ext-py/boto-2.38.0/boto/ses/connection.py | 565 +++
 .../core/ext-py/boto-2.38.0/boto/ses/exceptions.py | 80 +
 .../core/ext-py/boto-2.38.0/boto/sns/__init__.py | 54 +
 .../core/ext-py/boto-2.38.0/boto/sns/connection.py | 765 ++++
 .../core/ext-py/boto-2.38.0/boto/sqs/__init__.py | 46 +
 .../core/ext-py/boto-2.38.0/boto/sqs/attributes.py | 46 +
 .../ext-py/boto-2.38.0/boto/sqs/batchresults.py | 95 +
 .../core/ext-py/boto-2.38.0/boto/sqs/bigmessage.py | 119 +
 .../core/ext-py/boto-2.38.0/boto/sqs/connection.py | 537 +++
 .../ext-py/boto-2.38.0/boto/sqs/jsonmessage.py | 43 +
 .../core/ext-py/boto-2.38.0/boto/sqs/message.py | 271 ++
 .../boto-2.38.0/boto/sqs/messageattributes.py | 66 +
 desktop/core/ext-py/boto-2.38.0/boto/sqs/queue.py | 492 +++
 .../core/ext-py/boto-2.38.0/boto/sqs/regioninfo.py | 33 +
 .../core/ext-py/boto-2.38.0/boto/storage_uri.py | 891 ++++
 .../core/ext-py/boto-2.38.0/boto/sts/__init__.py | 52 +
 .../core/ext-py/boto-2.38.0/boto/sts/connection.py | 652 +++
 .../ext-py/boto-2.38.0/boto/sts/credentials.py | 237 +
 .../ext-py/boto-2.38.0/boto/support/__init__.py | 41 +
 .../ext-py/boto-2.38.0/boto/support/exceptions.py | 58 +
 .../core/ext-py/boto-2.38.0/boto/support/layer1.py | 674 +++
 .../core/ext-py/boto-2.38.0/boto/swf/__init__.py | 46 +
 .../core/ext-py/boto-2.38.0/boto/swf/exceptions.py | 44 +
 desktop/core/ext-py/boto-2.38.0/boto/swf/layer1.py | 1512 +++++++
 .../boto-2.38.0/boto/swf/layer1_decisions.py | 287 ++
 desktop/core/ext-py/boto-2.38.0/boto/swf/layer2.py | 347 ++
 desktop/core/ext-py/boto-2.38.0/boto/utils.py | 1051 +++++
 .../core/ext-py/boto-2.38.0/boto/vendored/six.py | 756 ++++
 .../core/ext-py/boto-2.38.0/boto/vpc/__init__.py | 1828 ++++++++
 .../ext-py/boto-2.38.0/boto/vpc/customergateway.py | 54 +
 .../ext-py/boto-2.38.0/boto/vpc/dhcpoptions.py | 72 +
 .../ext-py/boto-2.38.0/boto/vpc/internetgateway.py | 72 +
 .../core/ext-py/boto-2.38.0/boto/vpc/networkacl.py | 164 +
 .../core/ext-py/boto-2.38.0/boto/vpc/routetable.py | 115 +
 desktop/core/ext-py/boto-2.38.0/boto/vpc/subnet.py | 57 +
 desktop/core/ext-py/boto-2.38.0/boto/vpc/vpc.py | 204 +
 .../boto-2.38.0/boto/vpc/vpc_peering_connection.py | 163 +
 .../ext-py/boto-2.38.0/boto/vpc/vpnconnection.py | 204 +
 .../core/ext-py/boto-2.38.0/boto/vpc/vpngateway.py | 87 +
 .../ext-py/boto-2.38.0/docs/BotoCheatSheet.pdf | Bin 0 -> 48109 bytes
 desktop/core/ext-py/boto-2.38.0/docs/Makefile | 95 +
 desktop/core/ext-py/boto-2.38.0/docs/make.bat | 113 +
 .../boto-2.38.0/docs/source/_templates/layout.html | 3 +
 .../boto-2.38.0/docs/source/apps_built_on_boto.rst | 51 +
 .../boto-2.38.0/docs/source/autoscale_tut.rst | 218 +
 .../boto-2.38.0/docs/source/boto_config_tut.rst | 442 ++
 .../docs/source/boto_theme/static/boto.css_t | 239 ++
 .../docs/source/boto_theme/static/pygments.css | 61 +
 .../boto-2.38.0/docs/source/boto_theme/theme.conf | 3 +
 .../boto-2.38.0/docs/source/cloudfront_tut.rst | 197 +
 .../boto-2.38.0/docs/source/cloudsearch_tut.rst | 433 ++
 .../boto-2.38.0/docs/source/cloudwatch_tut.rst | 117 +
 .../ext-py/boto-2.38.0/docs/source/commandline.rst | 85 +
 .../core/ext-py/boto-2.38.0/docs/source/conf.py | 37 +
 .../boto-2.38.0/docs/source/contributing.rst | 227 +
 .../boto-2.38.0/docs/source/documentation.rst | 59 +
 .../boto-2.38.0/docs/source/dynamodb2_tut.rst | 700 +++
 .../boto-2.38.0/docs/source/dynamodb_tut.rst | 348 ++
 .../ext-py/boto-2.38.0/docs/source/ec2_tut.rst | 247 ++
 .../ext-py/boto-2.38.0/docs/source/elb_tut.rst | 236 +
 .../ext-py/boto-2.38.0/docs/source/emr_tut.rst | 107 +
 .../docs/source/extensions/githublinks/__init__.py | 55 +
 .../boto-2.38.0/docs/source/getting_started.rst | 187 +
 .../core/ext-py/boto-2.38.0/docs/source/index.rst | 253 ++
 .../docs/source/migrations/dynamodb_v1_to_v2.rst | 366 ++
 .../docs/source/migrations/rds_v1_to_v2.rst | 91 +
 .../boto-2.38.0/docs/source/porting_guide.rst | 67 +
 .../ext-py/boto-2.38.0/docs/source/rds_tut.rst | 117 +
 .../boto-2.38.0/docs/source/ref/autoscale.rst | 70 +
 .../boto-2.38.0/docs/source/ref/awslamba.rst | 26 +
 .../boto-2.38.0/docs/source/ref/beanstalk.rst | 26 +
 .../ext-py/boto-2.38.0/docs/source/ref/boto.rst | 47 +
 .../boto-2.38.0/docs/source/ref/cloudformation.rst | 34 +
 .../boto-2.38.0/docs/source/ref/cloudfront.rst | 68 +
 .../boto-2.38.0/docs/source/ref/cloudhsm.rst | 26 +
 .../boto-2.38.0/docs/source/ref/cloudsearch.rst | 61 +
 .../boto-2.38.0/docs/source/ref/cloudsearch2.rst | 54 +
 .../docs/source/ref/cloudsearchdomain.rst | 26 +
 .../boto-2.38.0/docs/source/ref/cloudtrail.rst | 26 +
 .../boto-2.38.0/docs/source/ref/cloudwatch.rst | 34 +
 .../boto-2.38.0/docs/source/ref/codedeploy.rst | 26 +
 .../docs/source/ref/cognito-identity.rst | 26 +
 .../boto-2.38.0/docs/source/ref/cognito-sync.rst | 26 +
 .../boto-2.38.0/docs/source/ref/configservice.rst | 26 +
 .../ext-py/boto-2.38.0/docs/source/ref/contrib.rst | 19 +
 .../boto-2.38.0/docs/source/ref/datapipeline.rst | 26 +
 .../boto-2.38.0/docs/source/ref/dynamodb.rst | 61 +
 .../boto-2.38.0/docs/source/ref/dynamodb2.rst | 61 +
 .../ext-py/boto-2.38.0/docs/source/ref/ec2.rst | 195 +
 .../docs/source/ref/ec2containerservice.rst | 26 +
 .../ext-py/boto-2.38.0/docs/source/ref/ecs.rst | 19 +
 .../boto-2.38.0/docs/source/ref/elasticache.rst | 19 +
 .../docs/source/ref/elastictranscoder.rst | 26 +
 .../ext-py/boto-2.38.0/docs/source/ref/elb.rst | 67 +
 .../ext-py/boto-2.38.0/docs/source/ref/emr.rst | 34 +
 .../ext-py/boto-2.38.0/docs/source/ref/file.rst | 34 +
 .../ext-py/boto-2.38.0/docs/source/ref/fps.rst | 19 +
 .../ext-py/boto-2.38.0/docs/source/ref/glacier.rst | 63 +
 .../core/ext-py/boto-2.38.0/docs/source/ref/gs.rst | 72 +
 .../ext-py/boto-2.38.0/docs/source/ref/iam.rst | 27 +
 .../ext-py/boto-2.38.0/docs/source/ref/index.rst | 42 +
 .../ext-py/boto-2.38.0/docs/source/ref/kinesis.rst | 26 +
 .../ext-py/boto-2.38.0/docs/source/ref/kms.rst | 26 +
 .../ext-py/boto-2.38.0/docs/source/ref/logs.rst | 26 +
 .../docs/source/ref/machinelearning.rst | 26 +
 .../ext-py/boto-2.38.0/docs/source/ref/manage.rst | 47 +
 .../ext-py/boto-2.38.0/docs/source/ref/mturk.rst | 54 +
 .../ext-py/boto-2.38.0/docs/source/ref/mws.rst | 33 +
 .../boto-2.38.0/docs/source/ref/opsworks.rst | 28 +
 .../ext-py/boto-2.38.0/docs/source/ref/pyami.rst | 103 +
 .../ext-py/boto-2.38.0/docs/source/ref/rds.rst | 47 +
 .../ext-py/boto-2.38.0/docs/source/ref/rds2.rst | 26 +
 .../boto-2.38.0/docs/source/ref/redshift.rst | 26 +
 .../ext-py/boto-2.38.0/docs/source/ref/route53.rst | 55 +
 .../boto-2.38.0/docs/source/ref/route53domains.rst | 26 +
 .../core/ext-py/boto-2.38.0/docs/source/ref/s3.rst | 111 +
 .../ext-py/boto-2.38.0/docs/source/ref/sdb.rst | 45 +
 .../ext-py/boto-2.38.0/docs/source/ref/sdb_db.rst | 70 +
 .../boto-2.38.0/docs/source/ref/services.rst | 61 +
 .../ext-py/boto-2.38.0/docs/source/ref/ses.rst | 21 +
 .../ext-py/boto-2.38.0/docs/source/ref/sns.rst | 17 +
 .../ext-py/boto-2.38.0/docs/source/ref/sqs.rst | 61 +
 .../ext-py/boto-2.38.0/docs/source/ref/sts.rst | 25 +
 .../ext-py/boto-2.38.0/docs/source/ref/support.rst | 26 +
 .../ext-py/boto-2.38.0/docs/source/ref/swf.rst | 32 +
 .../ext-py/boto-2.38.0/docs/source/ref/vpc.rst | 68 +
 .../boto-2.38.0/docs/source/releasenotes/dev.rst | 21 +
 .../source/releasenotes/releasenotes_template.rst | 21 +
 .../docs/source/releasenotes/v2.0.0.rst | 135 +
 .../docs/source/releasenotes/v2.0b1.rst | 14 +
 .../docs/source/releasenotes/v2.1.0.rst | 115 +
 .../docs/source/releasenotes/v2.1.1.rst | 7 +
 .../docs/source/releasenotes/v2.10.0.rst | 54 +
 .../docs/source/releasenotes/v2.11.0.rst | 62 +
 .../docs/source/releasenotes/v2.12.0.rst | 32 +
 .../docs/source/releasenotes/v2.13.0.rst | 40 +
 .../docs/source/releasenotes/v2.13.2.rst | 39 +
 .../docs/source/releasenotes/v2.13.3.rst | 11 +
 .../docs/source/releasenotes/v2.14.0.rst | 63 +
 .../docs/source/releasenotes/v2.15.0.rst | 40 +
 .../docs/source/releasenotes/v2.16.0.rst | 41 +
 .../docs/source/releasenotes/v2.17.0.rst | 21 +
 .../docs/source/releasenotes/v2.18.0.rst | 41 +
 .../docs/source/releasenotes/v2.19.0.rst | 24 +
 .../docs/source/releasenotes/v2.2.0.rst | 89 +
 .../docs/source/releasenotes/v2.2.1.rst | 6 +
 .../docs/source/releasenotes/v2.2.2.rst | 31 +
 .../docs/source/releasenotes/v2.20.0.rst | 31 +
 .../docs/source/releasenotes/v2.20.1.rst | 11 +
 .../docs/source/releasenotes/v2.21.0.rst | 43 +
 .../docs/source/releasenotes/v2.21.1.rst | 21 +
 .../docs/source/releasenotes/v2.21.2.rst | 13 +
 .../docs/source/releasenotes/v2.22.0.rst | 29 +
 .../docs/source/releasenotes/v2.22.1.rst | 22 +
 .../docs/source/releasenotes/v2.23.0.rst | 49 +
 .../docs/source/releasenotes/v2.24.0.rst | 36 +
 .../docs/source/releasenotes/v2.25.0.rst | 57 +
 .../docs/source/releasenotes/v2.26.0.rst | 59 +
 .../docs/source/releasenotes/v2.26.1.rst | 14 +
 .../docs/source/releasenotes/v2.27.0.rst | 33 +
 .../docs/source/releasenotes/v2.28.0.rst | 38 +
 .../docs/source/releasenotes/v2.29.0.rst | 25 +
 .../docs/source/releasenotes/v2.29.1.rst | 11 +
 .../docs/source/releasenotes/v2.3.0.rst | 47 +
 .../docs/source/releasenotes/v2.30.0.rst | 28 +
 .../docs/source/releasenotes/v2.31.0.rst | 11 +
 .../docs/source/releasenotes/v2.31.1.rst | 6 +
 .../docs/source/releasenotes/v2.32.0.rst | 113 +
 .../docs/source/releasenotes/v2.32.1.rst | 32 +
 .../docs/source/releasenotes/v2.33.0.rst | 61 +
 .../docs/source/releasenotes/v2.34.0.rst | 21 +
 .../docs/source/releasenotes/v2.35.0.rst | 55 +
 .../docs/source/releasenotes/v2.35.1.rst | 14 +
 .../docs/source/releasenotes/v2.35.2.rst | 16 +
 .../docs/source/releasenotes/v2.36.0.rst | 27 +
 .../docs/source/releasenotes/v2.37.0.rst | 39 +
 .../docs/source/releasenotes/v2.38.0.rst | 13 +
 .../docs/source/releasenotes/v2.4.0.rst | 60 +
 .../docs/source/releasenotes/v2.5.0.rst | 39 +
 .../docs/source/releasenotes/v2.5.1.rst | 6 +
 .../docs/source/releasenotes/v2.5.2.rst | 9 +
 .../docs/source/releasenotes/v2.6.0.rst | 101 +
 .../docs/source/releasenotes/v2.7.0.rst | 91 +
 .../docs/source/releasenotes/v2.8.0.rst | 45 +
 .../docs/source/releasenotes/v2.9.0.rst | 56 +
 .../docs/source/releasenotes/v2.9.1.rst | 48 +
 .../docs/source/releasenotes/v2.9.2.rst | 18 +
 .../docs/source/releasenotes/v2.9.3.rst | 53 +
 .../docs/source/releasenotes/v2.9.4.rst | 30 +
 .../docs/source/releasenotes/v2.9.5.rst | 32 +
 .../docs/source/releasenotes/v2.9.6.rst | 56 +
 .../docs/source/releasenotes/v2.9.7.rst | 40 +
 .../docs/source/releasenotes/v2.9.8.rst | 35 +
 .../docs/source/releasenotes/v2.9.9.rst | 50 +
 .../boto-2.38.0/docs/source/request_hook_tut.rst | 61 +
 .../ext-py/boto-2.38.0/docs/source/route53_tut.rst | 103 +
 .../core/ext-py/boto-2.38.0/docs/source/s3_tut.rst | 544 +++
 .../boto-2.38.0/docs/source/security_groups.rst | 82 +
 .../ext-py/boto-2.38.0/docs/source/ses_tut.rst | 172 +
 .../boto-2.38.0/docs/source/simpledb_tut.rst | 198 +
 .../ext-py/boto-2.38.0/docs/source/sqs_tut.rst | 276 ++
 .../ext-py/boto-2.38.0/docs/source/support_tut.rst | 154 +
 .../ext-py/boto-2.38.0/docs/source/swf_tut.rst | 663 +++
 .../ext-py/boto-2.38.0/docs/source/vpc_tut.rst | 138 +
 desktop/core/ext-py/boto-2.38.0/pylintrc | 301 ++
 desktop/core/ext-py/boto-2.38.0/setup.cfg | 8 +
 desktop/core/ext-py/boto-2.38.0/setup.py | 104 +
 desktop/core/ext-py/boto-2.38.0/tests/__init__.py | 20 +
 desktop/core/ext-py/boto-2.38.0/tests/compat.py | 38 +
 .../core/ext-py/boto-2.38.0/tests/db/test_lists.py | 96 +
 .../ext-py/boto-2.38.0/tests/db/test_password.py | 128 +
 .../core/ext-py/boto-2.38.0/tests/db/test_query.py | 152 +
 .../ext-py/boto-2.38.0/tests/db/test_sequence.py | 109 +
 .../ext-py/boto-2.38.0/tests/devpay/test_s3.py | 181 +
 desktop/core/ext-py/boto-2.38.0/tests/fps/test.py | 100 +
 .../boto-2.38.0/tests/fps/test_verify_signature.py | 12 +
 .../boto-2.38.0/tests/integration/__init__.py | 63 +
 .../tests/integration/awslambda/__init__.py | 21 +
 .../tests/integration/awslambda/test_awslambda.py | 38 +
 .../tests/integration/beanstalk/test_wrapper.py | 209 +
 .../tests/integration/cloudformation/__init__.py | 21 +
 .../cloudformation/test_cert_verification.py | 39 +
 .../integration/cloudformation/test_connection.py | 169 +
 .../tests/integration/cloudhsm/__init__.py | 21 +
 .../tests/integration/cloudhsm/test_cloudhsm.py | 44 +
 .../tests/integration/cloudsearch/__init__.py | 21 +
 .../cloudsearch/test_cert_verification.py | 39 +
 .../tests/integration/cloudsearch/test_layers.py | 75 +
 .../tests/integration/cloudsearch2/__init__.py | 21 +
 .../cloudsearch2/test_cert_verification.py | 39 +
 .../tests/integration/cloudsearch2/test_layers.py | 79 +
 .../cloudtrail/test_cert_verification.py | 38 +
 .../integration/cloudtrail/test_cloudtrail.py | 91 +
 .../tests/integration/codedeploy/__init__.py | 21 +
 .../integration/codedeploy/test_codedeploy.py | 41 +
 .../tests/integration/cognito/__init__.py | 41 +
 .../tests/integration/cognito/identity/__init__.py | 21 +
 .../cognito/identity/test_cognito_identity.py | 52 +
 .../tests/integration/cognito/sync/__init__.py | 21 +
 .../integration/cognito/sync/test_cognito_sync.py | 46 +
 .../tests/integration/configservice/__init__.py | 21 +
 .../configservice/test_configservice.py | 44 +
 .../datapipeline/test_cert_verification.py | 38 +
 .../tests/integration/datapipeline/test_layer1.py | 122 +
 .../directconnect/test_directconnect.py | 40 +
 .../tests/integration/dynamodb/__init__.py | 20 +
 .../integration/dynamodb/test_cert_verification.py | 39 +
 .../tests/integration/dynamodb/test_layer1.py | 266 ++
 .../tests/integration/dynamodb/test_layer2.py | 496 +++
 .../tests/integration/dynamodb/test_table.py | 84 +
 .../integration/dynamodb2/forum_test_data.json | 50 +
 .../dynamodb2/test_cert_verification.py | 39 +
 .../tests/integration/dynamodb2/test_highlevel.py | 821 ++++
 .../tests/integration/dynamodb2/test_layer1.py | 363 ++
 .../boto-2.38.0/tests/integration/ec2/__init__.py | 20 +
 .../tests/integration/ec2/autoscale/__init__.py | 20 +
 .../ec2/autoscale/test_cert_verification.py | 39 +
 .../integration/ec2/autoscale/test_connection.py | 182 +
 .../tests/integration/ec2/cloudwatch/__init__.py | 20 +
 .../ec2/cloudwatch/test_cert_verification.py | 38 +
 .../integration/ec2/cloudwatch/test_connection.py | 273 ++
 .../tests/integration/ec2/elb/__init__.py | 20 +
 .../integration/ec2/elb/test_cert_verification.py | 39 +
 .../tests/integration/ec2/elb/test_connection.py | 297 ++
 .../integration/ec2/test_cert_verification.py | 39 +
 .../tests/integration/ec2/test_connection.py | 246 ++
 .../tests/integration/ec2/vpc/test_connection.py | 213 +
 .../integration/ec2containerservice/__init__.py | 21 +
 .../test_ec2containerservice.py | 40 +
 .../tests/integration/elasticache/test_layer1.py | 67 +
 .../elastictranscoder/test_cert_verification.py | 34 +
 .../integration/elastictranscoder/test_layer1.py | 115 +
 .../boto-2.38.0/tests/integration/emr/__init__.py | 20 +
 .../integration/emr/test_cert_verification.py | 39 +
 .../tests/integration/glacier/__init__.py | 22 +
 .../integration/glacier/test_cert_verification.py | 39 +
 .../tests/integration/glacier/test_layer1.py | 44 +
 .../tests/integration/glacier/test_layer2.py | 45 +
 .../tests/integration/gs/cb_test_harness.py | 76 +
 .../boto-2.38.0/tests/integration/gs/test_basic.py | 434 ++
 .../integration/gs/test_generation_conditionals.py | 399 ++
 .../integration/gs/test_resumable_downloads.py | 354 ++
 .../tests/integration/gs/test_resumable_uploads.py | 552 +++
 .../tests/integration/gs/test_storage_uri.py | 161 +
 .../tests/integration/gs/test_versioning.py | 267 ++
 .../boto-2.38.0/tests/integration/gs/testcase.py | 116 +
 .../boto-2.38.0/tests/integration/gs/util.py | 86 +
 .../boto-2.38.0/tests/integration/iam/__init__.py | 20 +
 .../integration/iam/test_cert_verification.py | 39 +
 .../tests/integration/iam/test_connection.py | 47 +
 .../tests/integration/iam/test_password_policy.py | 80 +
 .../integration/kinesis/test_cert_verification.py | 38 +
 .../tests/integration/kinesis/test_kinesis.py | 116 +
 .../boto-2.38.0/tests/integration/kms/test_kms.py | 41 +
 .../integration/logs/test_cert_verification.py | 37 +
 .../tests/integration/logs/test_layer1.py | 43 +
 .../boto-2.38.0/tests/integration/mws/test.py | 122 +
 .../tests/integration/opsworks/test_layer1.py | 54 +
 .../boto-2.38.0/tests/integration/rds/__init__.py | 21 +
 .../integration/rds/test_cert_verification.py | 39 +
 .../tests/integration/rds/test_db_subnet_group.py | 92 +
 .../tests/integration/rds/test_promote_modify.py | 138 +
 .../boto-2.38.0/tests/integration/rds2/__init__.py | 21 +
 .../integration/rds2/test_cert_verification.py | 39 +
 .../tests/integration/rds2/test_connection.py | 93 +
 .../integration/redshift/test_cert_verification.py | 34 +
 .../tests/integration/redshift/test_layer1.py | 134 +
 .../tests/integration/route53/__init__.py | 39 +
 .../tests/integration/route53/domains/__init__.py | 21 +
 .../route53/domains/test_route53domains.py | 44 +
 .../route53/test_alias_resourcerecordsets.py | 114 +
 .../integration/route53/test_cert_verification.py | 41 +
 .../tests/integration/route53/test_health_check.py | 175 +
 .../integration/route53/test_resourcerecordsets.py | 91 +
 .../tests/integration/route53/test_zone.py | 196 +
 .../boto-2.38.0/tests/integration/s3/__init__.py | 20 +
 .../tests/integration/s3/mock_storage_service.py | 606 +++
 .../tests/integration/s3/other_cacerts.txt | 70 +
 .../tests/integration/s3/test_bucket.py | 301 ++
 .../tests/integration/s3/test_cert_verification.py | 39 +
 .../tests/integration/s3/test_connect_to_region.py | 73 +
 .../tests/integration/s3/test_connection.py | 245 ++
 .../boto-2.38.0/tests/integration/s3/test_cors.py | 78 +
 .../tests/integration/s3/test_encryption.py | 114 +
 .../integration/s3/test_https_cert_validation.py | 141 +
 .../boto-2.38.0/tests/integration/s3/test_key.py | 534 +++
 .../boto-2.38.0/tests/integration/s3/test_mfa.py | 95 +
 .../tests/integration/s3/test_multidelete.py | 181 +
 .../tests/integration/s3/test_multipart.py | 229 +
 .../boto-2.38.0/tests/integration/s3/test_pool.py | 245 ++
 .../tests/integration/s3/test_versioning.py | 159 +
 .../boto-2.38.0/tests/integration/sdb/__init__.py | 20 +
 .../integration/sdb/test_cert_verification.py | 39 +
 .../tests/integration/sdb/test_connection.py | 118 +
 .../integration/ses/test_cert_verification.py | 39 +
 .../tests/integration/ses/test_connection.py | 38 +
 .../boto-2.38.0/tests/integration/sns/__init__.py | 20 +
 .../integration/sns/test_cert_verification.py | 39 +
 .../tests/integration/sns/test_connection.py | 68 +
 .../integration/sns/test_sns_sqs_subscription.py | 101 +
 .../boto-2.38.0/tests/integration/sqs/__init__.py | 20 +
 .../tests/integration/sqs/test_bigmessage.py | 78 +
 .../integration/sqs/test_cert_verification.py | 39 +
 .../tests/integration/sqs/test_connection.py | 304 ++
 .../integration/storage_uri/test_storage_uri.py | 63 +
 .../boto-2.38.0/tests/integration/sts/__init__.py | 20 +
 .../integration/sts/test_cert_verification.py | 39 +
 .../tests/integration/sts/test_session_token.py | 91 +
 .../integration/support/test_cert_verification.py | 34 +
 .../tests/integration/support/test_layer1.py | 76 +
 .../integration/swf/test_cert_verification.py | 39 +
 .../tests/integration/swf/test_layer1.py | 246 ++
 .../swf/test_layer1_workflow_execution.py | 173 +
 .../boto-2.38.0/tests/mturk/_init_environment.py | 28 +
 .../ext-py/boto-2.38.0/tests/mturk/all_tests.py | 24 +
 .../boto-2.38.0/tests/mturk/cleanup_tests.py | 47 +
 .../core/ext-py/boto-2.38.0/tests/mturk/common.py | 45 +
 .../boto-2.38.0/tests/mturk/create_hit_external.py | 21 +
 .../boto-2.38.0/tests/mturk/create_hit_test.py | 21 +
 .../tests/mturk/create_hit_with_qualifications.py | 16 +
 .../boto-2.38.0/tests/mturk/hit_persistence.py | 27 +
 .../core/ext-py/boto-2.38.0/tests/mturk/mocks.py | 11 +
 .../ext-py/boto-2.38.0/tests/mturk/run-doctest.py | 13 +
 .../boto-2.38.0/tests/mturk/selenium_support.py | 61 +
 .../core/ext-py/boto-2.38.0/tests/mturk/support.py | 7 +
 .../boto-2.38.0/tests/mturk/test_disable_hit.py | 11 +
 desktop/core/ext-py/boto-2.38.0/tests/test.py | 118 +
 .../core/ext-py/boto-2.38.0/tests/unit/__init__.py | 110 +
 .../boto-2.38.0/tests/unit/auth/test_sigv4.py | 594 +++
 .../boto-2.38.0/tests/unit/auth/test_stsanon.py | 78 +
 .../boto-2.38.0/tests/unit/awslambda/__init__.py | 21 +
 .../tests/unit/awslambda/test_awslambda.py | 117 +
 .../tests/unit/beanstalk/test_exception.py | 49 +
 .../tests/unit/beanstalk/test_layer1.py | 149 +
 .../tests/unit/cloudformation/test_connection.py | 724 ++++
 .../tests/unit/cloudformation/test_stack.py | 256 ++
 .../tests/unit/cloudfront/test_connection.py | 204 +
 .../tests/unit/cloudfront/test_distribution.py | 21 +
 .../unit/cloudfront/test_invalidation_list.py | 112 +
 .../tests/unit/cloudfront/test_signed_urls.py | 367 ++
 .../boto-2.38.0/tests/unit/cloudsearch/__init__.py | 1 +
 .../tests/unit/cloudsearch/test_connection.py | 230 +
 .../tests/unit/cloudsearch/test_document.py | 337 ++
 .../tests/unit/cloudsearch/test_exceptions.py | 37 +
 .../tests/unit/cloudsearch/test_search.py | 428 ++
 .../tests/unit/cloudsearch2/__init__.py | 18 +
 .../tests/unit/cloudsearch2/test_connection.py | 250 ++
 .../tests/unit/cloudsearch2/test_document.py | 346 ++
 .../tests/unit/cloudsearch2/test_exceptions.py | 37 +
 .../tests/unit/cloudsearch2/test_search.py | 387 ++
 .../cloudsearchdomain/test_cloudsearchdomain.py | 127 +
 .../tests/unit/cloudtrail/test_layer1.py | 81 +
 .../tests/unit/directconnect/test_layer1.py | 58 +
 .../boto-2.38.0/tests/unit/dynamodb/test_batch.py | 103 +
 .../boto-2.38.0/tests/unit/dynamodb/test_layer2.py | 119 +
 .../boto-2.38.0/tests/unit/dynamodb/test_types.py | 142 +
 .../tests/unit/dynamodb2/test_layer1.py | 53 +
 .../boto-2.38.0/tests/unit/dynamodb2/test_table.py | 3066 +++++++++++++
 .../tests/unit/ec2/autoscale/test_group.py | 881 ++++
 .../tests/unit/ec2/cloudwatch/test_connection.py | 98 +
 .../tests/unit/ec2/elb/test_attribute.py | 205 +
 .../tests/unit/ec2/elb/test_listener.py | 125 +
 .../tests/unit/ec2/elb/test_loadbalancer.py | 143 +
 .../boto-2.38.0/tests/unit/ec2/test_address.py | 145 +
 .../tests/unit/ec2/test_blockdevicemapping.py | 146 +
 .../boto-2.38.0/tests/unit/ec2/test_connection.py | 1703 ++++++++
 .../boto-2.38.0/tests/unit/ec2/test_ec2object.py | 211 +
 .../boto-2.38.0/tests/unit/ec2/test_instance.py | 277 ++
 .../tests/unit/ec2/test_instancestatus.py | 42 +
 .../tests/unit/ec2/test_instancetype.py | 118 +
 .../tests/unit/ec2/test_networkinterface.py | 273 ++
 .../tests/unit/ec2/test_reservedinstance.py | 44 +
 .../tests/unit/ec2/test_securitygroup.py | 211 +
 .../boto-2.38.0/tests/unit/ec2/test_snapshot.py | 61 +
 .../tests/unit/ec2/test_spotinstance.py | 110 +
 .../boto-2.38.0/tests/unit/ec2/test_volume.py | 268 ++
 .../boto-2.38.0/tests/unit/ecs/test_connection.py | 70 +
 .../tests/unit/elasticache/test_api_interface.py | 20 +
 .../boto-2.38.0/tests/unit/emr/test_connection.py | 1004 +++++
 .../tests/unit/emr/test_emr_responses.py | 388 ++
 .../tests/unit/emr/test_instance_group_args.py | 57 +
 .../tests/unit/glacier/test_concurrent.py | 173 +
 .../boto-2.38.0/tests/unit/glacier/test_job.py | 81 +
 .../boto-2.38.0/tests/unit/glacier/test_layer1.py | 98 +
 .../boto-2.38.0/tests/unit/glacier/test_layer2.py | 338 ++
 .../tests/unit/glacier/test_response.py | 35 +
 .../boto-2.38.0/tests/unit/glacier/test_utils.py | 165 +
 .../boto-2.38.0/tests/unit/glacier/test_vault.py | 176 +
 .../boto-2.38.0/tests/unit/glacier/test_writer.py | 229 +
 .../boto-2.38.0/tests/unit/iam/test_connection.py | 481 +++
 .../boto-2.38.0/tests/unit/kinesis/test_kinesis.py | 74 +
 .../ext-py/boto-2.38.0/tests/unit/kms/__init__.py | 21 +
 .../ext-py/boto-2.38.0/tests/unit/kms/test_kms.py | 63 +
 .../boto-2.38.0/tests/unit/logs/test_layer1.py | 22 +
 .../tests/unit/machinelearning/__init__.py | 21 +
 .../unit/machinelearning/test_machinelearning.py | 45 +
 .../boto-2.38.0/tests/unit/manage/test_ssh.py | 54 +
 .../tests/unit/mturk/test_connection.py | 28 +
 .../boto-2.38.0/tests/unit/mws/test_connection.py | 205 +
 .../boto-2.38.0/tests/unit/mws/test_response.py | 229 +
 .../tests/unit/provider/test_provider.py | 441 ++
 .../boto-2.38.0/tests/unit/rds/test_connection.py | 787 ++++
 .../boto-2.38.0/tests/unit/rds/test_snapshot.py | 296 ++
 .../boto-2.38.0/tests/unit/rds2/test_connection.py | 209 +
 .../tests/unit/route53/test_connection.py | 802 ++++
 .../boto-2.38.0/tests/unit/route53/test_zone.py | 63 +
 .../boto-2.38.0/tests/unit/s3/test_bucket.py | 231 +
 .../boto-2.38.0/tests/unit/s3/test_connection.py | 240 ++
 .../tests/unit/s3/test_cors_configuration.py | 77 +
 .../ext-py/boto-2.38.0/tests/unit/s3/test_key.py | 239 ++
 .../boto-2.38.0/tests/unit/s3/test_keyfile.py | 114 +
 .../boto-2.38.0/tests/unit/s3/test_lifecycle.py | 97 +
 .../boto-2.38.0/tests/unit/s3/test_tagging.py | 47 +
 .../ext-py/boto-2.38.0/tests/unit/s3/test_uri.py | 265 ++
 .../boto-2.38.0/tests/unit/s3/test_website.py | 230 +
 .../boto-2.38.0/tests/unit/ses/test_identity.py | 193 +
 .../boto-2.38.0/tests/unit/sns/test_connection.py | 281 ++
 .../boto-2.38.0/tests/unit/sqs/test_connection.py | 313 ++
 .../boto-2.38.0/tests/unit/sqs/test_message.py | 116 +
 .../boto-2.38.0/tests/unit/sqs/test_queue.py | 51 +
 .../boto-2.38.0/tests/unit/sts/test_connection.py | 244 ++
 .../boto-2.38.0/tests/unit/sts/test_credentials.py | 38 +
 .../tests/unit/swf/test_layer1_decisions.py | 35 +
 .../tests/unit/swf/test_layer2_actors.py | 87 +
 .../boto-2.38.0/tests/unit/swf/test_layer2_base.py | 31 +
 .../tests/unit/swf/test_layer2_domain.py | 116 +
 .../tests/unit/swf/test_layer2_types.py | 46 +
 .../boto-2.38.0/tests/unit/test_connection.py | 539 +++
 .../boto-2.38.0/tests/unit/test_endpoints.json | 5 +
 .../boto-2.38.0/tests/unit/test_exception.py | 123 +
 .../boto-2.38.0/tests/unit/test_regioninfo.py | 144 +
 .../boto-2.38.0/tests/unit/utils/test_utils.py | 321 ++
 .../ext-py/boto-2.38.0/tests/unit/vpc/__init__.py | 3 +
 .../tests/unit/vpc/test_customergateway.py | 115 +
 .../boto-2.38.0/tests/unit/vpc/test_dhcpoptions.py | 215 +
 .../tests/unit/vpc/test_internetgateway.py | 152 +
 .../boto-2.38.0/tests/unit/vpc/test_networkacl.py | 521 +++
 .../boto-2.38.0/tests/unit/vpc/test_routetable.py | 440 ++
 .../boto-2.38.0/tests/unit/vpc/test_subnet.py | 133 +
 .../ext-py/boto-2.38.0/tests/unit/vpc/test_vpc.py | 367 ++
 .../tests/unit/vpc/test_vpc_peering_connection.py | 275 ++
 .../tests/unit/vpc/test_vpnconnection.py | 254 ++
 .../boto-2.38.0/tests/unit/vpc/test_vpngateway.py | 220 +
 ext/thirdparty/README.md | 1 +
 846 files changed, 180343 insertions(+)
 create mode 100644 desktop/core/ext-py/boto-2.38.0/.gitignore
 create mode 100644 desktop/core/ext-py/boto-2.38.0/MANIFEST.in
 create mode 100644 desktop/core/ext-py/boto-2.38.0/PKG-INFO
 create mode 100644 desktop/core/ext-py/boto-2.38.0/README.rst
 create mode 100755 desktop/core/ext-py/boto-2.38.0/bin/asadmin
 create mode 100755 desktop/core/ext-py/boto-2.38.0/bin/bundle_image
 create mode 100755 desktop/core/ext-py/boto-2.38.0/bin/cfadmin
 create mode 100755 desktop/core/ext-py/boto-2.38.0/bin/cq
 create mode 100755 desktop/core/ext-py/boto-2.38.0/bin/cwutil
 create mode 100755 desktop/core/ext-py/boto-2.38.0/bin/dynamodb_dump
 create mode 100755 desktop/core/ext-py/boto-2.38.0/bin/dynamodb_load
 create mode 100755 desktop/core/ext-py/boto-2.38.0/bin/elbadmin
 create mode 100755 desktop/core/ext-py/boto-2.38.0/bin/fetch_file
 create mode 100755 desktop/core/ext-py/boto-2.38.0/bin/glacier
 create mode 100755 desktop/core/ext-py/boto-2.38.0/bin/instance_events
 create mode 100755 desktop/core/ext-py/boto-2.38.0/bin/kill_instance
 create mode 100755 desktop/core/ext-py/boto-2.38.0/bin/launch_instance
 create mode 100755 desktop/core/ext-py/boto-2.38.0/bin/list_instances
 create mode 100755 desktop/core/ext-py/boto-2.38.0/bin/lss3
 create mode 100755 desktop/core/ext-py/boto-2.38.0/bin/mturk
 create mode 100755 desktop/core/ext-py/boto-2.38.0/bin/pyami_sendmail
 create mode 100755 desktop/core/ext-py/boto-2.38.0/bin/route53
 create mode 100755 desktop/core/ext-py/boto-2.38.0/bin/s3put
 create mode 100755 desktop/core/ext-py/boto-2.38.0/bin/sdbadmin
 create mode 100755 desktop/core/ext-py/boto-2.38.0/bin/taskadmin
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/auth.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/auth_handler.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/awslambda/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/awslambda/exceptions.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/awslambda/layer1.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/beanstalk/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/beanstalk/exception.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/beanstalk/layer1.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/beanstalk/response.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/beanstalk/wrapper.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cacerts/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cacerts/cacerts.txt
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudformation/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudformation/connection.py
 create mode 100755 desktop/core/ext-py/boto-2.38.0/boto/cloudformation/stack.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudformation/template.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudfront/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudfront/distribution.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudfront/exception.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudfront/identity.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudfront/invalidation.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudfront/logging.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudfront/object.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudfront/origin.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudfront/signers.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudhsm/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudhsm/exceptions.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudhsm/layer1.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/document.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/domain.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/layer1.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/layer2.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/optionstatus.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/search.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/sourceattribute.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/document.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/domain.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/exceptions.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/layer1.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/layer2.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/optionstatus.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/search.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudsearchdomain/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudsearchdomain/exceptions.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudsearchdomain/layer1.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudtrail/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudtrail/exceptions.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cloudtrail/layer1.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/codedeploy/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/codedeploy/exceptions.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/codedeploy/layer1.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cognito/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cognito/identity/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cognito/identity/exceptions.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cognito/identity/layer1.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cognito/sync/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cognito/sync/exceptions.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/cognito/sync/layer1.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/compat.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/configservice/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/configservice/exceptions.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/configservice/layer1.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/connection.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/contrib/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/contrib/ymlmessage.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/datapipeline/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/datapipeline/exceptions.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/datapipeline/layer1.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/directconnect/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/directconnect/exceptions.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/directconnect/layer1.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/dynamodb/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/dynamodb/batch.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/dynamodb/condition.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/dynamodb/exceptions.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/dynamodb/item.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/dynamodb/layer1.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/dynamodb/layer2.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/dynamodb/schema.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/dynamodb/table.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/dynamodb/types.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/exceptions.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/fields.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/items.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/layer1.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/results.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/table.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/types.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/address.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/attributes.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/activity.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/group.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/instance.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/launchconfig.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/limits.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/policy.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/request.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/scheduled.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/tag.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/blockdevicemapping.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/bundleinstance.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/buyreservation.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/cloudwatch/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/cloudwatch/alarm.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/cloudwatch/datapoint.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/cloudwatch/dimension.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/cloudwatch/listelement.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/cloudwatch/metric.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/connection.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/ec2object.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/attributes.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/healthcheck.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/instancestate.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/listelement.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/listener.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/loadbalancer.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/policies.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/securitygroup.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/group.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/image.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/instance.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/instanceinfo.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/instancestatus.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/instancetype.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/keypair.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/launchspecification.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/networkinterface.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/placementgroup.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/regioninfo.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/reservedinstance.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/securitygroup.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/snapshot.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/spotdatafeedsubscription.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/spotinstancerequest.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/spotpricehistory.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/tag.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/volume.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/volumestatus.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2/zone.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2containerservice/__init__.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2containerservice/exceptions.py
 create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ec2containerservice/layer1.py
desktop/core/ext-py/boto-2.38.0/boto/ec2containerservice/layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ecs/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ecs/item.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/elasticache/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/elasticache/layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/elastictranscoder/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/elastictranscoder/exceptions.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/elastictranscoder/layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/emr/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/emr/bootstrap_action.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/emr/connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/emr/emrobject.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/emr/instance_group.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/emr/step.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/endpoints.json create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/exception.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/file/README create mode 100755 desktop/core/ext-py/boto-2.38.0/boto/file/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/file/bucket.py create mode 100755 desktop/core/ext-py/boto-2.38.0/boto/file/connection.py create mode 100755 desktop/core/ext-py/boto-2.38.0/boto/file/key.py create mode 100755 desktop/core/ext-py/boto-2.38.0/boto/file/simpleresultset.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/fps/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/fps/connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/fps/exception.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/fps/response.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/glacier/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/glacier/concurrent.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/glacier/exceptions.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/glacier/job.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/glacier/layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/glacier/layer2.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/glacier/response.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/glacier/utils.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/glacier/vault.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/glacier/writer.py create mode 100755 desktop/core/ext-py/boto-2.38.0/boto/gs/__init__.py create mode 100755 desktop/core/ext-py/boto-2.38.0/boto/gs/acl.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/gs/bucket.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/gs/bucketlistresultset.py create mode 100755 desktop/core/ext-py/boto-2.38.0/boto/gs/connection.py create mode 100755 desktop/core/ext-py/boto-2.38.0/boto/gs/cors.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/gs/key.py create mode 100755 desktop/core/ext-py/boto-2.38.0/boto/gs/lifecycle.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/gs/resumable_upload_handler.py create mode 100755 desktop/core/ext-py/boto-2.38.0/boto/gs/user.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/handler.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/https_connection.py create 
mode 100644 desktop/core/ext-py/boto-2.38.0/boto/iam/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/iam/connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/iam/summarymap.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/jsonresponse.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/kinesis/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/kinesis/exceptions.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/kinesis/layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/kms/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/kms/exceptions.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/kms/layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/logs/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/logs/exceptions.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/logs/layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/machinelearning/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/machinelearning/exceptions.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/machinelearning/layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/manage/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/manage/cmdshell.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/manage/propget.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/manage/server.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/manage/task.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/manage/test_manage.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/manage/volume.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/mashups/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/mashups/interactive.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/mashups/iobject.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/mashups/order.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/mashups/server.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/mturk/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/mturk/connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/mturk/layoutparam.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/mturk/notification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/mturk/price.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/mturk/qualification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/mturk/question.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/mws/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/mws/connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/mws/exception.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/mws/response.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/opsworks/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/opsworks/exceptions.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/opsworks/layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/plugin.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/provider.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/pyami/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/pyami/bootstrap.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/pyami/config.py create mode 100644 
desktop/core/ext-py/boto-2.38.0/boto/pyami/copybot.cfg create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/pyami/copybot.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/pyami/helloworld.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/apache.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/ebs.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/installer.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/mysql.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/trac.py create mode 100755 desktop/core/ext-py/boto-2.38.0/boto/pyami/launch_ami.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/pyami/scriptbase.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/pyami/startup.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/rds/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/rds/dbinstance.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/rds/dbsecuritygroup.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/rds/dbsnapshot.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/rds/dbsubnetgroup.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/rds/event.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/rds/logfile.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/rds/optiongroup.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/rds/parametergroup.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/rds/regioninfo.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/rds/statusinfo.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/rds/vpcsecuritygroupmembership.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/rds2/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/rds2/exceptions.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/rds2/layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/redshift/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/redshift/exceptions.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/redshift/layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/regioninfo.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/requestlog.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/resultset.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/roboto/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/roboto/awsqueryrequest.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/roboto/awsqueryservice.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/roboto/param.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/route53/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/route53/connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/route53/domains/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/route53/domains/exceptions.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/route53/domains/layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/route53/exception.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/route53/healthcheck.py create mode 100644 
desktop/core/ext-py/boto-2.38.0/boto/route53/hostedzone.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/route53/record.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/route53/status.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/route53/zone.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/s3/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/s3/acl.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/s3/bucket.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/s3/bucketlistresultset.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/s3/bucketlogging.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/s3/connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/s3/cors.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/s3/deletemarker.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/s3/key.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/s3/keyfile.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/s3/lifecycle.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/s3/multidelete.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/s3/multipart.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/s3/prefix.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/s3/resumable_download_handler.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/s3/tagging.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/s3/user.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/s3/website.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sdb/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sdb/connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sdb/db/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sdb/db/blob.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sdb/db/key.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sdb/db/manager/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sdb/db/manager/sdbmanager.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sdb/db/manager/xmlmanager.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sdb/db/model.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sdb/db/property.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sdb/db/query.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sdb/db/sequence.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sdb/db/test_db.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sdb/domain.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sdb/item.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sdb/queryresultset.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sdb/regioninfo.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/services/__init__.py create mode 100755 desktop/core/ext-py/boto-2.38.0/boto/services/bs.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/services/message.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/services/result.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/services/service.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/services/servicedef.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/services/sonofmmm.cfg create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/services/sonofmmm.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/services/submit.py create mode 100644 
desktop/core/ext-py/boto-2.38.0/boto/ses/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ses/connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/ses/exceptions.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sns/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sns/connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sqs/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sqs/attributes.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sqs/batchresults.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sqs/bigmessage.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sqs/connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sqs/jsonmessage.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sqs/message.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sqs/messageattributes.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sqs/queue.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sqs/regioninfo.py create mode 100755 desktop/core/ext-py/boto-2.38.0/boto/storage_uri.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sts/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sts/connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/sts/credentials.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/support/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/support/exceptions.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/support/layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/swf/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/swf/exceptions.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/swf/layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/swf/layer1_decisions.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/swf/layer2.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/utils.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/vendored/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/vendored/six.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/vpc/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/vpc/customergateway.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/vpc/dhcpoptions.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/vpc/internetgateway.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/vpc/networkacl.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/vpc/routetable.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/vpc/subnet.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/vpc/vpc.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/vpc/vpc_peering_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/vpc/vpnconnection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/boto/vpc/vpngateway.py create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/BotoCheatSheet.pdf create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/Makefile create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/make.bat create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/_templates/layout.html create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/apps_built_on_boto.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/autoscale_tut.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/boto_config_tut.rst 
create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/boto_theme/static/boto.css_t create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/boto_theme/static/pygments.css create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/boto_theme/theme.conf create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/cloudfront_tut.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/cloudsearch_tut.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/cloudwatch_tut.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/commandline.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/conf.py create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/contributing.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/documentation.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/dynamodb2_tut.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/dynamodb_tut.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ec2_tut.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/elb_tut.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/emr_tut.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/extensions/githublinks/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/getting_started.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/index.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/migrations/dynamodb_v1_to_v2.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/migrations/rds_v1_to_v2.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/porting_guide.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/rds_tut.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/autoscale.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/awslamba.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/beanstalk.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/boto.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudformation.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudfront.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudhsm.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudsearch.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudsearch2.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudsearchdomain.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudtrail.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudwatch.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/codedeploy.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/cognito-identity.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/cognito-sync.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/configservice.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/contrib.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/datapipeline.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/dynamodb.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/dynamodb2.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/ec2.rst create mode 100644 
desktop/core/ext-py/boto-2.38.0/docs/source/ref/ec2containerservice.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/ecs.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/elasticache.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/elastictranscoder.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/elb.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/emr.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/file.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/fps.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/glacier.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/gs.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/iam.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/index.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/kinesis.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/kms.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/logs.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/machinelearning.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/manage.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/mturk.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/mws.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/opsworks.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/pyami.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/rds.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/rds2.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/redshift.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/route53.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/route53domains.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/s3.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/sdb.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/sdb_db.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/services.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/ses.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/sns.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/sqs.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/sts.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/support.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/swf.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ref/vpc.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/dev.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/releasenotes_template.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.0.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.0b1.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.1.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.1.1.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.10.0.rst create mode 100644 
desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.11.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.12.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.13.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.13.2.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.13.3.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.14.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.15.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.16.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.17.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.18.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.19.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.2.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.2.1.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.2.2.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.20.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.20.1.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.21.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.21.1.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.21.2.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.22.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.22.1.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.23.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.24.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.25.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.26.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.26.1.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.27.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.28.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.29.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.29.1.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.3.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.30.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.31.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.31.1.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.32.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.32.1.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.33.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.34.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.35.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.35.1.rst create mode 100644 
desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.35.2.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.36.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.37.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.38.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.4.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.5.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.5.1.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.5.2.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.6.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.7.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.8.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.0.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.1.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.2.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.3.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.4.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.5.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.6.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.7.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.8.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.9.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/request_hook_tut.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/route53_tut.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/s3_tut.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/security_groups.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/ses_tut.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/simpledb_tut.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/sqs_tut.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/support_tut.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/swf_tut.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/docs/source/vpc_tut.rst create mode 100644 desktop/core/ext-py/boto-2.38.0/pylintrc create mode 100644 desktop/core/ext-py/boto-2.38.0/setup.cfg create mode 100644 desktop/core/ext-py/boto-2.38.0/setup.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/compat.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/db/test_lists.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/db/test_password.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/db/test_query.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/db/test_sequence.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/devpay/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/devpay/test_s3.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/fps/__init__.py create mode 100755 desktop/core/ext-py/boto-2.38.0/tests/fps/test.py create mode 100644 
desktop/core/ext-py/boto-2.38.0/tests/fps/test_verify_signature.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/awslambda/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/awslambda/test_awslambda.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/beanstalk/test_wrapper.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/cloudformation/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/cloudformation/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/cloudformation/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/cloudhsm/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/cloudhsm/test_cloudhsm.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/cloudsearch/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/cloudsearch/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/cloudsearch/test_layers.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/cloudsearch2/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/cloudsearch2/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/cloudsearch2/test_layers.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/cloudtrail/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/cloudtrail/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/cloudtrail/test_cloudtrail.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/codedeploy/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/codedeploy/test_codedeploy.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/cognito/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/cognito/identity/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/cognito/identity/test_cognito_identity.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/cognito/sync/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/cognito/sync/test_cognito_sync.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/configservice/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/configservice/test_configservice.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/datapipeline/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/datapipeline/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/datapipeline/test_layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/directconnect/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/directconnect/test_directconnect.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb/test_layer1.py create mode 100644 
desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb/test_layer2.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb/test_table.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb2/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb2/forum_test_data.json create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb2/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb2/test_highlevel.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb2/test_layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/autoscale/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/autoscale/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/autoscale/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/cloudwatch/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/cloudwatch/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/cloudwatch/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/elb/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/elb/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/elb/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/vpc/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/vpc/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/ec2containerservice/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/ec2containerservice/test_ec2containerservice.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/elasticache/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/elasticache/test_layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/elastictranscoder/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/elastictranscoder/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/elastictranscoder/test_layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/emr/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/emr/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/glacier/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/glacier/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/glacier/test_layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/glacier/test_layer2.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/gs/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/gs/cb_test_harness.py create mode 100644 
desktop/core/ext-py/boto-2.38.0/tests/integration/gs/test_basic.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/gs/test_generation_conditionals.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/gs/test_resumable_downloads.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/gs/test_resumable_uploads.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/gs/test_storage_uri.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/gs/test_versioning.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/gs/testcase.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/gs/util.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/iam/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/iam/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/iam/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/iam/test_password_policy.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/kinesis/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/kinesis/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/kinesis/test_kinesis.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/kms/test_kms.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/logs/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/logs/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/logs/test_layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/mws/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/mws/test.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/opsworks/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/opsworks/test_layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/rds/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/rds/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/rds/test_db_subnet_group.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/rds/test_promote_modify.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/rds2/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/rds2/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/rds2/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/redshift/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/redshift/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/redshift/test_layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/route53/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/route53/domains/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/route53/domains/test_route53domains.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/route53/test_alias_resourcerecordsets.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/route53/test_cert_verification.py create mode 100644 
desktop/core/ext-py/boto-2.38.0/tests/integration/route53/test_health_check.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/route53/test_resourcerecordsets.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/route53/test_zone.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/s3/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/s3/mock_storage_service.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/s3/other_cacerts.txt create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_bucket.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_connect_to_region.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_cors.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_encryption.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_https_cert_validation.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_key.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_mfa.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_multidelete.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_multipart.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_pool.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_versioning.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/sdb/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/sdb/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/sdb/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/ses/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/ses/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/ses/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/sns/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/sns/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/sns/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/sns/test_sns_sqs_subscription.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/sqs/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/sqs/test_bigmessage.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/sqs/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/sqs/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/storage_uri/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/storage_uri/test_storage_uri.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/sts/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/sts/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/sts/test_session_token.py create mode 100644 
desktop/core/ext-py/boto-2.38.0/tests/integration/support/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/support/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/support/test_layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/swf/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/swf/test_cert_verification.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/swf/test_layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/integration/swf/test_layer1_workflow_execution.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/mturk/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/mturk/_init_environment.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/mturk/all_tests.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/mturk/cleanup_tests.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/mturk/common.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/mturk/create_hit_external.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/mturk/create_hit_test.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/mturk/create_hit_with_qualifications.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/mturk/hit_persistence.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/mturk/mocks.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/mturk/run-doctest.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/mturk/selenium_support.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/mturk/support.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/mturk/test_disable_hit.py create mode 100755 desktop/core/ext-py/boto-2.38.0/tests/test.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/auth/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/auth/test_sigv4.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/auth/test_stsanon.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/awslambda/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/awslambda/test_awslambda.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/beanstalk/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/beanstalk/test_exception.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/beanstalk/test_layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/cloudformation/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/cloudformation/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/cloudformation/test_stack.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/cloudfront/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/cloudfront/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/cloudfront/test_distribution.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/cloudfront/test_invalidation_list.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/cloudfront/test_signed_urls.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch/test_connection.py create mode 100644 
desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch/test_document.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch/test_exceptions.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch/test_search.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch2/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch2/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch2/test_document.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch2/test_exceptions.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch2/test_search.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearchdomain/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearchdomain/test_cloudsearchdomain.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/cloudtrail/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/cloudtrail/test_layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/directconnect/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/directconnect/test_layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb/test_batch.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb/test_layer2.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb/test_types.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb2/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb2/test_layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb2/test_table.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/autoscale/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/autoscale/test_group.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/cloudwatch/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/cloudwatch/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/elb/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/elb/test_attribute.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/elb/test_listener.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/elb/test_loadbalancer.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_address.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_blockdevicemapping.py create mode 100755 desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_ec2object.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_instance.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_instancestatus.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_instancetype.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_networkinterface.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_reservedinstance.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_securitygroup.py create mode 100644 
desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_snapshot.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_spotinstance.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_volume.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/ecs/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/ecs/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/elasticache/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/elasticache/test_api_interface.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/emr/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/emr/test_emr_responses.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/emr/test_instance_group_args.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_concurrent.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_job.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_layer2.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_response.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_utils.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_vault.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_writer.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/iam/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/iam/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/kinesis/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/kinesis/test_kinesis.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/kms/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/kms/test_kms.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/logs/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/logs/test_layer1.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/machinelearning/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/machinelearning/test_machinelearning.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/manage/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/manage/test_ssh.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/mturk/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/mturk/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/mws/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/mws/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/mws/test_response.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/provider/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/provider/test_provider.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/rds/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/rds/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/rds/test_snapshot.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/rds2/__init__.py create mode 100644 
desktop/core/ext-py/boto-2.38.0/tests/unit/rds2/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/route53/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/route53/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/route53/test_zone.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/s3/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_bucket.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_cors_configuration.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_key.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_keyfile.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_lifecycle.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_tagging.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_uri.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_website.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/ses/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/ses/test_identity.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/sns/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/sns/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/sqs/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/sqs/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/sqs/test_message.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/sqs/test_queue.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/sts/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/sts/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/sts/test_credentials.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/swf/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/swf/test_layer1_decisions.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/swf/test_layer2_actors.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/swf/test_layer2_base.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/swf/test_layer2_domain.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/swf/test_layer2_types.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/test_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/test_endpoints.json create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/test_exception.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/test_regioninfo.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/utils/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/utils/test_utils.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/__init__.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_customergateway.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_dhcpoptions.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_internetgateway.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_networkacl.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_routetable.py 
create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_subnet.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_vpc.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_vpc_peering_connection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_vpnconnection.py create mode 100644 desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_vpngateway.py diff --git a/desktop/core/ext-py/boto-2.38.0/.gitignore b/desktop/core/ext-py/boto-2.38.0/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..9ed67ef192fa20435d5ca11362c3fb84bd31f5f1 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/.gitignore @@ -0,0 +1,16 @@ +*.pyc +.*.swp +*.log +*~ +boto.egg-info +build/ +dist/ +MANIFEST +.DS_Store +.idea +.tox +.coverage +*flymake.py +venv +venv-2.5 +env-2.5 diff --git a/desktop/core/ext-py/boto-2.38.0/MANIFEST.in b/desktop/core/ext-py/boto-2.38.0/MANIFEST.in new file mode 100644 index 0000000000000000000000000000000000000000..26c87ea74c005df540c32b2e0c7e7a554db81060 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/MANIFEST.in @@ -0,0 +1,12 @@ +include boto/cacerts/cacerts.txt +include README.rst +include boto/file/README +include .gitignore +include pylintrc +include boto/endpoints.json +include boto/pyami/copybot.cfg +include boto/services/sonofmmm.cfg +include boto/mturk/test/*.doctest +include boto/mturk/test/.gitignore +recursive-include tests *.json *.py *.txt +recursive-include docs * diff --git a/desktop/core/ext-py/boto-2.38.0/PKG-INFO b/desktop/core/ext-py/boto-2.38.0/PKG-INFO new file mode 100644 index 0000000000000000000000000000000000000000..fd0977598830d4c8f3449bd2cc76f0687b38db24 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/PKG-INFO @@ -0,0 +1,230 @@ +Metadata-Version: 1.1 +Name: boto +Version: 2.38.0 +Summary: Amazon Web Services Library +Home-page: https://github.com/boto/boto/ +Author: Mitch Garnaat +Author-email: mitch@garnaat.com +License: MIT +Description: #### + boto + #### + boto 2.38.0 + + Released: 9-Apr-2015 + + .. image:: https://travis-ci.org/boto/boto.svg?branch=develop + :target: https://travis-ci.org/boto/boto + + .. image:: https://pypip.in/d/boto/badge.svg + :target: https://pypi.python.org/pypi/boto/ + + ************ + Introduction + ************ + + Boto is a Python package that provides interfaces to Amazon Web Services. + Currently, all features work with Python 2.6 and 2.7. Work is under way to + support Python 3.3+ in the same codebase. Modules are being ported one at + a time with the help of the open source community, so please check below + for compatibility with Python 3.3+. + + To port a module to Python 3.3+, please view our `Contributing Guidelines`_ + and the `Porting Guide`_. If you would like, you can open an issue to let + others know about your work in progress. Tests **must** pass on Python + 2.6, 2.7, 3.3, and 3.4 for pull requests to be accepted. + + ****** + Boto 3 + ****** + The next major version of Boto is currently in developer preview and can + be found in the `Boto 3 `__ + repository and installed via ``pip``. It supports the latest service APIs + and provides a high-level object-oriented interface to many services. + + Please try Boto 3 and + `leave feedback `__ with any issues, + suggestions, and feature requests you might have. 
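
Since this change bundles boto under desktop/core/ext-py, a quick sanity check that the vendored copy is the expected release is to inspect its version attribute; a minimal sketch from an interactive session (the prompt is illustrative):

::

    >>> import boto
    >>> boto.__version__
    '2.38.0'
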
+
+        ********
+        Services
+        ********
+
+        At the moment, boto supports:
+
+        * Compute
+
+          * Amazon Elastic Compute Cloud (EC2) (Python 3)
+          * Amazon Elastic MapReduce (EMR) (Python 3)
+          * AutoScaling (Python 3)
+          * Amazon Kinesis (Python 3)
+          * AWS Lambda (Python 3)
+          * Amazon EC2 Container Service (Python 3)
+
+        * Content Delivery
+
+          * Amazon CloudFront (Python 3)
+
+        * Database
+
+          * Amazon Relational Database Service (RDS)
+          * Amazon DynamoDB (Python 3)
+          * Amazon SimpleDB (Python 3)
+          * Amazon ElastiCache (Python 3)
+          * Amazon Redshift (Python 3)
+
+        * Deployment and Management
+
+          * AWS Elastic Beanstalk (Python 3)
+          * AWS CloudFormation (Python 3)
+          * AWS Data Pipeline (Python 3)
+          * AWS OpsWorks (Python 3)
+          * AWS CloudTrail (Python 3)
+          * AWS CodeDeploy (Python 3)
+
+        * Administration & Security
+
+          * AWS Identity and Access Management (IAM) (Python 3)
+          * AWS Key Management Service (KMS) (Python 3)
+          * AWS Config (Python 3)
+          * AWS CloudHSM (Python 3)
+
+        * Application Services
+
+          * Amazon CloudSearch (Python 3)
+          * Amazon CloudSearch Domain (Python 3)
+          * Amazon Elastic Transcoder (Python 3)
+          * Amazon Simple Workflow Service (SWF) (Python 3)
+          * Amazon Simple Queue Service (SQS) (Python 3)
+          * Amazon Simple Notification Service (SNS) (Python 3)
+          * Amazon Simple Email Service (SES) (Python 3)
+          * Amazon Cognito Identity (Python 3)
+          * Amazon Cognito Sync (Python 3)
+          * Amazon Machine Learning (Python 3)
+
+        * Monitoring
+
+          * Amazon CloudWatch (EC2 Only) (Python 3)
+          * Amazon CloudWatch Logs (Python 3)
+
+        * Networking
+
+          * Amazon Route 53 (Python 3)
+          * Amazon Route 53 Domains (Python 3)
+          * Amazon Virtual Private Cloud (VPC) (Python 3)
+          * Elastic Load Balancing (ELB) (Python 3)
+          * AWS Direct Connect (Python 3)
+
+        * Payments and Billing
+
+          * Amazon Flexible Payment Service (FPS)
+
+        * Storage
+
+          * Amazon Simple Storage Service (S3) (Python 3)
+          * Amazon Glacier (Python 3)
+          * Amazon Elastic Block Store (EBS)
+          * Google Cloud Storage
+
+        * Workforce
+
+          * Amazon Mechanical Turk
+
+        * Other
+
+          * Marketplace Web Services (Python 3)
+          * AWS Support (Python 3)
+
+        The goal of boto is to support the full breadth and depth of Amazon
+        Web Services. In addition, boto supports other public services, such
+        as Google Storage, as well as private cloud systems like Eucalyptus,
+        OpenStack and Open Nebula.
+
+        Boto is developed mainly using Python 2.6.6 and Python 2.7.3 on Mac OS X
+        and Ubuntu Maverick. It is known to work on other Linux distributions
+        and on Windows. Most of Boto requires no additional libraries or packages
+        other than those that are distributed with Python. Efforts are made
+        to keep boto compatible with Python 2.5.x but no guarantees are made.
+
+        ************
+        Installation
+        ************
+
+        Install via `pip`_:
+
+        ::
+
+            $ pip install boto
+
+        Install from source:
+
+        ::
+
+            $ git clone git://github.com/boto/boto.git
+            $ cd boto
+            $ python setup.py install
+
+        **********
+        ChangeLogs
+        **********
+
+        To see what has changed over time in boto, you can check out the
+        release notes at `http://docs.pythonboto.org/en/latest/#release-notes`
+
+        ***************************
+        Finding Out More About Boto
+        ***************************
+
+        The main source code repository for boto can be found on `github.com`_.
+        The boto project uses the `gitflow`_ model for branching.
+
+        `Online documentation`_ is also available. The online documentation includes
+        full API documentation as well as Getting Started Guides for many of the boto
+        modules.
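
As a quick taste of the classic API behind the service list above, a minimal sketch (region name illustrative; credentials assumed to be configured) that enumerates EC2 instances:

::

    import boto.ec2

    # Connect to one region and print instance ids with their states.
    conn = boto.ec2.connect_to_region('us-east-1')
    for reservation in conn.get_all_reservations():
        for instance in reservation.instances:
            print('%s %s' % (instance.id, instance.state))
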
+ + Boto releases can be found on the `Python Cheese Shop`_. + + Join our IRC channel `#boto` on FreeNode. + Webchat IRC channel: http://webchat.freenode.net/?channels=boto + + Join the `boto-users Google Group`_. + + ************************* + Getting Started with Boto + ************************* + + Your credentials can be passed into the methods that create + connections. Alternatively, boto will check for the existence of the + following environment variables to ascertain your credentials: + + **AWS_ACCESS_KEY_ID** - Your AWS Access Key ID + + **AWS_SECRET_ACCESS_KEY** - Your AWS Secret Access Key + + Credentials and other boto-related settings can also be stored in a + boto config file. See `this`_ for details. + + .. _Contributing Guidelines: https://github.com/boto/boto/blob/develop/CONTRIBUTING + .. _Porting Guide: http://boto.readthedocs.org/en/latest/porting_guide.html + .. _pip: http://www.pip-installer.org/ + .. _release notes: https://github.com/boto/boto/wiki + .. _github.com: http://github.com/boto/boto + .. _Online documentation: http://docs.pythonboto.org + .. _Python Cheese Shop: http://pypi.python.org/pypi/boto + .. _this: http://docs.pythonboto.org/en/latest/boto_config_tut.html + .. _gitflow: http://nvie.com/posts/a-successful-git-branching-model/ + .. _neo: https://github.com/boto/boto/tree/neo + .. _boto-users Google Group: https://groups.google.com/forum/?fromgroups#!forum/boto-users + +Platform: Posix; MacOS X; Windows +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Topic :: Internet +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 diff --git a/desktop/core/ext-py/boto-2.38.0/README.rst b/desktop/core/ext-py/boto-2.38.0/README.rst new file mode 100644 index 0000000000000000000000000000000000000000..d20d0459782dbd4f0a922f90aec3b728b6ffbe1f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/README.rst @@ -0,0 +1,209 @@ +#### +boto +#### +boto 2.38.0 + +Released: 9-Apr-2015 + +.. image:: https://travis-ci.org/boto/boto.svg?branch=develop + :target: https://travis-ci.org/boto/boto + +.. image:: https://pypip.in/d/boto/badge.svg + :target: https://pypi.python.org/pypi/boto/ + +************ +Introduction +************ + +Boto is a Python package that provides interfaces to Amazon Web Services. +Currently, all features work with Python 2.6 and 2.7. Work is under way to +support Python 3.3+ in the same codebase. Modules are being ported one at +a time with the help of the open source community, so please check below +for compatibility with Python 3.3+. + +To port a module to Python 3.3+, please view our `Contributing Guidelines`_ +and the `Porting Guide`_. If you would like, you can open an issue to let +others know about your work in progress. Tests **must** pass on Python +2.6, 2.7, 3.3, and 3.4 for pull requests to be accepted. + +****** +Boto 3 +****** +The next major version of Boto is currently in developer preview and can +be found in the `Boto 3 `__ +repository and installed via ``pip``. It supports the latest service APIs +and provides a high-level object-oriented interface to many services. 
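
The Getting Started section above notes that credentials can either be passed explicitly to a connection factory or picked up from the environment; a minimal sketch (key values elided):

::

    import boto

    # Explicit credentials; omit both keyword arguments and boto will fall
    # back to AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY or a boto config file.
    s3 = boto.connect_s3(aws_access_key_id='...',
                         aws_secret_access_key='...')
    print(s3.get_all_buckets())
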
+
+Please try Boto 3 and
+`leave feedback `__ with any issues,
+suggestions, and feature requests you might have.
+
+********
+Services
+********
+
+At the moment, boto supports:
+
+* Compute
+
+  * Amazon Elastic Compute Cloud (EC2) (Python 3)
+  * Amazon Elastic MapReduce (EMR) (Python 3)
+  * AutoScaling (Python 3)
+  * Amazon Kinesis (Python 3)
+  * AWS Lambda (Python 3)
+  * Amazon EC2 Container Service (Python 3)
+
+* Content Delivery
+
+  * Amazon CloudFront (Python 3)
+
+* Database
+
+  * Amazon Relational Database Service (RDS)
+  * Amazon DynamoDB (Python 3)
+  * Amazon SimpleDB (Python 3)
+  * Amazon ElastiCache (Python 3)
+  * Amazon Redshift (Python 3)
+
+* Deployment and Management
+
+  * AWS Elastic Beanstalk (Python 3)
+  * AWS CloudFormation (Python 3)
+  * AWS Data Pipeline (Python 3)
+  * AWS OpsWorks (Python 3)
+  * AWS CloudTrail (Python 3)
+  * AWS CodeDeploy (Python 3)
+
+* Administration & Security
+
+  * AWS Identity and Access Management (IAM) (Python 3)
+  * AWS Key Management Service (KMS) (Python 3)
+  * AWS Config (Python 3)
+  * AWS CloudHSM (Python 3)
+
+* Application Services
+
+  * Amazon CloudSearch (Python 3)
+  * Amazon CloudSearch Domain (Python 3)
+  * Amazon Elastic Transcoder (Python 3)
+  * Amazon Simple Workflow Service (SWF) (Python 3)
+  * Amazon Simple Queue Service (SQS) (Python 3)
+  * Amazon Simple Notification Service (SNS) (Python 3)
+  * Amazon Simple Email Service (SES) (Python 3)
+  * Amazon Cognito Identity (Python 3)
+  * Amazon Cognito Sync (Python 3)
+  * Amazon Machine Learning (Python 3)
+
+* Monitoring
+
+  * Amazon CloudWatch (EC2 Only) (Python 3)
+  * Amazon CloudWatch Logs (Python 3)
+
+* Networking
+
+  * Amazon Route 53 (Python 3)
+  * Amazon Route 53 Domains (Python 3)
+  * Amazon Virtual Private Cloud (VPC) (Python 3)
+  * Elastic Load Balancing (ELB) (Python 3)
+  * AWS Direct Connect (Python 3)
+
+* Payments and Billing
+
+  * Amazon Flexible Payment Service (FPS)
+
+* Storage
+
+  * Amazon Simple Storage Service (S3) (Python 3)
+  * Amazon Glacier (Python 3)
+  * Amazon Elastic Block Store (EBS)
+  * Google Cloud Storage
+
+* Workforce
+
+  * Amazon Mechanical Turk
+
+* Other
+
+  * Marketplace Web Services (Python 3)
+  * AWS Support (Python 3)
+
+The goal of boto is to support the full breadth and depth of Amazon
+Web Services. In addition, boto supports other public services, such
+as Google Storage, as well as private cloud systems like Eucalyptus,
+OpenStack and Open Nebula.
+
+Boto is developed mainly using Python 2.6.6 and Python 2.7.3 on Mac OS X
+and Ubuntu Maverick. It is known to work on other Linux distributions
+and on Windows. Most of Boto requires no additional libraries or packages
+other than those that are distributed with Python. Efforts are made
+to keep boto compatible with Python 2.5.x but no guarantees are made.
+
+************
+Installation
+************
+
+Install via `pip`_:
+
+::
+
+    $ pip install boto
+
+Install from source:
+
+::
+
+    $ git clone git://github.com/boto/boto.git
+    $ cd boto
+    $ python setup.py install
+
+**********
+ChangeLogs
+**********
+
+To see what has changed over time in boto, you can check out the
+release notes at `http://docs.pythonboto.org/en/latest/#release-notes`
+
+***************************
+Finding Out More About Boto
+***************************
+
+The main source code repository for boto can be found on `github.com`_.
+The boto project uses the `gitflow`_ model for branching.
+
+`Online documentation`_ is also available.
The online documentation includes +full API documentation as well as Getting Started Guides for many of the boto +modules. + +Boto releases can be found on the `Python Cheese Shop`_. + +Join our IRC channel `#boto` on FreeNode. +Webchat IRC channel: http://webchat.freenode.net/?channels=boto + +Join the `boto-users Google Group`_. + +************************* +Getting Started with Boto +************************* + +Your credentials can be passed into the methods that create +connections. Alternatively, boto will check for the existence of the +following environment variables to ascertain your credentials: + +**AWS_ACCESS_KEY_ID** - Your AWS Access Key ID + +**AWS_SECRET_ACCESS_KEY** - Your AWS Secret Access Key + +Credentials and other boto-related settings can also be stored in a +boto config file. See `this`_ for details. + +.. _Contributing Guidelines: https://github.com/boto/boto/blob/develop/CONTRIBUTING +.. _Porting Guide: http://boto.readthedocs.org/en/latest/porting_guide.html +.. _pip: http://www.pip-installer.org/ +.. _release notes: https://github.com/boto/boto/wiki +.. _github.com: http://github.com/boto/boto +.. _Online documentation: http://docs.pythonboto.org +.. _Python Cheese Shop: http://pypi.python.org/pypi/boto +.. _this: http://docs.pythonboto.org/en/latest/boto_config_tut.html +.. _gitflow: http://nvie.com/posts/a-successful-git-branching-model/ +.. _neo: https://github.com/boto/boto/tree/neo +.. _boto-users Google Group: https://groups.google.com/forum/?fromgroups#!forum/boto-users diff --git a/desktop/core/ext-py/boto-2.38.0/bin/asadmin b/desktop/core/ext-py/boto-2.38.0/bin/asadmin new file mode 100755 index 0000000000000000000000000000000000000000..a8a38f178ef937fad9395b02c85a292370deefca --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/bin/asadmin @@ -0,0 +1,290 @@ +#!/usr/bin/env python +# Copyright (c) 2011 Joel Barciauskas http://joel.barciausk.as/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + +# +# Auto Scaling Groups Tool +# +VERSION="0.1" +usage = """%prog [options] [command] +Commands: + list|ls List all Auto Scaling Groups + list-lc|ls-lc List all Launch Configurations + delete Delete ASG + delete-lc Delete Launch Configuration + get Get details of ASG + create Create an ASG + create-lc Create a Launch Configuration + update Update a property of an ASG + update-image Update image ID for ASG by creating a new LC + migrate-instances Shut down current instances one by one and wait for ASG to start up a new instance with the current AMI (useful in conjunction with update-image) + +Examples: + + 1) Create launch configuration + bin/asadmin create-lc my-lc-1 -i ami-1234abcd -t c1.xlarge -k my-key -s web-group -m + + 2) Create auto scaling group in us-east-1a and us-east-1c with a load balancer and min size of 2 and max size of 6 + bin/asadmin create my-asg -z us-east-1a -z us-east-1c -l my-lc-1 -b my-lb -H ELB -p 180 -x 2 -X 6 +""" + +def get_group(autoscale, name): + g = autoscale.get_all_groups(names=[name]) + if len(g) < 1: + print "No auto scaling groups by the name of %s found" % name + return sys.exit(1) + return g[0] + +def get_lc(autoscale, name): + l = autoscale.get_all_launch_configurations(names=[name]) + if len(l) < 1: + print "No launch configurations by the name of %s found" % name + sys.exit(1) + return l[0] + +def list(autoscale): + """List all ASGs""" + print "%-20s %s" % ("Name", "LC Name") + print "-"*80 + groups = autoscale.get_all_groups() + for g in groups: + print "%-20s %s" % (g.name, g.launch_config_name) + +def list_lc(autoscale): + """List all LCs""" + print "%-30s %-20s %s" % ("Name", "Image ID", "Instance Type") + print "-"*80 + for l in autoscale.get_all_launch_configurations(): + print "%-30s %-20s %s" % (l.name, l.image_id, l.instance_type) + +def get(autoscale, name): + """Get details about ASG """ + g = get_group(autoscale, name) + print "="*80 + print "%-30s %s" % ('Name:', g.name) + print "%-30s %s" % ('Launch configuration:', g.launch_config_name) + print "%-30s %s" % ('Minimum size:', g.min_size) + print "%-30s %s" % ('Maximum size:', g.max_size) + print "%-30s %s" % ('Desired capacity:', g.desired_capacity) + print "%-30s %s" % ('Load balancers:', ','.join(g.load_balancers)) + + print + + print "Instances" + print "---------" + print "%-20s %-20s %-20s %s" % ("ID", "Status", "Health", "AZ") + for i in g.instances: + print "%-20s %-20s %-20s %s" % \ + (i.instance_id, i.lifecycle_state, i.health_status, i.availability_zone) + + print + +def create(autoscale, name, zones, lc_name, load_balancers, hc_type, hc_period, + min_size, max_size, cooldown, capacity): + """Create an ASG named """ + g = AutoScalingGroup(name=name, launch_config=lc_name, + availability_zones=zones, load_balancers=load_balancers, + default_cooldown=cooldown, health_check_type=hc_type, + health_check_period=hc_period, desired_capacity=capacity, + min_size=min_size, max_size=max_size) + g = autoscale.create_auto_scaling_group(g) + return list(autoscale) + +def create_lc(autoscale, name, image_id, instance_type, key_name, + security_groups, instance_monitoring): + l = LaunchConfiguration(name=name, image_id=image_id, + instance_type=instance_type,key_name=key_name, + security_groups=security_groups, + instance_monitoring=instance_monitoring) + l = 
autoscale.create_launch_configuration(l) + return list_lc(autoscale) + +def update(autoscale, name, prop, value): + g = get_group(autoscale, name) + setattr(g, prop, value) + g.update() + return get(autoscale, name) + +def delete(autoscale, name, force_delete=False): + """Delete this ASG""" + g = get_group(autoscale, name) + autoscale.delete_auto_scaling_group(g.name, force_delete) + print "Auto scaling group %s deleted" % name + return list(autoscale) + +def delete_lc(autoscale, name): + """Delete this LC""" + l = get_lc(autoscale, name) + autoscale.delete_launch_configuration(name) + print "Launch configuration %s deleted" % name + return list_lc(autoscale) + +def update_image(autoscale, name, lc_name, image_id, is_migrate_instances=False): + """ Get the current launch config, + Update its name and image id + Re-create it as a new launch config + Update the ASG with the new LC + Delete the old LC """ + + g = get_group(autoscale, name) + l = get_lc(autoscale, g.launch_config_name) + + old_lc_name = l.name + l.name = lc_name + l.image_id = image_id + autoscale.create_launch_configuration(l) + g.launch_config_name = l.name + g.update() + + if(is_migrate_instances): + migrate_instances(autoscale, name) + else: + return get(autoscale, name) + +def migrate_instances(autoscale, name): + """ Shut down instances of the old image type one by one + and let the ASG start up instances with the new image """ + g = get_group(autoscale, name) + + old_instances = g.instances + ec2 = boto.connect_ec2() + for old_instance in old_instances: + print "Terminating instance " + old_instance.instance_id + ec2.terminate_instances([old_instance.instance_id]) + while True: + g = get_group(autoscale, name) + new_instances = g.instances + for new_instance in new_instances: + hasOldInstance = False + instancesReady = True + if(old_instance.instance_id == new_instance.instance_id): + hasOldInstance = True + print "Waiting for old instance to shut down..." + break + elif(new_instance.lifecycle_state != 'InService'): + instancesReady = False + print "Waiting for instances to be ready...." 
+ break + if(not hasOldInstance and instancesReady): + break + else: + time.sleep(20) + return get(autoscale, name) + +if __name__ == "__main__": + try: + import readline + except ImportError: + pass + import boto + import sys + import time + from optparse import OptionParser + from boto.mashups.iobject import IObject + from boto.ec2.autoscale import AutoScalingGroup + from boto.ec2.autoscale import LaunchConfiguration + parser = OptionParser(version=VERSION, usage=usage) + """ Create launch config options """ + parser.add_option("-i", "--image-id", + help="Image (AMI) ID", action="store", + type="string", default=None, dest="image_id") + parser.add_option("-t", "--instance-type", + help="EC2 Instance Type (e.g., m1.large, c1.xlarge), default is m1.large", + action="store", type="string", default="m1.large", dest="instance_type") + parser.add_option("-k", "--key-name", + help="EC2 Key Name", + action="store", type="string", dest="key_name") + parser.add_option("-s", "--security-group", + help="EC2 Security Group", + action="append", default=[], dest="security_groups") + parser.add_option("-m", "--monitoring", + help="Enable instance monitoring", + action="store_true", default=False, dest="instance_monitoring") + + """ Create auto scaling group options """ + parser.add_option("-z", "--zone", help="Add availability zone", action="append", default=[], dest="zones") + parser.add_option("-l", "--lc-name", + help="Launch configuration name", + action="store", default=None, type="string", dest="lc_name") + parser.add_option("-b", "--load-balancer", + help="Load balancer name", + action="append", default=[], dest="load_balancers") + parser.add_option("-H", "--health-check-type", + help="Health check type (EC2 or ELB)", + action="store", default="EC2", type="string", dest="hc_type") + parser.add_option("-p", "--health-check-period", + help="Health check period in seconds (default 300s)", + action="store", default=300, type="int", dest="hc_period") + parser.add_option("-X", "--max-size", + help="Max size of ASG (default 10)", + action="store", default=10, type="int", dest="max_size") + parser.add_option("-x", "--min-size", + help="Min size of ASG (default 2)", + action="store", default=2, type="int", dest="min_size") + parser.add_option("-c", "--cooldown", + help="Cooldown time after a scaling activity in seconds (default 300s)", + action="store", default=300, type="int", dest="cooldown") + parser.add_option("-C", "--desired-capacity", + help="Desired capacity of the ASG", + action="store", default=None, type="int", dest="capacity") + parser.add_option("-f", "--force", + help="Force delete ASG", + action="store_true", default=False, dest="force") + parser.add_option("-y", "--migrate-instances", + help="Automatically migrate instances to new image when running update-image", + action="store_true", default=False, dest="migrate_instances") + + (options, args) = parser.parse_args() + + if len(args) < 1: + parser.print_help() + sys.exit(1) + + autoscale = boto.connect_autoscale() + + print "%s" % (autoscale.region.endpoint) + + command = args[0].lower() + if command in ("ls", "list"): + list(autoscale) + elif command in ("ls-lc", "list-lc"): + list_lc(autoscale) + elif command == "get": + get(autoscale, args[1]) + elif command == "create": + create(autoscale, args[1], options.zones, options.lc_name, + options.load_balancers, options.hc_type, + options.hc_period, options.min_size, options.max_size, + options.cooldown, options.capacity) + elif command == "create-lc": + create_lc(autoscale, args[1], 
options.image_id, options.instance_type, + options.key_name, options.security_groups, + options.instance_monitoring) + elif command == "update": + update(autoscale, args[1], args[2], args[3]) + elif command == "delete": + delete(autoscale, args[1], options.force) + elif command == "delete-lc": + delete_lc(autoscale, args[1]) + elif command == "update-image": + update_image(autoscale, args[1], args[2], + options.image_id, options.migrate_instances) + elif command == "migrate-instances": + migrate_instances(autoscale, args[1]) diff --git a/desktop/core/ext-py/boto-2.38.0/bin/bundle_image b/desktop/core/ext-py/boto-2.38.0/bin/bundle_image new file mode 100755 index 0000000000000000000000000000000000000000..70969790826850c08b7bc39867406e75b7a1ee87 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/bin/bundle_image @@ -0,0 +1,27 @@ +#!/usr/bin/env python +from boto.manage.server import Server +if __name__ == "__main__": + from optparse import OptionParser + parser = OptionParser(version="%prog 1.0", usage="Usage: %prog [options] instance-id [instance-id-2]") + + # Commands + parser.add_option("-b", "--bucket", help="Destination Bucket", dest="bucket", default=None) + parser.add_option("-p", "--prefix", help="AMI Prefix", dest="prefix", default=None) + parser.add_option("-k", "--key", help="Private Key File", dest="key_file", default=None) + parser.add_option("-c", "--cert", help="Public Certificate File", dest="cert_file", default=None) + parser.add_option("-s", "--size", help="AMI Size", dest="size", default=None) + parser.add_option("-i", "--ssh-key", help="SSH Keyfile", dest="ssh_key", default=None) + parser.add_option("-u", "--user-name", help="SSH Username", dest="uname", default="root") + parser.add_option("-n", "--name", help="Name of Image", dest="name") + (options, args) = parser.parse_args() + + for instance_id in args: + try: + s = Server.find(instance_id=instance_id).next() + print "Found old server object" + except StopIteration: + print "New Server Object Created" + s = Server.create_from_instance_id(instance_id, options.name) + assert(s.hostname is not None) + b = s.get_bundler(uname=options.uname) + b.bundle(bucket=options.bucket,prefix=options.prefix,key_file=options.key_file,cert_file=options.cert_file,size=int(options.size),ssh_key=options.ssh_key) diff --git a/desktop/core/ext-py/boto-2.38.0/bin/cfadmin b/desktop/core/ext-py/boto-2.38.0/bin/cfadmin new file mode 100755 index 0000000000000000000000000000000000000000..6fcdd86dc8c697e8ce23e1cc13847ecf5f552266 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/bin/cfadmin @@ -0,0 +1,108 @@ +#!/usr/bin/env python +# Author: Chris Moyer +# +# cfadmin is similar to sdbadmin for CloudFront, it's a simple +# console utility to perform the most frequent tasks with CloudFront +# +def _print_distributions(dists): + """Internal function to print out all the distributions provided""" + print "%-12s %-50s %s" % ("Status", "Domain Name", "Origin") + print "-"*80 + for d in dists: + print "%-12s %-50s %-30s" % (d.status, d.domain_name, d.origin) + for cname in d.cnames: + print " "*12, "CNAME => %s" % cname + print "" + +def help(cf, fnc=None): + """Print help message, optionally about a specific function""" + import inspect + self = sys.modules['__main__'] + if fnc: + try: + cmd = getattr(self, fnc) + except: + cmd = None + if not inspect.isfunction(cmd): + print "No function named: %s found" % fnc + sys.exit(2) + (args, varargs, varkw, defaults) = inspect.getargspec(cmd) + print cmd.__doc__ + print "Usage: %s %s" % (fnc, " ".join([ 
"[%s]" % a for a in args[1:]])) + else: + print "Usage: cfadmin [command]" + for cname in dir(self): + if not cname.startswith("_"): + cmd = getattr(self, cname) + if inspect.isfunction(cmd): + doc = cmd.__doc__ + print "\t%s - %s" % (cname, doc) + sys.exit(1) + +def ls(cf): + """List all distributions and streaming distributions""" + print "Standard Distributions" + _print_distributions(cf.get_all_distributions()) + print "Streaming Distributions" + _print_distributions(cf.get_all_streaming_distributions()) + +def invalidate(cf, origin_or_id, *paths): + """Create a cloudfront invalidation request""" + # Allow paths to be passed using stdin + if not paths: + paths = [] + for path in sys.stdin.readlines(): + path = path.strip() + if path: + paths.append(path) + dist = None + for d in cf.get_all_distributions(): + if d.id == origin_or_id or d.origin.dns_name == origin_or_id: + dist = d + break + if not dist: + print "Distribution not found: %s" % origin_or_id + sys.exit(1) + cf.create_invalidation_request(dist.id, paths) + +def listinvalidations(cf, origin_or_id): + """List invalidation requests for a given origin""" + dist = None + for d in cf.get_all_distributions(): + if d.id == origin_or_id or d.origin.dns_name == origin_or_id: + dist = d + break + if not dist: + print "Distribution not found: %s" % origin_or_id + sys.exit(1) + results = cf.get_invalidation_requests(dist.id) + if results: + for result in results: + if result.status == "InProgress": + result = result.get_invalidation_request() + print result.id, result.status, result.paths + else: + print result.id, result.status + + +if __name__ == "__main__": + import boto + import sys + cf = boto.connect_cloudfront() + self = sys.modules['__main__'] + if len(sys.argv) >= 2: + try: + cmd = getattr(self, sys.argv[1]) + except: + cmd = None + args = sys.argv[2:] + else: + cmd = help + args = [] + if not cmd: + cmd = help + try: + cmd(cf, *args) + except TypeError, e: + print e + help(cf, cmd.__name__) diff --git a/desktop/core/ext-py/boto-2.38.0/bin/cq b/desktop/core/ext-py/boto-2.38.0/bin/cq new file mode 100755 index 0000000000000000000000000000000000000000..05bc95b9cae364acd80bcf67ab512a2d659cbf78 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/bin/cq @@ -0,0 +1,92 @@ +#!/usr/bin/env python +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+#
+import getopt, sys
+import boto.sqs
+from boto.sqs.connection import SQSConnection
+from boto.exception import SQSError
+
+def usage():
+    print 'cq [-c] [-q queue_name] [-o output_file] [-t timeout] [-r region]'
+
+def main():
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], 'hcq:o:t:r:',
+                                   ['help', 'clear', 'queue=',
+                                    'output=', 'timeout=', 'region='])
+    except:
+        usage()
+        sys.exit(2)
+    queue_name = ''
+    output_file = ''
+    timeout = 30
+    region = ''
+    clear = False
+    for o, a in opts:
+        if o in ('-h', '--help'):
+            usage()
+            sys.exit()
+        if o in ('-q', '--queue'):
+            queue_name = a
+        if o in ('-o', '--output'):
+            output_file = a
+        if o in ('-c', '--clear'):
+            clear = True
+        if o in ('-t', '--timeout'):
+            timeout = int(a)
+        if o in ('-r', '--region'):
+            region = a
+    if region:
+        c = boto.sqs.connect_to_region(region)
+        if c is None:
+            print 'Invalid region (%s)' % region
+            sys.exit(1)
+    else:
+        c = SQSConnection()
+    if queue_name:
+        try:
+            rs = [c.create_queue(queue_name)]
+        except SQSError, e:
+            print 'An Error Occurred:'
+            print '%s: %s' % (e.status, e.reason)
+            print e.body
+            sys.exit()
+    else:
+        try:
+            rs = c.get_all_queues()
+        except SQSError, e:
+            print 'An Error Occurred:'
+            print '%s: %s' % (e.status, e.reason)
+            print e.body
+            sys.exit()
+    for q in rs:
+        if clear:
+            n = q.clear()
+            print 'clearing %d messages from %s' % (n, q.id)
+        elif output_file:
+            q.dump(output_file)
+        else:
+            print q.id, q.count(vtimeout=timeout)
+
+if __name__ == "__main__":
+    main()
+
diff --git a/desktop/core/ext-py/boto-2.38.0/bin/cwutil b/desktop/core/ext-py/boto-2.38.0/bin/cwutil
new file mode 100755
index 0000000000000000000000000000000000000000..e22b64ca110316ad6354f253668706a5c4c9c02c
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/bin/cwutil
@@ -0,0 +1,140 @@
+#!/usr/bin/env python
+# Author: Chris Moyer
+# Description: CloudWatch Utility
+# For listing stats, creating alarms, and managing
+# other CloudWatch aspects
+
+import boto
+cw = boto.connect_cloudwatch()
+
+from datetime import datetime, timedelta
+
+def _parse_time(time_string):
+    """Internal function to parse a time string"""
+    # Upstream leaves this function as an empty stub; parse ISO 8601
+    # timestamps (e.g. 2015-04-09T12:00:00) so that the start_time and
+    # end_time arguments of stats() actually work.
+    return datetime.strptime(time_string, '%Y-%m-%dT%H:%M:%S')
+
+def _parse_dict(d_string):
+    result = {}
+    if d_string:
+        for d in d_string.split(","):
+            d = d.split(":")
+            result[d[0]] = d[1]
+    return result
+
+def ls(namespace=None):
+    """
+    List metrics, optionally filtering by a specific namespace
+    namespace: Optional Namespace to filter on
+    """
+    print "%-10s %-50s %s" % ("Namespace", "Metric Name", "Dimensions")
+    print "-"*80
+    for m in cw.list_metrics():
+        if namespace is None or namespace.upper() in m.namespace:
+            print "%-10s %-50s %s" % (m.namespace, m.name, m.dimensions)
+
+def stats(namespace, metric_name, dimensions=None, statistics="Average", start_time=None, end_time=None, period=60, unit=None):
+    """
+    Lists the statistics for a specific metric
+    namespace: The namespace to use, usually "AWS/EC2", "AWS/SQS", etc.
+    metric_name: The name of the metric to track, pulled from `ls`
+    dimensions: The dimensions to use, formatted as Name:Value (such as QueueName:myQueue)
+    statistics: The statistics to measure, defaults to "Average"
+         'Minimum', 'Maximum', 'Sum', 'Average', 'SampleCount'
+    start_time: Start time, defaults to now - 1 day
+    end_time: End time, defaults to now
+    period: Period/interval for counts, default 60 seconds
+    unit: Unit to track, default depends on what metric is being tracked
+    """
+
+    # Parse the dimensions
+    dimensions = _parse_dict(dimensions)
+
+    # Parse the times
+    if end_time:
+        end_time = _parse_time(end_time)
+    else:
+        end_time = datetime.utcnow()
+    if start_time:
+        start_time = _parse_time(start_time)
+    else:
+        start_time = datetime.utcnow() - timedelta(days=1)
+
+    print "%-30s %s" % ('Timestamp', statistics)
+    print "-"*50
+    data = {}
+    for m in cw.get_metric_statistics(int(period), start_time, end_time, metric_name, namespace, statistics, dimensions, unit):
+        data[m['Timestamp']] = m[statistics]
+    keys = data.keys()
+    keys.sort()
+    for k in keys:
+        print "%-30s %s" % (k, data[k])
+
+def put(namespace, metric_name, dimensions=None, value=None, unit=None, statistics=None, timestamp=None):
+    """
+    Publish custom metrics
+    namespace: The namespace to use; values starting with "AWS/" are reserved
+    metric_name: The name of the metric to update
+    dimensions: The dimensions to use, formatted as Name:Value (such as QueueName:myQueue)
+    value: The value to store, mutually exclusive with `statistics`
+    statistics: The statistics to store, mutually exclusive with `value`
+        (must specify all of "Minimum", "Maximum", "Sum", "SampleCount")
+    timestamp: The timestamp of this measurement, default is current server time
+    unit: Unit to track, default depends on what metric is being tracked
+    """
+
+    def simplify(lst):
+        return lst[0] if len(lst) == 1 else lst
+
+    print cw.put_metric_data(namespace, simplify(metric_name.split(';')),
+        dimensions = simplify(map(_parse_dict, dimensions.split(';'))) if dimensions else None,
+        value = simplify(value.split(';')) if value else None,
+        statistics = simplify(map(_parse_dict, statistics.split(';'))) if statistics else None,
+        timestamp = simplify(timestamp.split(';')) if timestamp else None,
+        unit = simplify(unit.split(';')) if unit else None)
+
+def help(fnc=None):
+    """
+    Print help message, optionally about a specific function
+    """
+    import inspect
+    self = sys.modules['__main__']
+    if fnc:
+        try:
+            cmd = getattr(self, fnc)
+        except:
+            cmd = None
+        if not inspect.isfunction(cmd):
+            print "No function named: %s found" % fnc
+            sys.exit(2)
+        (args, varargs, varkw, defaults) = inspect.getargspec(cmd)
+        print cmd.__doc__
+        print "Usage: %s %s" % (fnc, " ".join([ "[%s]" % a for a in args]))
+    else:
+        print "Usage: cwutil [command]"
+        for cname in dir(self):
+            if not cname.startswith("_") and not cname == "cmd":
+                cmd = getattr(self, cname)
+                if inspect.isfunction(cmd):
+                    doc = cmd.__doc__
+                    print "\t%s - %s" % (cname, doc)
+    sys.exit(1)
+
+
+if __name__ == "__main__":
+    import sys
+    self = sys.modules['__main__']
+    if len(sys.argv) >= 2:
+        try:
+            cmd = getattr(self, sys.argv[1])
+        except:
+            cmd = None
+        args = sys.argv[2:]
+    else:
+        cmd = help
+        args = []
+    if not cmd:
+        cmd = help
+    try:
+        cmd(*args)
+    except TypeError, e:
+        print e
+        help(cmd.__name__)
diff --git a/desktop/core/ext-py/boto-2.38.0/bin/dynamodb_dump b/desktop/core/ext-py/boto-2.38.0/bin/dynamodb_dump
new file mode 100755
index
0000000000000000000000000000000000000000..8b6aada77b81f2c5df0dcc3c005225b3d67c247c --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/bin/dynamodb_dump @@ -0,0 +1,75 @@ +#!/usr/bin/env python + +import argparse +import errno +import os + +import boto +from boto.compat import json + + +DESCRIPTION = """Dump the contents of one or more DynamoDB tables to the local filesystem. + +Each table is dumped into two files: + - {table_name}.metadata stores the table's name, schema and provisioned + throughput. + - {table_name}.data stores the table's actual contents. + +Both files are created in the current directory. To write them somewhere else, +use the --out-dir parameter (the target directory will be created if needed). +""" + + +def dump_table(table, out_dir): + metadata_file = os.path.join(out_dir, "%s.metadata" % table.name) + data_file = os.path.join(out_dir, "%s.data" % table.name) + + with open(metadata_file, "w") as metadata_fd: + json.dump( + { + "name": table.name, + "schema": table.schema.dict, + "read_units": table.read_units, + "write_units": table.write_units, + }, + metadata_fd + ) + + with open(data_file, "w") as data_fd: + for item in table.scan(): + # JSON can't serialize sets -- convert those to lists. + data = {} + for k, v in item.iteritems(): + if isinstance(v, (set, frozenset)): + data[k] = list(v) + else: + data[k] = v + + data_fd.write(json.dumps(data)) + data_fd.write("\n") + + +def dynamodb_dump(tables, out_dir): + try: + os.makedirs(out_dir) + except OSError as e: + # We don't care if the dir already exists. + if e.errno != errno.EEXIST: + raise + + conn = boto.connect_dynamodb() + for t in tables: + dump_table(conn.get_table(t), out_dir) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + prog="dynamodb_dump", + description=DESCRIPTION + ) + parser.add_argument("--out-dir", default=".") + parser.add_argument("tables", metavar="TABLES", nargs="+") + + namespace = parser.parse_args() + + dynamodb_dump(namespace.tables, namespace.out_dir) diff --git a/desktop/core/ext-py/boto-2.38.0/bin/dynamodb_load b/desktop/core/ext-py/boto-2.38.0/bin/dynamodb_load new file mode 100755 index 0000000000000000000000000000000000000000..46a8d392ff8b58b8a56f5e158815216ac6970b3d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/bin/dynamodb_load @@ -0,0 +1,109 @@ +#!/usr/bin/env python + +import argparse +import os + +import boto +from boto.compat import json +from boto.dynamodb.schema import Schema + + +DESCRIPTION = """Load data into one or more DynamoDB tables. + +For each table, data is read from two files: + - {table_name}.metadata for the table's name, schema and provisioned + throughput (only required if creating the table). + - {table_name}.data for the table's actual contents. + +Both files are searched for in the current directory. To read them from +somewhere else, use the --in-dir parameter. + +This program does not wipe the tables prior to loading data. However, any +items present in the data files will overwrite the table's contents. +""" + + +def _json_iterload(fd): + """Lazily load newline-separated JSON objects from a file-like object.""" + buffer = "" + eof = False + while not eof: + try: + # Add a line to the buffer + buffer += fd.next() + except StopIteration: + # We can't let that exception bubble up, otherwise the last + # object in the file will never be decoded. + eof = True + try: + # Try to decode a JSON object. + json_object = json.loads(buffer.strip()) + + # Success: clear the buffer (everything was decoded). 
+ buffer = "" + except ValueError: + if eof and buffer.strip(): + # No more lines to load and the buffer contains something other + # than whitespace: the file is, in fact, malformed. + raise + # We couldn't decode a complete JSON object: load more lines. + continue + + yield json_object + + +def create_table(metadata_fd): + """Create a table from a metadata file-like object.""" + + +def load_table(table, in_fd): + """Load items into a table from a file-like object.""" + for i in _json_iterload(in_fd): + # Convert lists back to sets. + data = {} + for k, v in i.iteritems(): + if isinstance(v, list): + data[k] = set(v) + else: + data[k] = v + table.new_item(attrs=data).put() + + +def dynamodb_load(tables, in_dir, create_tables): + conn = boto.connect_dynamodb() + for t in tables: + metadata_file = os.path.join(in_dir, "%s.metadata" % t) + data_file = os.path.join(in_dir, "%s.data" % t) + if create_tables: + with open(metadata_file) as meta_fd: + metadata = json.load(meta_fd) + table = conn.create_table( + name=t, + schema=Schema(metadata["schema"]), + read_units=metadata["read_units"], + write_units=metadata["write_units"], + ) + table.refresh(wait_for_active=True) + else: + table = conn.get_table(t) + + with open(data_file) as in_fd: + load_table(table, in_fd) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + prog="dynamodb_load", + description=DESCRIPTION + ) + parser.add_argument( + "--create-tables", + action="store_true", + help="Create the tables if they don't exist already (without this flag, attempts to load data into non-existing tables fail)." + ) + parser.add_argument("--in-dir", default=".") + parser.add_argument("tables", metavar="TABLES", nargs="+") + + namespace = parser.parse_args() + + dynamodb_load(namespace.tables, namespace.in_dir, namespace.create_tables) diff --git a/desktop/core/ext-py/boto-2.38.0/bin/elbadmin b/desktop/core/ext-py/boto-2.38.0/bin/elbadmin new file mode 100755 index 0000000000000000000000000000000000000000..d83643f037eec7f45d0f28ea20deea8aaf5c959f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/bin/elbadmin @@ -0,0 +1,301 @@ +#!/usr/bin/env python +# Copyright (c) 2009 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + +# +# Elastic Load Balancer Tool +# +VERSION = "0.2" +usage = """%prog [options] [command] +Commands: + list|ls List all Elastic Load Balancers + delete Delete ELB + get Get all instances associated with + create Create an ELB; -z and -l are required + add Add in ELB + remove|rm Remove from ELB + reap Remove terminated instances from ELB + enable|en Enable Zone for ELB + disable Disable Zone for ELB + addl Add listeners (specified by -l) to the ELB + + rml Remove Listener(s) specified by the port on + the ELB +""" + + +def find_elb(elb, name): + try: + elbs = elb.get_all_load_balancers(name) + except boto.exception.BotoServerError as se: + if se.code == 'LoadBalancerNotFound': + elbs = [] + else: + raise + + if len(elbs) < 1: + print "No load balancer by the name of %s found" % name + return None + elif len(elbs) > 1: + print "More than one elb matches %s?" % name + return None + + # Should not happen + if name not in elbs[0].name: + print "No load balancer by the name of %s found" % name + return None + + return elbs[0] + + +def list(elb): + """List all ELBs""" + print "%-20s %s" % ("Name", "DNS Name") + print "-" * 80 + for b in elb.get_all_load_balancers(): + print "%-20s %s" % (b.name, b.dns_name) + +def check_valid_region(conn, region): + if conn is None: + print 'Invalid region (%s)' % region + sys.exit(1) + +def get(elb, name): + """Get details about ELB """ + + b = find_elb(elb, name) + if b: + print "=" * 80 + print "Name: %s" % b.name + print "DNS Name: %s" % b.dns_name + if b.canonical_hosted_zone_name: + chzn = b.canonical_hosted_zone_name + print "Canonical hosted zone name: %s" % chzn + if b.canonical_hosted_zone_name_id: + chznid = b.canonical_hosted_zone_name_id + print "Canonical hosted zone name id: %s" % chznid + print + + print "Health Check: %s" % b.health_check + print + + print "Listeners" + print "---------" + print "%-8s %-8s %s" % ("IN", "OUT", "PROTO") + for l in b.listeners: + print "%-8s %-8s %s" % (l[0], l[1], l[2]) + + print + + print " Zones " + print "---------" + for z in b.availability_zones: + print z + + print + + # Make map of all instance Id's to Name tags + import boto + if not options.region: + ec2 = boto.connect_ec2() + else: + ec2 = boto.ec2.connect_to_region(options.region) + check_valid_region(ec2, options.region) + + instance_health = b.get_instance_health() + instances = [state.instance_id for state in instance_health] + + names = dict((k,'') for k in instances) + for i in ec2.get_only_instances(): + if i.id in instances: + names[i.id] = i.tags.get('Name', '') + + name_column_width = max([4] + [len(v) for k,v in names.iteritems()]) + 2 + + print "Instances" + print "---------" + print "%-12s %-15s %-*s %s" % ("ID", + "STATE", + name_column_width, "NAME", + "DESCRIPTION") + for state in instance_health: + print "%-12s %-15s %-*s %s" % (state.instance_id, + state.state, + name_column_width, names[state.instance_id], + state.description) + + print + + +def create(elb, name, zones, listeners): + """Create an ELB named """ + l_list = [] + for l in listeners: + l = l.split(",") + if l[2] == 'HTTPS': + l_list.append((int(l[0]), int(l[1]), l[2], l[3])) + else: + l_list.append((int(l[0]), int(l[1]), l[2])) + + b = elb.create_load_balancer(name, zones, l_list) + return get(elb, name) + + +def delete(elb, name): + """Delete 
this ELB""" + b = find_elb(elb, name) + if b: + b.delete() + print "Load Balancer %s deleted" % name + + +def add_instances(elb, name, instances): + """Add to ELB """ + b = find_elb(elb, name) + if b: + b.register_instances(instances) + return get(elb, name) + + +def remove_instances(elb, name, instances): + """Remove instance from elb """ + b = find_elb(elb, name) + if b: + b.deregister_instances(instances) + return get(elb, name) + + +def reap_instances(elb, name): + """Remove terminated instances from elb """ + b = find_elb(elb, name) + if b: + for state in b.get_instance_health(): + if (state.state == 'OutOfService' and + state.description == 'Instance is in terminated state.'): + b.deregister_instances([state.instance_id]) + return get(elb, name) + + +def enable_zone(elb, name, zone): + """Enable for elb""" + b = find_elb(elb, name) + if b: + b.enable_zones([zone]) + return get(elb, name) + + +def disable_zone(elb, name, zone): + """Disable for elb""" + b = find_elb(elb, name) + if b: + b.disable_zones([zone]) + return get(elb, name) + + +def add_listener(elb, name, listeners): + """Add listeners to a given load balancer""" + l_list = [] + for l in listeners: + l = l.split(",") + l_list.append((int(l[0]), int(l[1]), l[2])) + b = find_elb(elb, name) + if b: + b.create_listeners(l_list) + return get(elb, name) + + +def rm_listener(elb, name, ports): + """Remove listeners from a given load balancer""" + b = find_elb(elb, name) + if b: + b.delete_listeners(ports) + return get(elb, name) + + +if __name__ == "__main__": + try: + import readline + except ImportError: + pass + import boto + import sys + from optparse import OptionParser + from boto.mashups.iobject import IObject + parser = OptionParser(version=VERSION, usage=usage) + parser.add_option("-z", "--zone", + help="Operate on zone", + action="append", default=[], dest="zones") + parser.add_option("-l", "--listener", + help="Specify Listener in,out,proto", + action="append", default=[], dest="listeners") + parser.add_option("-r", "--region", + help="Region to connect to", + action="store", dest="region") + + (options, args) = parser.parse_args() + + if len(args) < 1: + parser.print_help() + sys.exit(1) + + if not options.region: + elb = boto.connect_elb() + else: + import boto.ec2.elb + elb = boto.ec2.elb.connect_to_region(options.region) + check_valid_region(elb, options.region) + + print "%s" % (elb.region.endpoint) + + command = args[0].lower() + if command in ("ls", "list"): + list(elb) + elif command == "get": + get(elb, args[1]) + elif command == "create": + if not options.listeners: + print "-l option required for command create" + sys.exit(1) + if not options.zones: + print "-z option required for command create" + sys.exit(1) + create(elb, args[1], options.zones, options.listeners) + elif command == "delete": + delete(elb, args[1]) + elif command in ("add", "put"): + add_instances(elb, args[1], args[2:]) + elif command in ("rm", "remove"): + remove_instances(elb, args[1], args[2:]) + elif command == "reap": + reap_instances(elb, args[1]) + elif command in ("en", "enable"): + enable_zone(elb, args[1], args[2]) + elif command == "disable": + disable_zone(elb, args[1], args[2]) + elif command == "addl": + if not options.listeners: + print "-l option required for command addl" + sys.exit(1) + add_listener(elb, args[1], options.listeners) + elif command == "rml": + if not args[2:]: + print "port required" + sys.exit(2) + rm_listener(elb, args[1], args[2:]) diff --git a/desktop/core/ext-py/boto-2.38.0/bin/fetch_file 
b/desktop/core/ext-py/boto-2.38.0/bin/fetch_file new file mode 100755 index 0000000000000000000000000000000000000000..defb8b0e8e0eb01df23573f93302326fac2f94bf --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/bin/fetch_file @@ -0,0 +1,46 @@ +#!/usr/bin/env python +# Copyright (c) 2009 Chris Moyer http://coredumped.org +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import sys + + +if __name__ == "__main__": + from optparse import OptionParser + usage = """%prog [options] URI +Fetch a URI using the boto library and (by default) pipe contents to STDOUT +The URI can be either an HTTP URL, or "s3://bucket_name/key_name" +""" + parser = OptionParser(version="0.1", usage=usage) + parser.add_option("-o", "--out-file", + help="File to receive output instead of STDOUT", + dest="outfile") + + (options, args) = parser.parse_args() + if len(args) < 1: + parser.print_help() + sys.exit(1) + from boto.utils import fetch_file + f = fetch_file(args[0]) + if options.outfile: + open(options.outfile, "w").write(f.read()) + else: + print(f.read()) diff --git a/desktop/core/ext-py/boto-2.38.0/bin/glacier b/desktop/core/ext-py/boto-2.38.0/bin/glacier new file mode 100755 index 0000000000000000000000000000000000000000..ae3b0c574ba3e6ca6b887775177144c174094696 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/bin/glacier @@ -0,0 +1,161 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright (c) 2012 Miguel Olivares http://moliware.com/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
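
The fetch_file script above is a thin wrapper over boto.utils.fetch_file; a minimal sketch of calling it directly (bucket and key names hypothetical):

::

    from boto.utils import fetch_file

    # Accepts s3:// URIs as well as plain HTTP URLs; returns a file-like
    # object, or None if the fetch failed.
    f = fetch_file('s3://my-bucket/my-key.txt')
    if f is not None:
        print(f.read())
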
+# +""" + glacier + ~~~~~~~ + + Amazon Glacier tool built on top of boto. Look at the usage method to see + how to use it. + + Author: Miguel Olivares +""" +import sys + +from boto.glacier import connect_to_region +from getopt import getopt, GetoptError +from os.path import isfile, basename + + +COMMANDS = ('vaults', 'jobs', 'upload') + + +def usage(): + print(""" +glacier [args] + + Commands + vaults - Operations with vaults + jobs - Operations with jobs + upload - Upload files to a vault. If the vault doesn't exits, it is + created + + Common args: + --access_key - Your AWS Access Key ID. If not supplied, boto will + use the value of the environment variable + AWS_ACCESS_KEY_ID + --secret_key - Your AWS Secret Access Key. If not supplied, boto + will use the value of the environment variable + AWS_SECRET_ACCESS_KEY + --region - AWS region to use. Possible values: us-east-1, us-west-1, + us-west-2, ap-northeast-1, eu-west-1. + Default: us-east-1 + + Vaults operations: + + List vaults: + glacier vaults + + Jobs operations: + + List jobs: + glacier jobs + + Uploading files: + + glacier upload + + Examples : + glacier upload pics *.jpg + glacier upload pics a.jpg b.jpg +""") + sys.exit() + + +def connect(region, debug_level=0, access_key=None, secret_key=None): + """ Connect to a specific region """ + layer2 = connect_to_region(region, + aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + debug=debug_level) + if layer2 is None: + print('Invalid region (%s)' % region) + sys.exit(1) + return layer2 + + +def list_vaults(region, access_key=None, secret_key=None): + layer2 = connect(region, access_key = access_key, secret_key = secret_key) + for vault in layer2.list_vaults(): + print(vault.arn) + + +def list_jobs(vault_name, region, access_key=None, secret_key=None): + layer2 = connect(region, access_key = access_key, secret_key = secret_key) + print(layer2.layer1.list_jobs(vault_name)) + + +def upload_files(vault_name, filenames, region, access_key=None, secret_key=None): + layer2 = connect(region, access_key = access_key, secret_key = secret_key) + layer2.create_vault(vault_name) + glacier_vault = layer2.get_vault(vault_name) + for filename in filenames: + if isfile(filename): + sys.stdout.write('Uploading %s to %s...' % (filename, vault_name)) + sys.stdout.flush() + archive_id = glacier_vault.upload_archive( + filename, + description = basename(filename)) + print(' done. 
+
+def main():
+    if len(sys.argv) < 2:
+        usage()
+
+    command = sys.argv[1]
+    if command not in COMMANDS:
+        usage()
+
+    argv = sys.argv[2:]
+    options = 'a:s:r:'
+    long_options = ['access_key=', 'secret_key=', 'region=']
+    try:
+        opts, args = getopt(argv, options, long_options)
+    except GetoptError:
+        usage()
+
+    # Parse arguments
+    access_key = secret_key = None
+    region = 'us-east-1'
+    for option, value in opts:
+        if option in ('-a', '--access_key'):
+            access_key = value
+        elif option in ('-s', '--secret_key'):
+            secret_key = value
+        elif option in ('-r', '--region'):
+            region = value
+    # Handle each command
+    if command == 'vaults':
+        list_vaults(region, access_key, secret_key)
+    elif command == 'jobs':
+        if len(args) != 1:
+            usage()
+        list_jobs(args[0], region, access_key, secret_key)
+    elif command == 'upload':
+        if len(args) < 2:
+            usage()
+        upload_files(args[0], args[1:], region, access_key, secret_key)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/desktop/core/ext-py/boto-2.38.0/bin/instance_events b/desktop/core/ext-py/boto-2.38.0/bin/instance_events
new file mode 100755
index 0000000000000000000000000000000000000000..a851df665100fb3fb0c5885950964824c34f8363
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/bin/instance_events
@@ -0,0 +1,147 @@
+#!/usr/bin/env python
+# Copyright (c) 2011 Jim Browne http://www.42lines.net
+# Borrows heavily from boto/bin/list_instances which has no attribution
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + +VERSION="0.1" +usage = """%prog [options] +Options: + -h, --help show help message (including options list) and exit +""" + +from operator import itemgetter + +HEADERS = { + 'ID': {'get': itemgetter('id'), 'length':14}, + 'Zone': {'get': itemgetter('zone'), 'length':14}, + 'Hostname': {'get': itemgetter('dns'), 'length':20}, + 'Code': {'get': itemgetter('code'), 'length':18}, + 'Description': {'get': itemgetter('description'), 'length':30}, + 'NotBefore': {'get': itemgetter('not_before'), 'length':25}, + 'NotAfter': {'get': itemgetter('not_after'), 'length':25}, + 'T:': {'length': 30}, +} + +def get_column(name, event=None): + if name.startswith('T:'): + return event[name] + return HEADERS[name]['get'](event) + +def list(region, headers, order, completed): + """List status events for all instances in a given region""" + + import re + + ec2 = boto.connect_ec2(region=region) + + reservations = ec2.get_all_reservations() + + instanceinfo = {} + events = {} + + displaytags = [ x for x in headers if x.startswith('T:') ] + + # Collect the tag for every possible instance + for res in reservations: + for instance in res.instances: + iid = instance.id + instanceinfo[iid] = {} + for tagname in displaytags: + _, tag = tagname.split(':', 1) + instanceinfo[iid][tagname] = instance.tags.get(tag,'') + instanceinfo[iid]['dns'] = instance.public_dns_name + + stats = ec2.get_all_instance_status() + + for stat in stats: + if stat.events: + for event in stat.events: + events[stat.id] = {} + events[stat.id]['id'] = stat.id + events[stat.id]['dns'] = instanceinfo[stat.id]['dns'] + events[stat.id]['zone'] = stat.zone + for tag in displaytags: + events[stat.id][tag] = instanceinfo[stat.id][tag] + events[stat.id]['code'] = event.code + events[stat.id]['description'] = event.description + events[stat.id]['not_before'] = event.not_before + events[stat.id]['not_after'] = event.not_after + if completed and re.match('^\[Completed\]',event.description): + events[stat.id]['not_before'] = 'Completed' + events[stat.id]['not_after'] = 'Completed' + + # Create format string + format_string = "" + for h in headers: + if h.startswith('T:'): + format_string += "%%-%ds" % HEADERS['T:']['length'] + else: + format_string += "%%-%ds" % HEADERS[h]['length'] + + + print format_string % headers + print "-" * len(format_string % headers) + + for instance in sorted(events, + key=lambda ev: get_column(order, events[ev])): + e = events[instance] + print format_string % tuple(get_column(h, e) for h in headers) + +if __name__ == "__main__": + import boto + from optparse import OptionParser + from boto.ec2 import regions + + parser = OptionParser(version=VERSION, usage=usage) + parser.add_option("-a", "--all", help="check all regions", dest="all", default=False,action="store_true") + parser.add_option("-r", "--region", help="region to check (default us-east-1)", dest="region", default="us-east-1") + parser.add_option("-H", "--headers", help="Set headers (use 'T:tagname' for including tags)", default=None, action="store", dest="headers", metavar="ID,Zone,Hostname,Code,Description,NotBefore,NotAfter,T:Name") + parser.add_option("-S", "--sort", help="Header for sort order", default=None, action="store", dest="order",metavar="HeaderName") + parser.add_option("-c", "--completed", help="List time fields as \"Completed\" 
for completed events (Default: false)", default=False, action="store_true", dest="completed")
+
+    (options, args) = parser.parse_args()
+
+    if options.headers:
+        headers = tuple(options.headers.split(','))
+    else:
+        headers = ('ID', 'Zone', 'Hostname', 'Code', 'NotBefore', 'NotAfter')
+
+    if options.order:
+        order = options.order
+    else:
+        order = 'ID'
+
+    if options.all:
+        for r in regions():
+            print "Region %s" % r.name
+            list(r, headers, order, options.completed)
+    else:
+        # Connect to the requested region
+        for r in regions():
+            if r.name == options.region:
+                region = r
+                break
+        else:
+            # sys is only needed on this error path; it was never imported above
+            import sys
+            print "Region %s not found." % options.region
+            sys.exit(1)
+
+        list(region, headers, order, options.completed)
diff --git a/desktop/core/ext-py/boto-2.38.0/bin/kill_instance b/desktop/core/ext-py/boto-2.38.0/bin/kill_instance
new file mode 100755
index 0000000000000000000000000000000000000000..6683bb838f1dd6ea495dababc7d63b443a0b3769
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/bin/kill_instance
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+import sys
+from optparse import OptionParser
+
+import boto
+from boto.ec2 import regions
+
+
+
+def kill_instance(region, ids):
+    """Terminate instances given their instance IDs"""
+    # Connect to the region
+    ec2 = boto.connect_ec2(region=region)
+    for instance_id in ids:
+        print("Terminating instance: %s" % instance_id)
+        ec2.terminate_instances([instance_id])
+
+
+if __name__ == "__main__":
+    parser = OptionParser(usage="kill_instance [-r] id [id ...]")
+    parser.add_option("-r", "--region", help="Region (default us-east-1)", dest="region", default="us-east-1")
+    (options, args) = parser.parse_args()
+    if not args:
+        parser.print_help()
+        sys.exit(1)
+    for r in regions():
+        if r.name == options.region:
+            region = r
+            break
+    else:
+        print("Region %s not found." % options.region)
+        sys.exit(1)
+
+    kill_instance(region, args)
diff --git a/desktop/core/ext-py/boto-2.38.0/bin/launch_instance b/desktop/core/ext-py/boto-2.38.0/bin/launch_instance
new file mode 100755
index 0000000000000000000000000000000000000000..62bc5e4ddcfb3013407e26d5f3fddc1ed4deef60
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/bin/launch_instance
@@ -0,0 +1,252 @@
+#!/usr/bin/env python
+# Copyright (c) 2009 Chris Moyer http://coredumped.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + +# +# Utility to launch an EC2 Instance +# +VERSION="0.2" + + +CLOUD_INIT_SCRIPT = """#!/usr/bin/env python +f = open("/etc/boto.cfg", "w") +f.write(\"\"\"%s\"\"\") +f.close() +""" +import boto.pyami.config +import boto.utils +import re, os +from boto.compat import ConfigParser + +class Config(boto.pyami.config.Config): + """A special config class that also adds import abilities + Directly in the config file. To have a config file import + another config file, simply use "#import " where + is either a relative path or a full URL to another config + """ + + def __init__(self): + ConfigParser.__init__(self, {'working_dir' : '/mnt/pyami', 'debug' : '0'}) + + def add_config(self, file_url): + """Add a config file to this configuration + :param file_url: URL for the file to add, or a local path + :type file_url: str + """ + if not re.match("^([a-zA-Z0-9]*:\/\/)(.*)", file_url): + if not file_url.startswith("/"): + file_url = os.path.join(os.getcwd(), file_url) + file_url = "file://%s" % file_url + (base_url, file_name) = file_url.rsplit("/", 1) + base_config = boto.utils.fetch_file(file_url) + base_config.seek(0) + for line in base_config.readlines(): + match = re.match("^#import[\s\t]*([^\s^\t]*)[\s\t]*$", line) + if match: + self.add_config("%s/%s" % (base_url, match.group(1))) + base_config.seek(0) + self.readfp(base_config) + + def add_creds(self, ec2): + """Add the credentials to this config if they don't already exist""" + if not self.has_section('Credentials'): + self.add_section('Credentials') + self.set('Credentials', 'aws_access_key_id', ec2.aws_access_key_id) + self.set('Credentials', 'aws_secret_access_key', ec2.aws_secret_access_key) + + + def __str__(self): + """Get config as string""" + from StringIO import StringIO + s = StringIO() + self.write(s) + return s.getvalue() + +SCRIPTS = [] + +def scripts_callback(option, opt, value, parser): + arg = value.split(',') + if len(arg) == 1: + SCRIPTS.append(arg[0]) + else: + SCRIPTS.extend(arg) + setattr(parser.values, option.dest, SCRIPTS) + +def add_script(scr_url): + """Read a script and any scripts that are added using #import""" + base_url = '/'.join(scr_url.split('/')[:-1]) + '/' + script_raw = boto.utils.fetch_file(scr_url) + script_content = '' + for line in script_raw.readlines(): + match = re.match("^#import[\s\t]*([^\s^\t]*)[\s\t]*$", line) + #if there is an import + if match: + #Read the other script and put it in that spot + script_content += add_script("%s/%s" % (base_url, match.group(1))) + else: + #Otherwise, add the line and move on + script_content += line + return script_content + +if __name__ == "__main__": + try: + import readline + except ImportError: + pass + import sys + import time + import boto + from boto.ec2 import regions + from optparse import OptionParser + from boto.mashups.iobject import IObject + parser = OptionParser(version=VERSION, usage="%prog [options] config_url") + parser.add_option("-c", "--max-count", help="Maximum number of this type of instance to launch", dest="max_count", default="1") + parser.add_option("--min-count", help="Minimum number of this type of instance to launch", dest="min_count", default="1") + parser.add_option("--cloud-init", help="Indicates that this is an instance that uses 'CloudInit', Ubuntu's cloud bootstrap process. 
This wraps the config in a shell script command instead of just passing it in directly", dest="cloud_init", default=False, action="store_true") + parser.add_option("-g", "--groups", help="Security Groups to add this instance to", action="append", dest="groups") + parser.add_option("-a", "--ami", help="AMI to launch", dest="ami_id") + parser.add_option("-t", "--type", help="Type of Instance (default m1.small)", dest="type", default="m1.small") + parser.add_option("-k", "--key", help="Keypair", dest="key_name") + parser.add_option("-z", "--zone", help="Zone (default us-east-1a)", dest="zone", default="us-east-1a") + parser.add_option("-r", "--region", help="Region (default us-east-1)", dest="region", default="us-east-1") + parser.add_option("-i", "--ip", help="Elastic IP", dest="elastic_ip") + parser.add_option("-n", "--no-add-cred", help="Don't add a credentials section", default=False, action="store_true", dest="nocred") + parser.add_option("--save-ebs", help="Save the EBS volume on shutdown, instead of deleting it", default=False, action="store_true", dest="save_ebs") + parser.add_option("-w", "--wait", help="Wait until instance is running", default=False, action="store_true", dest="wait") + parser.add_option("-d", "--dns", help="Returns public and private DNS (implicates --wait)", default=False, action="store_true", dest="dns") + parser.add_option("-T", "--tag", help="Set tag", default=None, action="append", dest="tags", metavar="key:value") + parser.add_option("-s", "--scripts", help="Pass in a script or a folder containing scripts to be run when the instance starts up, assumes cloud-init. Specify scripts in a list specified by commas. If multiple scripts are specified, they are run lexically (A good way to ensure they run in the order is to prefix filenames with numbers)", type='string', action="callback", callback=scripts_callback) + parser.add_option("--role", help="IAM Role to use, this implies --no-add-cred", dest="role") + + (options, args) = parser.parse_args() + + if len(args) < 1: + parser.print_help() + sys.exit(1) + file_url = os.path.expanduser(args[0]) + + cfg = Config() + cfg.add_config(file_url) + + for r in regions(): + if r.name == options.region: + region = r + break + else: + print("Region %s not found." 
% options.region) + sys.exit(1) + ec2 = boto.connect_ec2(region=region) + if not options.nocred and not options.role: + cfg.add_creds(ec2) + + iobj = IObject() + if options.ami_id: + ami = ec2.get_image(options.ami_id) + else: + ami_id = options.ami_id + l = [(a, a.id, a.location) for a in ec2.get_all_images()] + ami = iobj.choose_from_list(l, prompt='Choose AMI') + + if options.key_name: + key_name = options.key_name + else: + l = [(k, k.name, '') for k in ec2.get_all_key_pairs()] + key_name = iobj.choose_from_list(l, prompt='Choose Keypair').name + + if options.groups: + groups = options.groups + else: + groups = [] + l = [(g, g.name, g.description) for g in ec2.get_all_security_groups()] + g = iobj.choose_from_list(l, prompt='Choose Primary Security Group') + while g != None: + groups.append(g) + l.remove((g, g.name, g.description)) + g = iobj.choose_from_list(l, prompt='Choose Additional Security Group (0 to quit)') + + user_data = str(cfg) + # If it's a cloud init AMI, + # then we need to wrap the config in our + # little wrapper shell script + + if options.cloud_init: + user_data = CLOUD_INIT_SCRIPT % user_data + scriptuples = [] + if options.scripts: + scripts = options.scripts + scriptuples.append(('user_data', user_data)) + for scr in scripts: + scr_url = scr + if not re.match("^([a-zA-Z0-9]*:\/\/)(.*)", scr_url): + if not scr_url.startswith("/"): + scr_url = os.path.join(os.getcwd(), scr_url) + try: + newfiles = os.listdir(scr_url) + for f in newfiles: + #put the scripts in the folder in the array such that they run in the correct order + scripts.insert(scripts.index(scr) + 1, scr.split("/")[-1] + "/" + f) + except OSError: + scr_url = "file://%s" % scr_url + try: + scriptuples.append((scr, add_script(scr_url))) + except Exception as e: + pass + + user_data = boto.utils.write_mime_multipart(scriptuples, compress=True) + + shutdown_proc = "terminate" + if options.save_ebs: + shutdown_proc = "save" + + instance_profile_name = None + if options.role: + instance_profile_name = options.role + + r = ami.run(min_count=int(options.min_count), max_count=int(options.max_count), + key_name=key_name, user_data=user_data, + security_groups=groups, instance_type=options.type, + placement=options.zone, instance_initiated_shutdown_behavior=shutdown_proc, + instance_profile_name=instance_profile_name) + + instance = r.instances[0] + + if options.tags: + for tag_pair in options.tags: + name = tag_pair + value = '' + if ':' in tag_pair: + name, value = tag_pair.split(':', 1) + instance.add_tag(name, value) + + if options.dns: + options.wait = True + + if not options.wait: + sys.exit(0) + + while True: + instance.update() + if instance.state == 'running': + break + time.sleep(3) + + if options.dns: + print("Public DNS name: %s" % instance.public_dns_name) + print("Private DNS name: %s" % instance.private_dns_name) diff --git a/desktop/core/ext-py/boto-2.38.0/bin/list_instances b/desktop/core/ext-py/boto-2.38.0/bin/list_instances new file mode 100755 index 0000000000000000000000000000000000000000..e48ec3901caeaf83cc27a0eefb1c06482343b66e --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/bin/list_instances @@ -0,0 +1,90 @@ +#!/usr/bin/env python + +import sys +from operator import attrgetter +from optparse import OptionParser + +import boto +from boto.ec2 import regions + + +HEADERS = { + 'ID': {'get': attrgetter('id'), 'length':15}, + 'Zone': {'get': attrgetter('placement'), 'length':15}, + 'Groups': {'get': attrgetter('groups'), 'length':30}, + 'Hostname': {'get': attrgetter('public_dns_name'), 
'length':50}, + 'PrivateHostname': {'get': attrgetter('private_dns_name'), 'length':50}, + 'State': {'get': attrgetter('state'), 'length':15}, + 'Image': {'get': attrgetter('image_id'), 'length':15}, + 'Type': {'get': attrgetter('instance_type'), 'length':15}, + 'IP': {'get': attrgetter('ip_address'), 'length':16}, + 'PrivateIP': {'get': attrgetter('private_ip_address'), 'length':16}, + 'Key': {'get': attrgetter('key_name'), 'length':25}, + 'T:': {'length': 30}, +} + +def get_column(name, instance=None): + if name.startswith('T:'): + _, tag = name.split(':', 1) + return instance.tags.get(tag, '') + return HEADERS[name]['get'](instance) + + +def main(): + parser = OptionParser() + parser.add_option("-r", "--region", help="Region (default us-east-1)", dest="region", default="us-east-1") + parser.add_option("-H", "--headers", help="Set headers (use 'T:tagname' for including tags)", default=None, action="store", dest="headers", metavar="ID,Zone,Groups,Hostname,State,T:Name") + parser.add_option("-t", "--tab", help="Tab delimited, skip header - useful in shell scripts", action="store_true", default=False) + parser.add_option("-f", "--filter", help="Filter option sent to DescribeInstances API call, format is key1=value1,key2=value2,...", default=None) + (options, args) = parser.parse_args() + + + # Connect the region + for r in regions(): + if r.name == options.region: + region = r + break + else: + print("Region %s not found." % options.region) + sys.exit(1) + ec2 = boto.connect_ec2(region=region) + + # Read headers + if options.headers: + headers = tuple(options.headers.split(',')) + else: + headers = ("ID", 'Zone', "Groups", "Hostname") + + # Create format string + format_string = "" + for h in headers: + if h.startswith('T:'): + format_string += "%%-%ds" % HEADERS['T:']['length'] + else: + format_string += "%%-%ds" % HEADERS[h]['length'] + + + # Parse filters (if any) + if options.filter: + filters = dict([entry.split('=') for entry in options.filter.split(',')]) + else: + filters = {} + + # List and print + + if not options.tab: + print(format_string % headers) + print("-" * len(format_string % headers)) + + for r in ec2.get_all_reservations(filters=filters): + groups = [g.name for g in r.groups] + for i in r.instances: + i.groups = ','.join(groups) + if options.tab: + print("\t".join(tuple(get_column(h, i) for h in headers))) + else: + print(format_string % tuple(get_column(h, i) for h in headers)) + + +if __name__ == "__main__": + main() diff --git a/desktop/core/ext-py/boto-2.38.0/bin/lss3 b/desktop/core/ext-py/boto-2.38.0/bin/lss3 new file mode 100755 index 0000000000000000000000000000000000000000..0add2647776416f74c9053fc1e71390770ae8a55 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/bin/lss3 @@ -0,0 +1,113 @@ +#!/usr/bin/env python +import boto +from boto.exception import S3ResponseError +from boto.s3.connection import OrdinaryCallingFormat + + +def sizeof_fmt(num): + for x in ['b ', 'KB', 'MB', 'GB', 'TB', 'XB']: + if num < 1024.0: + return "%3.1f %s" % (num, x) + num /= 1024.0 + return "%3.1f %s" % (num, x) + + +def list_bucket(b, prefix=None, marker=None): + """List everything in a bucket""" + from boto.s3.prefix import Prefix + from boto.s3.key import Key + total = 0 + + if prefix: + if not prefix.endswith("/"): + prefix = prefix + "/" + query = b.list(prefix=prefix, delimiter="/", marker=marker) + print("%s" % prefix) + else: + query = b.list(delimiter="/", marker=marker) + + num = 0 + for k in query: + num += 1 + mode = "-rwx---" + if isinstance(k, Prefix): + mode = 
"drwxr--" + size = 0 + else: + size = k.size + for g in k.get_acl().acl.grants: + if g.id == None: + if g.permission == "READ": + mode = "-rwxr--" + elif g.permission == "FULL_CONTROL": + mode = "-rwxrwx" + if isinstance(k, Key): + print("%s\t%s\t%010s\t%s" % (mode, k.last_modified, + sizeof_fmt(size), k.name)) + else: + #If it's not a Key object, it doesn't have a last_modified time, so + #print nothing instead + print("%s\t%s\t%010s\t%s" % (mode, ' ' * 24, + sizeof_fmt(size), k.name)) + total += size + print ("=" * 80) + print ("\t\tTOTAL: \t%010s \t%i Files" % (sizeof_fmt(total), num)) + + +def list_buckets(s3, display_tags=False): + """List all the buckets""" + for b in s3.get_all_buckets(): + print(b.name) + if display_tags: + try: + tags = b.get_tags() + for tag in tags[0]: + print(" %s:%s" % (tag.key, tag.value)) + except S3ResponseError as e: + if e.status != 404: + raise + + +def main(): + import optparse + import sys + + usage = "usage: %prog [options] [BUCKET1] [BUCKET2]" + description = "List all S3 buckets OR list keys in the named buckets" + parser = optparse.OptionParser(description=description, usage=usage) + parser.add_option('-m', '--marker', + help='The S3 key where the listing starts after it.') + parser.add_option('-t', '--tags', action='store_true', + help='Display tags when listing all buckets.') + options, buckets = parser.parse_args() + marker = options.marker + + if not buckets: + list_buckets(boto.connect_s3(), options.tags) + sys.exit(0) + + if options.tags: + print("-t option only works for the overall bucket list") + sys.exit(1) + + pairs = [] + mixedCase = False + for name in buckets: + if "/" in name: + pairs.append(name.split("/", 1)) + else: + pairs.append([name, None]) + if pairs[-1][0].lower() != pairs[-1][0]: + mixedCase = True + + if mixedCase: + s3 = boto.connect_s3(calling_format=OrdinaryCallingFormat()) + else: + s3 = boto.connect_s3() + + for name, prefix in pairs: + list_bucket(s3.get_bucket(name), prefix, marker=marker) + + +if __name__ == "__main__": + main() diff --git a/desktop/core/ext-py/boto-2.38.0/bin/mturk b/desktop/core/ext-py/boto-2.38.0/bin/mturk new file mode 100755 index 0000000000000000000000000000000000000000..a388391f851840ca2ad8b4be1f728777618b3714 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/bin/mturk @@ -0,0 +1,514 @@ +#!/usr/bin/env python +# Copyright 2012, 2014 Kodi Arfer +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + +import argparse # Hence, Python 2.7 is required. 
+import sys +import os.path +import string +import inspect +import datetime, calendar +import boto.mturk.connection, boto.mturk.price, boto.mturk.question, boto.mturk.qualification +from boto.compat import json + +# -------------------------------------------------- +# Globals +# ------------------------------------------------- + +interactive = False +con = None +mturk_website = None + +default_nicknames_path = os.path.expanduser('~/.boto_mturkcli_hit_nicknames') +nicknames = {} +nickname_pool = set(string.ascii_lowercase) + +get_assignments_page_size = 100 + +time_units = dict( + s = 1, + min = 60, + h = 60 * 60, + d = 24 * 60 * 60) + +qual_requirements = dict( + Adult = '00000000000000000060', + Locale = '00000000000000000071', + NumberHITsApproved = '00000000000000000040', + PercentAssignmentsSubmitted = '00000000000000000000', + PercentAssignmentsAbandoned = '00000000000000000070', + PercentAssignmentsReturned = '000000000000000000E0', + PercentAssignmentsApproved = '000000000000000000L0', + PercentAssignmentsRejected = '000000000000000000S0') + +qual_comparators = {v : k for k, v in dict( + LessThan = '<', LessThanOrEqualTo = '<=', + GreaterThan = '>', GreaterThanOrEqualTo = '>=', + EqualTo = '==', NotEqualTo = '!=', + Exists = 'exists').items()} + +example_config_file = '''Example configuration file: + + { + "title": "Pick your favorite color", + "description": "In this task, you are asked to pick your favorite color.", + "reward": 0.50, + "assignments": 10, + "duration": "20 min", + "keywords": ["color", "favorites", "survey"], + "lifetime": "7 d", + "approval_delay": "14 d", + "qualifications": [ + "PercentAssignmentsApproved > 90", + "Locale == US", + "2ARFPLSP75KLA8M8DH1HTEQVJT3SY6 exists" + ], + "question_url": "http://example.com/myhit", + "question_frame_height": 450 + }''' + +# -------------------------------------------------- +# Subroutines +# -------------------------------------------------- + +def unjson(path): + with open(path) as o: + return json.load(o) + +def add_argparse_arguments(parser): + parser.add_argument('-P', '--production', + dest = 'sandbox', action = 'store_false', default = True, + help = 'use the production site (default: use the sandbox)') + parser.add_argument('--nicknames', + dest = 'nicknames_path', metavar = 'PATH', + default = default_nicknames_path, + help = 'where to store HIT nicknames (default: {})'.format( + default_nicknames_path)) + +def init_by_args(args): + init(args.sandbox, args.nicknames_path) + +def init(sandbox = False, nicknames_path = default_nicknames_path): + global con, mturk_website, nicknames, original_nicknames + + mturk_website = 'workersandbox.mturk.com' if sandbox else 'www.mturk.com' + con = boto.mturk.connection.MTurkConnection( + host = 'mechanicalturk.sandbox.amazonaws.com' if sandbox else 'mechanicalturk.amazonaws.com') + + try: + nicknames = unjson(nicknames_path) + except IOError: + nicknames = {} + original_nicknames = nicknames.copy() + +def save_nicknames(nicknames_path = default_nicknames_path): + if nicknames != original_nicknames: + with open(nicknames_path, 'w') as o: + json.dump(nicknames, o, sort_keys = True, indent = 4) + print >>o + +def parse_duration(s): + '''Parses durations like "2 d", "48 h", "2880 min", +"172800 s", or "172800".''' + x = s.split() + return int(x[0]) * time_units['s' if len(x) == 1 else x[1]] +def display_duration(n): + for unit, m in sorted(time_units.items(), key = lambda x: -x[1]): + if n % m == 0: + return '{} {}'.format(n / m, unit) + +def parse_qualification(inp): + '''Parses 
qualifications like "PercentAssignmentsApproved > 90", +"Locale == US", and "2ARFPLSP75KLA8M8DH1HTEQVJT3SY6 exists".''' + inp = inp.split() + name, comparator, value = inp.pop(0), inp.pop(0), (inp[0] if len(inp) else None) + qtid = qual_requirements.get(name) + if qtid is None: + # Treat "name" as a Qualification Type ID. + qtid = name + if qtid == qual_requirements['Locale']: + return boto.mturk.qualification.LocaleRequirement( + qual_comparators[comparator], + value, + required_to_preview = False) + return boto.mturk.qualification.Requirement( + qtid, + qual_comparators[comparator], + value, + required_to_preview = qtid == qual_requirements['Adult']) + # Thus required_to_preview is true only for the + # Worker_Adult requirement. + +def preview_url(hit): + return 'https://{}/mturk/preview?groupId={}'.format( + mturk_website, hit.HITTypeId) + +def parse_timestamp(s): + '''Takes a timestamp like "2012-11-24T16:34:41Z". + +Returns a datetime object in the local time zone.''' + return datetime.datetime.fromtimestamp( + calendar.timegm( + datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%SZ').timetuple())) + +def get_hitid(nickname_or_hitid): + return nicknames.get(nickname_or_hitid) or nickname_or_hitid + +def get_nickname(hitid): + for k, v in nicknames.items(): + if v == hitid: + return k + return None + +def display_datetime(dt): + return dt.strftime('%e %b %Y, %l:%M %P') + +def display_hit(hit, verbose = False): + et = parse_timestamp(hit.Expiration) + return '\n'.join([ + '{} - {} ({}, {}, {})'.format( + get_nickname(hit.HITId), + hit.Title, + hit.FormattedPrice, + display_duration(int(hit.AssignmentDurationInSeconds)), + hit.HITStatus), + 'HIT ID: ' + hit.HITId, + 'Type ID: ' + hit.HITTypeId, + 'Group ID: ' + hit.HITGroupId, + 'Preview: ' + preview_url(hit), + 'Created {} {}'.format( + display_datetime(parse_timestamp(hit.CreationTime)), + 'Expired' if et <= datetime.datetime.now() else + 'Expires ' + display_datetime(et)), + 'Assignments: {} -- {} avail, {} pending, {} reviewable, {} reviewed'.format( + hit.MaxAssignments, + hit.NumberOfAssignmentsAvailable, + hit.NumberOfAssignmentsPending, + int(hit.MaxAssignments) - (int(hit.NumberOfAssignmentsAvailable) + int(hit.NumberOfAssignmentsPending) + int(hit.NumberOfAssignmentsCompleted)), + hit.NumberOfAssignmentsCompleted) + if hasattr(hit, 'NumberOfAssignmentsAvailable') + else 'Assignments: {} total'.format(hit.MaxAssignments), + # For some reason, SearchHITs includes the + # NumberOfAssignmentsFoobar fields but GetHIT doesn't. + ] + ([] if not verbose else [ + '\nDescription: ' + hit.Description, + '\nKeywords: ' + hit.Keywords + ])) + '\n' + +def digest_assignment(a): + return dict( + answers = {str(x.qid): str(x.fields[0]) for x in a.answers[0]}, + **{k: str(getattr(a, k)) for k in ( + 'AcceptTime', 'SubmitTime', + 'HITId', 'AssignmentId', 'WorkerId', + 'AssignmentStatus')}) + +# -------------------------------------------------- +# Commands +# -------------------------------------------------- + +def get_balance(): + return con.get_account_balance() + +def show_hit(hit): + return display_hit(con.get_hit(hit)[0], verbose = True) + +def list_hits(): + 'Lists your 10 most recently created HITs, with the most recent last.' 
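+    # search_hits returns newest-first here (CreationTime, Descending), so
+    # reversing the 10-item first page prints the most recent HIT last.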
+ return '\n'.join(reversed(map(display_hit, con.search_hits( + sort_by = 'CreationTime', + sort_direction = 'Descending', + page_size = 10)))) + +def make_hit(title, description, keywords, reward, question_url, question_frame_height, duration, assignments, approval_delay, lifetime, qualifications = []): + r = con.create_hit( + title = title, + description = description, + keywords = con.get_keywords_as_string(keywords), + reward = con.get_price_as_price(reward), + question = boto.mturk.question.ExternalQuestion( + question_url, + question_frame_height), + duration = parse_duration(duration), + qualifications = boto.mturk.qualification.Qualifications( + map(parse_qualification, qualifications)), + max_assignments = assignments, + approval_delay = parse_duration(approval_delay), + lifetime = parse_duration(lifetime)) + nick = None + available_nicks = nickname_pool - set(nicknames.keys()) + if available_nicks: + nick = min(available_nicks) + nicknames[nick] = r[0].HITId + if interactive: + print 'Nickname:', nick + print 'HIT ID:', r[0].HITId + print 'Preview:', preview_url(r[0]) + else: + return r[0] + +def extend_hit(hit, assignments_increment = None, expiration_increment = None): + con.extend_hit(hit, assignments_increment, expiration_increment) + +def expire_hit(hit): + con.expire_hit(hit) + +def delete_hit(hit): + '''Deletes a HIT using DisableHIT. + +Unreviewed assignments get automatically approved. Unsubmitted +assignments get automatically approved upon submission. + +The API docs say DisableHIT doesn't work with Reviewable HITs, +but apparently, it does.''' + con.disable_hit(hit) + global nicknames + nicknames = {k: v for k, v in nicknames.items() if v != hit} + +def list_assignments(hit, only_reviewable = False): + # Accumulate all relevant assignments, one page of results at + # a time. 
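+    # GetAssignmentsForHIT pages its results; rs.TotalNumResults reports the
+    # full count, so keep fetching pages until that many have been collected.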
+ assignments = [] + page = 1 + while True: + rs = con.get_assignments( + hit_id = hit, + page_size = get_assignments_page_size, + page_number = page, + status = 'Submitted' if only_reviewable else None) + assignments += map(digest_assignment, rs) + if len(assignments) >= int(rs.TotalNumResults): + break + page += 1 + if interactive: + print json.dumps(assignments, sort_keys = True, indent = 4) + print ' '.join([a['AssignmentId'] for a in assignments]) + print ' '.join([a['WorkerId'] + ',' + a['AssignmentId'] for a in assignments]) + else: + return assignments + +def grant_bonus(message, amount, pairs): + for worker, assignment in pairs: + con.grant_bonus(worker, assignment, con.get_price_as_price(amount), message) + if interactive: print 'Bonused', worker + +def approve_assignments(message, assignments): + for a in assignments: + con.approve_assignment(a, message) + if interactive: print 'Approved', a + +def reject_assignments(message, assignments): + for a in assignments: + con.reject_assignment(a, message) + if interactive: print 'Rejected', a + +def unreject_assignments(message, assignments): + for a in assignments: + con.approve_rejected_assignment(a, message) + if interactive: print 'Unrejected', a + +def notify_workers(subject, text, workers): + con.notify_workers(workers, subject, text) + +def give_qualification(qualification, workers, value = 1, notify = True): + for w in workers: + con.assign_qualification(qualification, w, value, notify) + if interactive: print 'Gave to', w + +def revoke_qualification(qualification, workers, message = None): + for w in workers: + con.revoke_qualification(w, qualification, message) + if interactive: print 'Revoked from', w + +# -------------------------------------------------- +# Mainline code +# -------------------------------------------------- + +if __name__ == '__main__': + interactive = True + + parser = argparse.ArgumentParser() + add_argparse_arguments(parser) + subs = parser.add_subparsers() + + sub = subs.add_parser('bal', + help = 'display your prepaid balance') + sub.set_defaults(f = get_balance, a = lambda: []) + + sub = subs.add_parser('hit', + help = 'get information about a HIT') + sub.add_argument('HIT', + help = 'nickname or ID of the HIT to show') + sub.set_defaults(f = show_hit, a = lambda: + [get_hitid(args.HIT)]) + + sub = subs.add_parser('hits', + help = 'list all your HITs') + sub.set_defaults(f = list_hits, a = lambda: []) + + sub = subs.add_parser('new', + help = 'create a new HIT (external questions only)', + epilog = example_config_file, + formatter_class = argparse.RawDescriptionHelpFormatter) + sub.add_argument('JSON_PATH', + help = 'path to JSON configuration file for the HIT') + sub.add_argument('-u', '--question-url', dest = 'question_url', + metavar = 'URL', + help = 'URL for the external question') + sub.add_argument('-a', '--assignments', dest = 'assignments', + type = int, metavar = 'N', + help = 'number of assignments') + sub.add_argument('-r', '--reward', dest = 'reward', + type = float, metavar = 'PRICE', + help = 'reward amount, in USD') + sub.set_defaults(f = make_hit, a = lambda: dict( + unjson(args.JSON_PATH).items() + [(k, getattr(args, k)) + for k in ('question_url', 'assignments', 'reward') + if getattr(args, k) is not None])) + + sub = subs.add_parser('extend', + help = 'add assignments or time to a HIT') + sub.add_argument('HIT', + help = 'nickname or ID of the HIT to extend') + sub.add_argument('-a', '--assignments', dest = 'assignments', + metavar = 'N', type = int, + help = 'number of 
assignments to add') + sub.add_argument('-t', '--time', dest = 'time', + metavar = 'T', + help = 'amount of time to add to the expiration date') + sub.set_defaults(f = extend_hit, a = lambda: + [get_hitid(args.HIT), args.assignments, + args.time and parse_duration(args.time)]) + + sub = subs.add_parser('expire', + help = 'force a HIT to expire without deleting it') + sub.add_argument('HIT', + help = 'nickname or ID of the HIT to expire') + sub.set_defaults(f = expire_hit, a = lambda: + [get_hitid(args.HIT)]) + + sub = subs.add_parser('rm', + help = 'delete a HIT') + sub.add_argument('HIT', + help = 'nickname or ID of the HIT to delete') + sub.set_defaults(f = delete_hit, a = lambda: + [get_hitid(args.HIT)]) + + sub = subs.add_parser('as', + help = "list a HIT's submitted assignments") + sub.add_argument('HIT', + help = 'nickname or ID of the HIT to get assignments for') + sub.add_argument('-r', '--reviewable', dest = 'only_reviewable', + action = 'store_true', + help = 'show only unreviewed assignments') + sub.set_defaults(f = list_assignments, a = lambda: + [get_hitid(args.HIT), args.only_reviewable]) + + for command, fun, helpmsg in [ + ('approve', approve_assignments, 'approve assignments'), + ('reject', reject_assignments, 'reject assignments'), + ('unreject', unreject_assignments, 'approve previously rejected assignments')]: + sub = subs.add_parser(command, help = helpmsg) + sub.add_argument('ASSIGNMENT', nargs = '+', + help = 'ID of an assignment') + sub.add_argument('-m', '--message', dest = 'message', + metavar = 'TEXT', + help = 'feedback message shown to workers') + sub.set_defaults(f = fun, a = lambda: + [args.message, args.ASSIGNMENT]) + + sub = subs.add_parser('bonus', + help = 'give some workers a bonus') + sub.add_argument('AMOUNT', type = float, + help = 'bonus amount, in USD') + sub.add_argument('MESSAGE', + help = 'the reason for the bonus (shown to workers in an email sent by MTurk)') + sub.add_argument('WIDAID', nargs = '+', + help = 'a WORKER_ID,ASSIGNMENT_ID pair') + sub.set_defaults(f = grant_bonus, a = lambda: + [args.MESSAGE, args.AMOUNT, + [p.split(',') for p in args.WIDAID]]) + + sub = subs.add_parser('notify', + help = 'send a message to some workers') + sub.add_argument('SUBJECT', + help = 'subject of the message') + sub.add_argument('MESSAGE', + help = 'text of the message') + sub.add_argument('WORKER', nargs = '+', + help = 'ID of a worker') + sub.set_defaults(f = notify_workers, a = lambda: + [args.SUBJECT, args.MESSAGE, args.WORKER]) + + sub = subs.add_parser('give-qual', + help = 'give a qualification to some workers') + sub.add_argument('QUAL', + help = 'ID of the qualification') + sub.add_argument('WORKER', nargs = '+', + help = 'ID of a worker') + sub.add_argument('-v', '--value', dest = 'value', + metavar = 'N', type = int, default = 1, + help = 'value of the qualification') + sub.add_argument('--dontnotify', dest = 'notify', + action = 'store_false', default = True, + help = "don't notify workers") + sub.set_defaults(f = give_qualification, a = lambda: + [args.QUAL, args.WORKER, args.value, args.notify]) + + sub = subs.add_parser('revoke-qual', + help = 'revoke a qualification from some workers') + sub.add_argument('QUAL', + help = 'ID of the qualification') + sub.add_argument('WORKER', nargs = '+', + help = 'ID of a worker') + sub.add_argument('-m', '--message', dest = 'message', + metavar = 'TEXT', + help = 'the reason the qualification was revoked (shown to workers in an email sent by MTurk)') + sub.set_defaults(f = revoke_qualification, a = 
lambda: + [args.QUAL, args.WORKER, args.message]) + + args = parser.parse_args() + + init_by_args(args) + + f = args.f + a = args.a() + if isinstance(a, dict): + # We do some introspective gymnastics so we can produce a + # less incomprehensible error message if some arguments + # are missing. + spec = inspect.getargspec(f) + missing = set(spec.args[: len(spec.args) - len(spec.defaults)]) - set(a.keys()) + if missing: + raise ValueError('Missing arguments: ' + ', '.join(missing)) + doit = lambda: f(**a) + else: + doit = lambda: f(*a) + + try: + x = doit() + except boto.mturk.connection.MTurkRequestError as e: + print 'MTurk error:', e.error_message + sys.exit(1) + + if x is not None: + print x + + save_nicknames() diff --git a/desktop/core/ext-py/boto-2.38.0/bin/pyami_sendmail b/desktop/core/ext-py/boto-2.38.0/bin/pyami_sendmail new file mode 100755 index 0000000000000000000000000000000000000000..550214595e0f755806f59d3787cebca1353c41b8 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/bin/pyami_sendmail @@ -0,0 +1,52 @@ +#!/usr/bin/env python +# Copyright (c) 2010 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + +# +# Send Mail from a PYAMI instance, or anything that has a boto.cfg +# properly set up +# +VERSION="0.1" +usage = """%prog [options] +Sends whatever is on stdin to the recipient specified by your boto.cfg +or whoevery you specify in the options here. 
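+Use -f/--file to read the body from a file instead of STDIN, and --html
+to send it as an HTML email.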
+"""
+
+if __name__ == "__main__":
+    from boto.utils import notify
+    import sys
+    from optparse import OptionParser
+    parser = OptionParser(version=VERSION, usage=usage)
+    parser.add_option("-t", "--to", help="Optional to address to send to (default from your boto.cfg)", action="store", default=None, dest="to")
+    parser.add_option("-s", "--subject", help="Optional Subject to send this report as", action="store", default="Report", dest="subject")
+    parser.add_option("-f", "--file", help="Optionally, read from a file instead of STDIN", action="store", default=None, dest="file")
+    parser.add_option("--html", help="HTML Format the email", action="store_true", default=False, dest="html")
+    parser.add_option("--no-instance-id", help="If set, don't append the instance id", action="store_false", default=True, dest="append_instance_id")
+
+    (options, args) = parser.parse_args()
+    if options.file:
+        body = open(options.file, 'r').read()
+    else:
+        body = sys.stdin.read()
+
+    if options.html:
+        notify(options.subject, html_body=body, to_string=options.to, append_instance_id=options.append_instance_id)
+    else:
+        notify(options.subject, body=body, to_string=options.to, append_instance_id=options.append_instance_id)
diff --git a/desktop/core/ext-py/boto-2.38.0/bin/route53 b/desktop/core/ext-py/boto-2.38.0/bin/route53
new file mode 100755
index 0000000000000000000000000000000000000000..fcdea70be9479b948d5acdc7fc32ecaa84cf9b30
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/bin/route53
@@ -0,0 +1,213 @@
+#!/usr/bin/env python
+# Author: Chris Moyer
+#
+# route53 is similar to sdbadmin, but for Route53: a simple
+# console utility to perform the most frequent tasks with Route53
+#
+# Example usage.  Use route53 get after each command to see how the
+# zone changes.
+#
+# Add a non-weighted record, change its value, then delete.  Default TTL:
+#
+# route53 add_record ZPO9LGHZ43QB9 rr.example.com A 4.3.2.1
+# route53 change_record ZPO9LGHZ43QB9 rr.example.com A 9.8.7.6
+# route53 del_record ZPO9LGHZ43QB9 rr.example.com A 9.8.7.6
+#
+# Add a weighted record with two different weights.  Note that the TTL
+# must be specified as route53 uses positional parameters rather than
+# option flags:
+#
+# route53 add_record ZPO9LGHZ43QB9 wrr.example.com A 1.2.3.4 600 foo9 10
+# route53 add_record ZPO9LGHZ43QB9 wrr.example.com A 4.3.2.1 600 foo8 10
+#
+# route53 change_record ZPO9LGHZ43QB9 wrr.example.com A 9.9.9.9 600 foo8 10
+#
+# route53 del_record ZPO9LGHZ43QB9 wrr.example.com A 1.2.3.4 600 foo9 10
+# route53 del_record ZPO9LGHZ43QB9 wrr.example.com A 9.9.9.9 600 foo8 10
+#
+# Add a non-weighted alias, change its value, then delete.  Aliases inherit
+# their TTLs from the backing ELB:
+#
+# route53 add_alias ZPO9LGHZ43QB9 alias.example.com A Z3DZXE0Q79N41H lb-1218761514.us-east-1.elb.amazonaws.com.
+# route53 change_alias ZPO9LGHZ43QB9 alias.example.com. A Z3DZXE0Q79N41H lb2-1218761514.us-east-1.elb.amazonaws.com.
+# route53 del_alias ZPO9LGHZ43QB9 alias.example.com. A Z3DZXE0Q79N41H lb2-1218761514.us-east-1.elb.amazonaws.com.
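+#
+# Similarly, to create a hosted zone and inspect it (zone ID as in the
+# examples above; create prints the nameservers to delegate to):
+#
+# route53 create example.com.
+# route53 ls
+# route53 get ZPO9LGHZ43QB9
+#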
+ +def _print_zone_info(zoneinfo): + print "="*80 + print "| ID: %s" % zoneinfo['Id'].split("/")[-1] + print "| Name: %s" % zoneinfo['Name'] + print "| Ref: %s" % zoneinfo['CallerReference'] + print "="*80 + print zoneinfo['Config'] + print + + +def create(conn, hostname, caller_reference=None, comment=''): + """Create a hosted zone, returning the nameservers""" + response = conn.create_hosted_zone(hostname, caller_reference, comment) + print "Pending, please add the following Name Servers:" + for ns in response.NameServers: + print "\t", ns + +def delete_zone(conn, hosted_zone_id): + """Delete a hosted zone by ID""" + response = conn.delete_hosted_zone(hosted_zone_id) + print response + +def ls(conn): + """List all hosted zones""" + response = conn.get_all_hosted_zones() + for zoneinfo in response['ListHostedZonesResponse']['HostedZones']: + _print_zone_info(zoneinfo) + +def get(conn, hosted_zone_id, type=None, name=None, maxitems=None): + """Get all the records for a single zone""" + response = conn.get_all_rrsets(hosted_zone_id, type, name, maxitems=maxitems) + # If a maximum number of items was set, we limit to that number + # by turning the response into an actual list (copying it) + # instead of allowing it to page + if maxitems: + response = response[:] + print '%-40s %-5s %-20s %s' % ("Name", "Type", "TTL", "Value(s)") + for record in response: + print '%-40s %-5s %-20s %s' % (record.name, record.type, record.ttl, record.to_print()) + +def _add_del(conn, hosted_zone_id, change, name, type, identifier, weight, values, ttl, comment): + from boto.route53.record import ResourceRecordSets + changes = ResourceRecordSets(conn, hosted_zone_id, comment) + change = changes.add_change(change, name, type, ttl, + identifier=identifier, weight=weight) + for value in values.split(','): + change.add_value(value) + print changes.commit() + +def _add_del_alias(conn, hosted_zone_id, change, name, type, identifier, weight, alias_hosted_zone_id, alias_dns_name, comment): + from boto.route53.record import ResourceRecordSets + changes = ResourceRecordSets(conn, hosted_zone_id, comment) + change = changes.add_change(change, name, type, + identifier=identifier, weight=weight) + change.set_alias(alias_hosted_zone_id, alias_dns_name) + print changes.commit() + +def add_record(conn, hosted_zone_id, name, type, values, ttl=600, + identifier=None, weight=None, comment=""): + """Add a new record to a zone. identifier and weight are optional.""" + _add_del(conn, hosted_zone_id, "CREATE", name, type, identifier, + weight, values, ttl, comment) + +def del_record(conn, hosted_zone_id, name, type, values, ttl=600, + identifier=None, weight=None, comment=""): + """Delete a record from a zone: name, type, ttl, identifier, and weight must match.""" + _add_del(conn, hosted_zone_id, "DELETE", name, type, identifier, + weight, values, ttl, comment) + +def add_alias(conn, hosted_zone_id, name, type, alias_hosted_zone_id, + alias_dns_name, identifier=None, weight=None, comment=""): + """Add a new alias to a zone. 
identifier and weight are optional.""" + _add_del_alias(conn, hosted_zone_id, "CREATE", name, type, identifier, + weight, alias_hosted_zone_id, alias_dns_name, comment) + +def del_alias(conn, hosted_zone_id, name, type, alias_hosted_zone_id, + alias_dns_name, identifier=None, weight=None, comment=""): + """Delete an alias from a zone: name, type, alias_hosted_zone_id, alias_dns_name, weight and identifier must match.""" + _add_del_alias(conn, hosted_zone_id, "DELETE", name, type, identifier, + weight, alias_hosted_zone_id, alias_dns_name, comment) + +def change_record(conn, hosted_zone_id, name, type, newvalues, ttl=600, + identifier=None, weight=None, comment=""): + """Delete and then add a record to a zone. identifier and weight are optional.""" + from boto.route53.record import ResourceRecordSets + changes = ResourceRecordSets(conn, hosted_zone_id, comment) + # Assume there are not more than 10 WRRs for a given (name, type) + responses = conn.get_all_rrsets(hosted_zone_id, type, name, maxitems=10) + for response in responses: + if response.name != name or response.type != type: + continue + if response.identifier != identifier or response.weight != weight: + continue + change1 = changes.add_change("DELETE", name, type, response.ttl, + identifier=response.identifier, + weight=response.weight) + for old_value in response.resource_records: + change1.add_value(old_value) + + change2 = changes.add_change("UPSERT", name, type, ttl, + identifier=identifier, weight=weight) + for new_value in newvalues.split(','): + change2.add_value(new_value) + print changes.commit() + +def change_alias(conn, hosted_zone_id, name, type, new_alias_hosted_zone_id, new_alias_dns_name, identifier=None, weight=None, comment=""): + """Delete and then add an alias to a zone. identifier and weight are optional.""" + from boto.route53.record import ResourceRecordSets + changes = ResourceRecordSets(conn, hosted_zone_id, comment) + # Assume there are not more than 10 WRRs for a given (name, type) + responses = conn.get_all_rrsets(hosted_zone_id, type, name, maxitems=10) + for response in responses: + if response.name != name or response.type != type: + continue + if response.identifier != identifier or response.weight != weight: + continue + change1 = changes.add_change("DELETE", name, type, + identifier=response.identifier, + weight=response.weight) + change1.set_alias(response.alias_hosted_zone_id, response.alias_dns_name) + change2 = changes.add_change("UPSERT", name, type, identifier=identifier, weight=weight) + change2.set_alias(new_alias_hosted_zone_id, new_alias_dns_name) + print changes.commit() + +def help(conn, fnc=None): + """Prints this help message""" + import inspect + self = sys.modules['__main__'] + if fnc: + try: + cmd = getattr(self, fnc) + except: + cmd = None + if not inspect.isfunction(cmd): + print "No function named: %s found" % fnc + sys.exit(2) + (args, varargs, varkw, defaults) = inspect.getargspec(cmd) + print cmd.__doc__ + print "Usage: %s %s" % (fnc, " ".join([ "[%s]" % a for a in args[1:]])) + else: + print "Usage: route53 [command]" + for cname in dir(self): + if not cname.startswith("_"): + cmd = getattr(self, cname) + if inspect.isfunction(cmd): + doc = cmd.__doc__ + print "\t%-20s %s" % (cname, doc) + sys.exit(1) + + +if __name__ == "__main__": + import boto + import sys + conn = boto.connect_route53() + self = sys.modules['__main__'] + if len(sys.argv) >= 2: + try: + cmd = getattr(self, sys.argv[1]) + except: + cmd = None + args = sys.argv[2:] + else: + cmd = help + args = [] + if not 
cmd:
+        cmd = help
+    try:
+        cmd(conn, *args)
+    except TypeError as e:
+        print e
+        help(conn, cmd.__name__)
diff --git a/desktop/core/ext-py/boto-2.38.0/bin/s3put b/desktop/core/ext-py/boto-2.38.0/bin/s3put
new file mode 100755
index 0000000000000000000000000000000000000000..da025b35874430da51cbe7ca318a679cf6e597a0
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/bin/s3put
@@ -0,0 +1,438 @@
+#!/usr/bin/env python
+# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import getopt
+import sys
+import os
+import boto
+
+from boto.compat import six
+
+try:
+    # multipart portions copyright Fabian Topfstedt
+    # https://gist.github.com/924094
+
+    import math
+    import mimetypes
+    from multiprocessing import Pool
+    from boto.s3.connection import S3Connection
+    from filechunkio import FileChunkIO
+    multipart_capable = True
+    usage_flag_multipart_capable = """ [--multipart]"""
+    usage_string_multipart_capable = """
+        multipart - Upload files as multiple parts. This needs filechunkio.
+                    Requires ListBucket, ListMultipartUploadParts,
+                    ListBucketMultipartUploads and PutObject permissions."""
+except ImportError as err:
+    multipart_capable = False
+    usage_flag_multipart_capable = ""
+    if six.PY2:
+        attribute = 'message'
+    else:
+        attribute = 'msg'
+    usage_string_multipart_capable = '\n\n     "' + \
+        getattr(err, attribute)[len('No module named '):] + \
+        '" is missing for multipart support '
+
+
+DEFAULT_REGION = 'us-east-1'
+
+usage_string = """
+SYNOPSIS
+    s3put [-a/--access_key <access_key>] [-s/--secret_key <secret_key>]
+          -b/--bucket <bucket_name> [-c/--callback <num_cb>]
+          [-d/--debug <debug_level>] [-i/--ignore <ignore_dirs>]
+          [-n/--no_op] [-p/--prefix <prefix>] [-k/--key_prefix <key_prefix>]
+          [-q/--quiet] [-g/--grant grant] [-w/--no_overwrite] [-r/--reduced]
+          [--header] [--region <name>] [--host <s3_host>]""" + \
+    usage_flag_multipart_capable + """ path [path...]
+
+    Where
+        access_key - Your AWS Access Key ID.  If not supplied, boto will
+                     use the value of the environment variable
+                     AWS_ACCESS_KEY_ID
+        secret_key - Your AWS Secret Access Key.  If not supplied, boto
+                     will use the value of the environment variable
+                     AWS_SECRET_ACCESS_KEY
+        bucket_name - The name of the S3 bucket the file(s) should be
+                      copied to.
+        path - A path to a directory or file that represents the items
+               to be uploaded.  If the path points to an individual file,
+               that file will be uploaded to the specified bucket.  If the
+               path points to a directory, it will recursively traverse
+               the directory and upload all files to the specified bucket.
+        debug_level - 0 means no debug output (default), 1 means normal
+                      debug output from boto, and 2 means boto debug output
+                      plus request/response output from httplib
+        ignore_dirs - a comma-separated list of directory names that will
+                      be ignored and not uploaded to S3.
+        num_cb - The number of progress callbacks to display.  The default
+                 is zero which means no callbacks.  If you supplied a value
+                 of "-c 10" for example, the progress callback would be
+                 called 10 times for each file transferred.
+        prefix - A file path prefix that will be stripped from the full
+                 path of the file when determining the key name in S3.
+                 For example, if the full path of a file is:
+                     /home/foo/bar/fie.baz
+                 and the prefix is specified as "-p /home/foo/" the
+                 resulting key name in S3 will be:
+                     /bar/fie.baz
+                 The prefix must end in a trailing separator and if it
+                 does not then one will be added.
+        key_prefix - A prefix to be added to the S3 key name, after any
+                     stripping of the file path is done based on the
+                     "-p/--prefix" option.
+        reduced - Use Reduced Redundancy storage
+        grant - A canned ACL policy that will be granted on each file
+                transferred to S3.  The value provided must be one
+                of the "canned" ACL policies supported by S3:
+                private|public-read|public-read-write|authenticated-read
+        no_overwrite - No files will be overwritten on S3, if the file/key
+                       exists on s3 it will be kept. This is useful for
+                       resuming interrupted transfers. Note this is not a
+                       sync, even if the file has been updated locally if
+                       the key exists on s3 the file on s3 will not be
+                       updated.
+        header - key=value pairs of extra header(s) to pass along in the
+                 request
+        region - Manually set a region for buckets that are not in the US
+                 classic region. Normally the region is autodetected, but
+                 setting this yourself is more efficient.
+        host - Hostname override, for using an endpoint other than AWS S3
+""" + usage_string_multipart_capable + """
+
+
+    If the -n option is provided, no files will be transferred to S3 but
+    informational messages will be printed about what would happen.
+"""
+
+
+def usage(status=1):
+    print(usage_string)
+    sys.exit(status)
+
+
+def submit_cb(bytes_so_far, total_bytes):
+    print('%d bytes transferred / %d bytes total' % (bytes_so_far, total_bytes))
+
+
+def get_key_name(fullpath, prefix, key_prefix):
+    if fullpath.startswith(prefix):
+        key_name = fullpath[len(prefix):]
+    else:
+        key_name = fullpath
+    parts = key_name.split(os.sep)
+    return key_prefix + '/'.join(parts)
+
+
+def _upload_part(bucketname, aws_key, aws_secret, multipart_id, part_num,
+                 source_path, offset, bytes, debug, cb, num_cb,
+                 amount_of_retries=10):
+    """
+    Uploads a part with retries.
+    """
+    if debug == 1:
+        print("_upload_part(%s, %s, %s)" % (source_path, offset, bytes))
+
+    def _upload(retries_left=amount_of_retries):
+        try:
+            if debug == 1:
+                print('Start uploading part #%d ...' % part_num)
+            conn = S3Connection(aws_key, aws_secret)
+            conn.debug = debug
+            bucket = conn.get_bucket(bucketname)
+            for mp in bucket.get_all_multipart_uploads():
+                if mp.id == multipart_id:
+                    with FileChunkIO(source_path, 'r', offset=offset,
+                                     bytes=bytes) as fp:
+                        mp.upload_part_from_file(fp=fp, part_num=part_num,
+                                                 cb=cb, num_cb=num_cb)
+                    break
+        except Exception as exc:
+            if retries_left:
+                _upload(retries_left=retries_left - 1)
+            else:
+                print('Failed uploading part #%d' % part_num)
+                raise exc
+        else:
+            if debug == 1:
+                print('...
+
+
+def _upload_part(bucketname, aws_key, aws_secret, multipart_id, part_num,
+                 source_path, offset, bytes, debug, cb, num_cb,
+                 amount_of_retries=10):
+    """
+    Uploads a part with retries.
+    """
+    if debug == 1:
+        print("_upload_part(%s, %s, %s)" % (source_path, offset, bytes))
+
+    def _upload(retries_left=amount_of_retries):
+        try:
+            if debug == 1:
+                print('Start uploading part #%d ...' % part_num)
+            conn = S3Connection(aws_key, aws_secret)
+            conn.debug = debug
+            bucket = conn.get_bucket(bucketname)
+            for mp in bucket.get_all_multipart_uploads():
+                if mp.id == multipart_id:
+                    with FileChunkIO(source_path, 'r', offset=offset,
+                                     bytes=bytes) as fp:
+                        mp.upload_part_from_file(fp=fp, part_num=part_num,
+                                                 cb=cb, num_cb=num_cb)
+                    break
+        except Exception as exc:
+            if retries_left:
+                _upload(retries_left=retries_left - 1)
+            else:
+                print('Failed uploading part #%d' % part_num)
+                raise exc
+        else:
+            if debug == 1:
+                print('... Uploaded part #%d' % part_num)
+
+    _upload()
+
+
+def check_valid_region(conn, region):
+    if conn is None:
+        print('Invalid region (%s)' % region)
+        sys.exit(1)
+
+
+def multipart_upload(bucketname, aws_key, aws_secret, source_path, keyname,
+                     reduced, debug, cb, num_cb, acl='private', headers=None,
+                     guess_mimetype=True, parallel_processes=4,
+                     region=DEFAULT_REGION):
+    """
+    Parallel multipart upload.
+    """
+    # Avoid a mutable default argument: headers is mutated below, so a
+    # shared {} default would leak values between calls.
+    if headers is None:
+        headers = {}
+    conn = boto.s3.connect_to_region(region, aws_access_key_id=aws_key,
+                                     aws_secret_access_key=aws_secret)
+    check_valid_region(conn, region)
+    conn.debug = debug
+    bucket = conn.get_bucket(bucketname)
+
+    if guess_mimetype:
+        mtype = mimetypes.guess_type(keyname)[0] or 'application/octet-stream'
+        headers.update({'Content-Type': mtype})
+
+    mp = bucket.initiate_multipart_upload(keyname, headers=headers,
+                                          reduced_redundancy=reduced)
+
+    source_size = os.stat(source_path).st_size
+    bytes_per_chunk = max(int(math.sqrt(5242880) * math.sqrt(source_size)),
+                          5242880)
+    chunk_amount = int(math.ceil(source_size / float(bytes_per_chunk)))
+
+    pool = Pool(processes=parallel_processes)
+    for i in range(chunk_amount):
+        offset = i * bytes_per_chunk
+        remaining_bytes = source_size - offset
+        bytes = min([bytes_per_chunk, remaining_bytes])
+        part_num = i + 1
+        pool.apply_async(_upload_part, [bucketname, aws_key, aws_secret, mp.id,
+                                        part_num, source_path, offset, bytes,
+                                        debug, cb, num_cb])
+    pool.close()
+    pool.join()
+
+    if len(mp.get_all_parts()) == chunk_amount:
+        mp.complete_upload()
+        key = bucket.get_key(keyname)
+        key.set_acl(acl)
+    else:
+        mp.cancel_upload()
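+
+# For reference, the sizing rule above scales the part size with the square
+# root of the file size, with a 5242880-byte (5 MB) floor, which is S3's
+# minimum part size.  A worked example with illustrative numbers: for a
+# 1 GiB file, sqrt(5242880) * sqrt(1073741824) is roughly 75 MB per part,
+# so the file is uploaded as 15 parts.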
+
+
+def singlepart_upload(bucket, key_name, fullpath, *kargs, **kwargs):
+    """
+    Single upload.
+    """
+    k = bucket.new_key(key_name)
+    k.set_contents_from_filename(fullpath, *kargs, **kwargs)
+
+
+def expand_path(path):
+    path = os.path.expanduser(path)
+    path = os.path.expandvars(path)
+    return os.path.abspath(path)
+
+
+def main():
+
+    # default values
+    aws_access_key_id = None
+    aws_secret_access_key = None
+    bucket_name = ''
+    ignore_dirs = []
+    debug = 0
+    cb = None
+    num_cb = 0
+    quiet = False
+    no_op = False
+    prefix = '/'
+    key_prefix = ''
+    grant = None
+    no_overwrite = False
+    reduced = False
+    headers = {}
+    host = None
+    multipart_requested = False
+    region = None
+
+    try:
+        opts, args = getopt.getopt(
+            sys.argv[1:], 'a:b:c:d:g:hi:k:np:qs:wr',
+            ['access_key=', 'bucket=', 'callback=', 'debug=', 'help', 'grant=',
+             'ignore=', 'key_prefix=', 'no_op', 'prefix=', 'quiet',
+             'secret_key=', 'no_overwrite', 'reduced', 'header=', 'multipart',
+             'host=', 'region='])
+    except getopt.GetoptError:
+        usage(1)
+
+    # parse opts
+    for o, a in opts:
+        if o in ('-h', '--help'):
+            usage(0)
+        if o in ('-a', '--access_key'):
+            aws_access_key_id = a
+        if o in ('-b', '--bucket'):
+            bucket_name = a
+        if o in ('-c', '--callback'):
+            num_cb = int(a)
+            cb = submit_cb
+        if o in ('-d', '--debug'):
+            debug = int(a)
+        if o in ('-g', '--grant'):
+            grant = a
+        if o in ('-i', '--ignore'):
+            ignore_dirs = a.split(',')
+        if o in ('-n', '--no_op'):
+            no_op = True
+        if o in ('-w', '--no_overwrite'):
+            no_overwrite = True
+        if o in ('-p', '--prefix'):
+            prefix = a
+            if prefix[-1] != os.sep:
+                prefix = prefix + os.sep
+            prefix = expand_path(prefix)
+        if o in ('-k', '--key_prefix'):
+            key_prefix = a
+        if o in ('-q', '--quiet'):
+            quiet = True
+        if o in ('-s', '--secret_key'):
+            aws_secret_access_key = a
+        if o in ('-r', '--reduced'):
+            reduced = True
+        if o == '--header':
+            (k, v) = a.split("=", 1)
+            headers[k] = v
+        if o == '--host':
+            host = a
+        if o == '--multipart':
+            if multipart_capable:
+                multipart_requested = True
+            else:
+                print("multipart upload requested but not capable")
+                sys.exit(4)
+        if o == '--region':
+            regions = boto.s3.regions()
+            for region_info in regions:
+                if region_info.name == a:
+                    region = a
+                    break
+            else:
+                raise ValueError('Invalid region %s specified' % a)
+
+    if len(args) < 1:
+        usage(2)
+
+    if not bucket_name:
+        print("bucket name is required!")
+        usage(3)
+
+    connect_args = {
+        'aws_access_key_id': aws_access_key_id,
+        'aws_secret_access_key': aws_secret_access_key
+    }
+
+    if host:
+        connect_args['host'] = host
+
+    c = boto.s3.connect_to_region(region or DEFAULT_REGION, **connect_args)
+    check_valid_region(c, region or DEFAULT_REGION)
+    c.debug = debug
+    b = c.get_bucket(bucket_name, validate=False)
+
+    # Attempt to determine location and warn if no --host or --region
+    # arguments were passed.  Then try to automagically figure out
+    # what should have been passed and fix it.
+    if host is None and region is None:
+        try:
+            location = b.get_location()
+
+            # Classic region will be '', any other will have a name
+            if location:
+                print('Bucket exists in %s but no host or region given!' % location)
+
+                # Override for EU, which is really Ireland according to the docs
+                if location == 'EU':
+                    location = 'eu-west-1'
+
+                print('Automatically setting region to %s' % location)
+
+                # Here we create a new connection, and then take the existing
+                # bucket and set it to use the new connection
+                c = boto.s3.connect_to_region(location, **connect_args)
+                c.debug = debug
+                b.connection = c
+        except Exception as e:
+            if debug > 0:
+                print(e)
+            print('Could not get bucket region info, skipping...')
+
+    existing_keys_to_check_against = []
+    files_to_check_for_upload = []
+
+    for path in args:
+        path = expand_path(path)
+        # upload a directory of files recursively
+        if os.path.isdir(path):
+            if no_overwrite:
+                if not quiet:
+                    print('Getting list of existing keys to check against')
+                for key in b.list(get_key_name(path, prefix, key_prefix)):
+                    existing_keys_to_check_against.append(key.name)
+            for root, dirs, files in os.walk(path):
+                for ignore in ignore_dirs:
+                    if ignore in dirs:
+                        dirs.remove(ignore)
+                for path in files:
+                    if path.startswith("."):
+                        continue
+                    files_to_check_for_upload.append(os.path.join(root, path))
+
+        # upload a single file
+        elif os.path.isfile(path):
+            fullpath = os.path.abspath(path)
+            key_name = get_key_name(fullpath, prefix, key_prefix)
+            files_to_check_for_upload.append(fullpath)
+            existing_keys_to_check_against.append(key_name)
+
+        # we are trying to upload something unknown
+        else:
+            print("I don't know what %s is, so I can't upload it" % path)
+
+    for fullpath in files_to_check_for_upload:
+        key_name = get_key_name(fullpath, prefix, key_prefix)
+
+        if no_overwrite and key_name in existing_keys_to_check_against:
+            if b.get_key(key_name):
+                if not quiet:
+                    print('Skipping %s as it exists in S3' % fullpath)
+                continue
+
+        if not quiet:
+            print('Copying %s to %s/%s' % (fullpath, bucket_name, key_name))
+
+        if not no_op:
+            # 0-byte files don't work and also don't need multipart upload
+            if os.stat(fullpath).st_size != 0 and multipart_capable and \
+                    multipart_requested:
+                multipart_upload(bucket_name, aws_access_key_id,
+                                 aws_secret_access_key, fullpath, key_name,
+                                 reduced, debug, cb, num_cb,
+                                 grant or 'private', headers,
+                                 region=region or DEFAULT_REGION)
+            else:
+                singlepart_upload(b, key_name, fullpath, cb=cb, num_cb=num_cb,
+                                  policy=grant, reduced_redundancy=reduced,
+                                  headers=headers)
+
+if __name__ == "__main__":
+    main()
diff --git a/desktop/core/ext-py/boto-2.38.0/bin/sdbadmin b/desktop/core/ext-py/boto-2.38.0/bin/sdbadmin
new file mode 100755
index 0000000000000000000000000000000000000000..9e2448c5310a654e71744fae8f3ad7d026bbc936
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/bin/sdbadmin
@@ -0,0 +1,194 @@
+#!/usr/bin/env python
+# Copyright (c) 2009 Chris Moyer http://kopertop.blogspot.com/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+
+#
+# Tools to dump and recover an SDB domain
+#
+VERSION = "%prog version 1.0"
+import boto
+import sys
+import time
+from boto import sdb
+from boto.compat import json
+
+def choice_input(options, default=None, title=None):
+    """
+    Choice input
+    """
+    if title is None:
+        title = "Please choose"
+    print title
+    objects = []
+    for n, obj in enumerate(options):
+        print "%s: %s" % (n, obj)
+        objects.append(obj)
+    choice = int(raw_input(">>> "))
+    try:
+        choice = objects[choice]
+    except IndexError:
+        choice = default
+    return choice
+
+def confirm(message="Are you sure?"):
+    choice = raw_input("%s [yN] " % message)
+    return choice and len(choice) > 0 and choice[0].lower() == "y"
+
+
+def dump_db(domain, file_name, use_json=False, sort_attributes=False):
+    """
+    Dump SDB domain to file
+    """
+    f = open(file_name, "w")
+    if use_json:
+        for item in domain:
+            data = {"name": item.name, "attributes": item}
+            print >> f, json.dumps(data, sort_keys=sort_attributes)
+    else:
+        domain.to_xml(f)
+
+def empty_db(domain):
+    """
+    Remove all entries from domain
+    """
+    for item in domain:
+        item.delete()
+
+def load_db(domain, file, use_json=False):
+    """
+    Load a domain from a file.  This doesn't overwrite any existing
+    data in the domain, so if you want to do a full recovery and restore
+    you need to call empty_db before calling this.
+
+    :param domain: The SDB Domain object to load to
+    :param file: The File to load the DB from
+    """
+    if use_json:
+        for line in file.readlines():
+            if line:
+                data = json.loads(line)
+                item = domain.new_item(data['name'])
+                item.update(data['attributes'])
+                item.save()
+
+    else:
+        domain.from_xml(file)
+
+def check_valid_region(conn, region):
+    if conn is None:
+        print 'Invalid region (%s)' % region
+        sys.exit(1)
+
+def create_db(domain_name, region_name):
+    """Create a new DB
+
+    :param domain_name: Name of the domain to create
+    :type domain_name: str
+    """
+    sdb = boto.sdb.connect_to_region(region_name)
+    check_valid_region(sdb, region_name)
+    return sdb.create_domain(domain_name)
+
+if __name__ == "__main__":
+    from optparse import OptionParser
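+    # Illustrative invocations (domain and file names here are hypothetical):
+    #   sdbadmin --dump -d mydomain -j -f mydomain.json
+    #   sdbadmin --load -d mydomain -j -f mydomain.json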
usage="Usage: %prog [--dump|--load|--empty|--list|-l] [options]") + + # Commands + parser.add_option("--dump", help="Dump domain to file", dest="dump", default=False, action="store_true") + parser.add_option("--load", help="Load domain contents from file", dest="load", default=False, action="store_true") + parser.add_option("--empty", help="Empty all contents of domain", dest="empty", default=False, action="store_true") + parser.add_option("-l", "--list", help="List All domains", dest="list", default=False, action="store_true") + parser.add_option("-c", "--create", help="Create domain", dest="create", default=False, action="store_true") + + parser.add_option("-a", "--all-domains", help="Operate on all domains", action="store_true", default=False, dest="all_domains") + if json: + parser.add_option("-j", "--use-json", help="Load/Store as JSON instead of XML", action="store_true", default=False, dest="json") + parser.add_option("-s", "--sort-attibutes", help="Sort the element attributes", action="store_true", default=False, dest="sort_attributes") + parser.add_option("-d", "--domain", help="Do functions on domain (may be more then one)", action="append", dest="domains") + parser.add_option("-f", "--file", help="Input/Output file we're operating on", dest="file_name") + parser.add_option("-r", "--region", help="Region (e.g. us-east-1[default] or eu-west-1)", default="us-east-1", dest="region_name") + (options, args) = parser.parse_args() + + if options.create: + for domain_name in options.domains: + create_db(domain_name, options.region_name) + exit() + + sdb = boto.sdb.connect_to_region(options.region_name) + check_valid_region(sdb, options.region_name) + if options.list: + for db in sdb.get_all_domains(): + print db + exit() + + if not options.dump and not options.load and not options.empty: + parser.print_help() + exit() + + + + + # + # Setup + # + if options.domains: + domains = [] + for domain_name in options.domains: + domains.append(sdb.get_domain(domain_name)) + elif options.all_domains: + domains = sdb.get_all_domains() + else: + domains = [choice_input(options=sdb.get_all_domains(), title="No domain specified, please choose one")] + + + # + # Execute the commands + # + stime = time.time() + if options.empty: + if confirm("WARNING!!! 
+    if options.empty:
+        if confirm("WARNING!!! Are you sure you want to empty the following domains?: %s" % domains):
+            stime = time.time()
+            for domain in domains:
+                print "--------> Emptying %s <--------" % domain.name
+                empty_db(domain)
+        else:
+            print "Canceling operations"
+            exit()
+
+    if options.dump:
+        for domain in domains:
+            print "--------> Dumping %s <--------" % domain.name
+            if options.file_name:
+                file_name = options.file_name
+            else:
+                file_name = "%s.db" % domain.name
+            dump_db(domain, file_name, options.json, options.sort_attributes)
+
+    if options.load:
+        for domain in domains:
+            print "--------> Loading %s <--------" % domain.name
+            if options.file_name:
+                file_name = options.file_name
+            else:
+                file_name = "%s.db" % domain.name
+            load_db(domain, open(file_name, "rb"), options.json)
+
+    total_time = round(time.time() - stime, 2)
+    print "--------> Finished in %s <--------" % total_time
diff --git a/desktop/core/ext-py/boto-2.38.0/bin/taskadmin b/desktop/core/ext-py/boto-2.38.0/bin/taskadmin
new file mode 100755
index 0000000000000000000000000000000000000000..5d5302adc910c4e94d23123d95e8453427f60e02
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/bin/taskadmin
@@ -0,0 +1,116 @@
+#!/usr/bin/env python
+# Copyright (c) 2009 Chris Moyer http://coredumped.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+
+#
+# Task/Job Administration utility
+#
+VERSION = "0.1"
+__version__ = VERSION
+usage = """%prog [options] [command]
+Commands:
+    list|ls                            List all Tasks in SDB
+    delete <id>                        Delete Task with id <id>
+    get <name>                         Get Task <name>
+    create|mk <name> <hour> <command>  Create a new Task with <command>
+                                       running every <hour>
+"""
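+
+# Example session (illustrative only; assumes boto credentials with SDB
+# access, and a task id taken from the list output):
+#   taskadmin list
+#   taskadmin create nightly-backup 0 "/usr/local/bin/backup.sh"
+#   taskadmin delete 0123abcd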
").lower() in ["y", "yes"]: + t.delete() + print "Deleted" + else: + print "Canceled" + +def create(name, hour, command): + """Create a new task + :param name: Name of the task to create + :type name: str + :param hour: What hour to run it at, "*" for every hour + :type hour: str + :param command: The command to execute + :type command: str + """ + from boto.manage.task import Task + t = Task() + t.name = name + t.hour = hour + t.command = command + t.put() + print "Created task: %s" % t.id + +if __name__ == "__main__": + try: + import readline + except ImportError: + pass + import boto + import sys + from optparse import OptionParser + from boto.mashups.iobject import IObject + parser = OptionParser(version=__version__, usage=usage) + + (options, args) = parser.parse_args() + + if len(args) < 1: + parser.print_help() + sys.exit(1) + + command = args[0].lower() + if command in ("ls", "list"): + list() + elif command == "get": + get(args[1]) + elif command == "create": + create(args[1], args[2], args[3]) + elif command == "delete": + delete(args[1]) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..75082c9312261c2d4f7e32d417ee28fdc438b832 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/__init__.py @@ -0,0 +1,1216 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010-2011, Eucalyptus Systems, Inc. +# Copyright (c) 2011, Nexenta Systems Inc. +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# Copyright (c) 2010, Google, Inc. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.pyami.config import Config, BotoConfigLocations +from boto.storage_uri import BucketStorageUri, FileStorageUri +import boto.plugin +import datetime +import os +import platform +import re +import sys +import logging +import logging.config + +from boto.compat import urlparse +from boto.exception import InvalidUriError + +__version__ = '2.38.0' +Version = __version__ # for backware compatibility + +# http://bugs.python.org/issue7980 +datetime.datetime.strptime('', '') + +UserAgent = 'Boto/%s Python/%s %s/%s' % ( + __version__, + platform.python_version(), + platform.system(), + platform.release() +) +config = Config() + +# Regex to disallow buckets violating charset or not [3..255] chars total. 
+
+
+def init_logging():
+    for file in BotoConfigLocations:
+        try:
+            logging.config.fileConfig(os.path.expanduser(file))
+        except Exception:
+            pass
+
+
+class NullHandler(logging.Handler):
+    def emit(self, record):
+        pass
+
+log = logging.getLogger('boto')
+perflog = logging.getLogger('boto.perf')
+log.addHandler(NullHandler())
+perflog.addHandler(NullHandler())
+init_logging()
+
+# convenience function to set logging to a particular file
+
+
+def set_file_logger(name, filepath, level=logging.INFO, format_string=None):
+    global log
+    if not format_string:
+        format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s"
+    logger = logging.getLogger(name)
+    logger.setLevel(level)
+    fh = logging.FileHandler(filepath)
+    fh.setLevel(level)
+    formatter = logging.Formatter(format_string)
+    fh.setFormatter(formatter)
+    logger.addHandler(fh)
+    log = logger
+
+
+def set_stream_logger(name, level=logging.DEBUG, format_string=None):
+    global log
+    if not format_string:
+        format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s"
+    logger = logging.getLogger(name)
+    logger.setLevel(level)
+    fh = logging.StreamHandler()
+    fh.setLevel(level)
+    formatter = logging.Formatter(format_string)
+    fh.setFormatter(formatter)
+    logger.addHandler(fh)
+    log = logger
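+
+# For example, to surface boto's debug logging during development (an
+# illustrative snippet, not part of the library itself):
+#   import boto
+#   boto.set_stream_logger('boto')
+#   boto.connect_s3()   # subsequent requests are logged to stderr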
+
+
+def connect_sqs(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+    """
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.sqs.connection.SQSConnection`
+    :return: A connection to Amazon's SQS
+    """
+    from boto.sqs.connection import SQSConnection
+    return SQSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+
+def connect_s3(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+    """
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.s3.connection.S3Connection`
+    :return: A connection to Amazon's S3
+    """
+    from boto.s3.connection import S3Connection
+    return S3Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+
+def connect_gs(gs_access_key_id=None, gs_secret_access_key=None, **kwargs):
+    """
+    @type gs_access_key_id: string
+    @param gs_access_key_id: Your Google Cloud Storage Access Key ID
+
+    @type gs_secret_access_key: string
+    @param gs_secret_access_key: Your Google Cloud Storage Secret Access Key
+
+    @rtype: L{GSConnection}
+    @return: A connection to Google's Storage service
+    """
+    from boto.gs.connection import GSConnection
+    return GSConnection(gs_access_key_id, gs_secret_access_key, **kwargs)
+
+
+def connect_ec2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+    """
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.ec2.connection.EC2Connection`
+    :return: A connection to Amazon's EC2
+    """
+    from boto.ec2.connection import EC2Connection
+    return EC2Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
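+
+# All of the connect_* helpers in this module share the same calling
+# convention (illustrative; credentials may also come from the environment
+# or the boto config file, and the key values below are placeholders):
+#   import boto
+#   s3 = boto.connect_s3()
+#   ec2 = boto.connect_ec2(aws_access_key_id='AKIA...',
+#                          aws_secret_access_key='...')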
+
+
+def connect_elb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+    """
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.ec2.elb.ELBConnection`
+    :return: A connection to Amazon's Load Balancing Service
+    """
+    from boto.ec2.elb import ELBConnection
+    return ELBConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+
+def connect_autoscale(aws_access_key_id=None, aws_secret_access_key=None,
+                      **kwargs):
+    """
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.ec2.autoscale.AutoScaleConnection`
+    :return: A connection to Amazon's Auto Scaling Service
+
+    :type use_block_device_types: bool
+    :param use_block_device_types: Specifies whether to return described
+        Launch Configs with block device mappings containing block device
+        types, or a list of old style block device mappings (deprecated).
+        This defaults to false for compatibility with the old incorrect
+        style.
+    """
+    from boto.ec2.autoscale import AutoScaleConnection
+    return AutoScaleConnection(aws_access_key_id, aws_secret_access_key,
+                               **kwargs)
+
+
+def connect_cloudwatch(aws_access_key_id=None, aws_secret_access_key=None,
+                       **kwargs):
+    """
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.ec2.cloudwatch.CloudWatchConnection`
+    :return: A connection to Amazon's EC2 Monitoring service
+    """
+    from boto.ec2.cloudwatch import CloudWatchConnection
+    return CloudWatchConnection(aws_access_key_id, aws_secret_access_key,
+                                **kwargs)
+
+
+def connect_sdb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+    """
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.sdb.connection.SDBConnection`
+    :return: A connection to Amazon's SDB
+    """
+    from boto.sdb.connection import SDBConnection
+    return SDBConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+
+def connect_fps(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+    """
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.fps.connection.FPSConnection`
+    :return: A connection to FPS
+    """
+    from boto.fps.connection import FPSConnection
+    return FPSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+
+def connect_mturk(aws_access_key_id=None, aws_secret_access_key=None,
+                  **kwargs):
+    """
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.mturk.connection.MTurkConnection`
+    :return: A connection to MTurk
+    """
+    from boto.mturk.connection import MTurkConnection
+    return MTurkConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+
+def connect_cloudfront(aws_access_key_id=None, aws_secret_access_key=None,
+                       **kwargs):
+    """
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.cloudfront.CloudFrontConnection`
+    :return: A connection to Amazon's CloudFront service
+    """
+    from boto.cloudfront import CloudFrontConnection
+    return CloudFrontConnection(aws_access_key_id, aws_secret_access_key,
+                                **kwargs)
+
+
+def connect_vpc(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+    """
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.vpc.VPCConnection`
+    :return: A connection to VPC
+    """
+    from boto.vpc import VPCConnection
+    return VPCConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+
+def connect_rds(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+    """
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.rds.RDSConnection`
+    :return: A connection to RDS
+    """
+    from boto.rds import RDSConnection
+    return RDSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+
+def connect_rds2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+    """
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.rds2.layer1.RDSConnection`
+    :return: A connection to RDS
+    """
+    from boto.rds2.layer1 import RDSConnection
+    return RDSConnection(
+        aws_access_key_id=aws_access_key_id,
+        aws_secret_access_key=aws_secret_access_key,
+        **kwargs
+    )
+
+
+def connect_emr(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+    """
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.emr.EmrConnection`
+    :return: A connection to Elastic MapReduce
+    """
+    from boto.emr import EmrConnection
+    return EmrConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+
+def connect_sns(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+    """
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.sns.SNSConnection`
+    :return: A connection to Amazon's SNS
+    """
+    from boto.sns import SNSConnection
+    return SNSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+
+def connect_iam(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+    """
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.iam.IAMConnection`
+    :return: A connection to Amazon's IAM
+    """
+    from boto.iam import IAMConnection
+    return IAMConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+
+def connect_route53(aws_access_key_id=None, aws_secret_access_key=None,
+                    **kwargs):
+    """
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.route53.Route53Connection`
+    :return: A connection to Amazon's Route53 DNS Service
+    """
+    from boto.route53 import Route53Connection
+    return Route53Connection(aws_access_key_id, aws_secret_access_key,
+                             **kwargs)
+
+
+def connect_cloudformation(aws_access_key_id=None, aws_secret_access_key=None,
+                           **kwargs):
+    """
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.cloudformation.CloudFormationConnection`
+    :return: A connection to Amazon's CloudFormation Service
+    """
+    from boto.cloudformation import CloudFormationConnection
+    return CloudFormationConnection(aws_access_key_id, aws_secret_access_key,
+                                    **kwargs)
+
+
+def connect_euca(host=None, aws_access_key_id=None, aws_secret_access_key=None,
+                 port=8773, path='/services/Eucalyptus', is_secure=False,
+                 **kwargs):
+    """
+    Connect to a Eucalyptus service.
+
+    :type host: string
+    :param host: the host name or ip address of the Eucalyptus server
+
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.ec2.connection.EC2Connection`
+    :return: A connection to Eucalyptus server
+    """
+    from boto.ec2 import EC2Connection
+    from boto.ec2.regioninfo import RegionInfo
+
+    # Check for values in boto config, if not supplied as args
+    if not aws_access_key_id:
+        aws_access_key_id = config.get('Credentials',
+                                       'euca_access_key_id',
+                                       None)
+    if not aws_secret_access_key:
+        aws_secret_access_key = config.get('Credentials',
+                                           'euca_secret_access_key',
+                                           None)
+    if not host:
+        host = config.get('Boto', 'eucalyptus_host', None)
+
+    reg = RegionInfo(name='eucalyptus', endpoint=host)
+    return EC2Connection(aws_access_key_id, aws_secret_access_key,
+                         region=reg, port=port, path=path,
+                         is_secure=is_secure, **kwargs)
+
+
+def connect_glacier(aws_access_key_id=None, aws_secret_access_key=None,
+                    **kwargs):
+    """
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.glacier.layer2.Layer2`
+    :return: A connection to Amazon's Glacier Service
+    """
+    from boto.glacier.layer2 import Layer2
+    return Layer2(aws_access_key_id, aws_secret_access_key,
+                  **kwargs)
+
+
+def connect_ec2_endpoint(url, aws_access_key_id=None,
+                         aws_secret_access_key=None,
+                         **kwargs):
+    """
+    Connect to an EC2 API endpoint.  Additional arguments are passed
+    through to connect_ec2.
+ + :type url: string + :param url: A url for the ec2 api endpoint to connect to + + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.ec2.connection.EC2Connection` + :return: A connection to Eucalyptus server + """ + from boto.ec2.regioninfo import RegionInfo + + purl = urlparse(url) + kwargs['port'] = purl.port + kwargs['host'] = purl.hostname + kwargs['path'] = purl.path + if not 'is_secure' in kwargs: + kwargs['is_secure'] = (purl.scheme == "https") + + kwargs['region'] = RegionInfo(name=purl.hostname, + endpoint=purl.hostname) + kwargs['aws_access_key_id'] = aws_access_key_id + kwargs['aws_secret_access_key'] = aws_secret_access_key + + return(connect_ec2(**kwargs)) + + +def connect_walrus(host=None, aws_access_key_id=None, + aws_secret_access_key=None, + port=8773, path='/services/Walrus', is_secure=False, + **kwargs): + """ + Connect to a Walrus service. + + :type host: string + :param host: the host name or ip address of the Walrus server + + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.s3.connection.S3Connection` + :return: A connection to Walrus + """ + from boto.s3.connection import S3Connection + from boto.s3.connection import OrdinaryCallingFormat + + # Check for values in boto config, if not supplied as args + if not aws_access_key_id: + aws_access_key_id = config.get('Credentials', + 'euca_access_key_id', + None) + if not aws_secret_access_key: + aws_secret_access_key = config.get('Credentials', + 'euca_secret_access_key', + None) + if not host: + host = config.get('Boto', 'walrus_host', None) + + return S3Connection(aws_access_key_id, aws_secret_access_key, + host=host, port=port, path=path, + calling_format=OrdinaryCallingFormat(), + is_secure=is_secure, **kwargs) + + +def connect_ses(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.ses.SESConnection` + :return: A connection to Amazon's SES + """ + from boto.ses import SESConnection + return SESConnection(aws_access_key_id, aws_secret_access_key, **kwargs) + + +def connect_sts(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.sts.STSConnection` + :return: A connection to Amazon's STS + """ + from boto.sts import STSConnection + return STSConnection(aws_access_key_id, aws_secret_access_key, **kwargs) + + +def connect_ia(ia_access_key_id=None, ia_secret_access_key=None, + is_secure=False, **kwargs): + """ + Connect to the Internet Archive via their S3-like API. + + :type ia_access_key_id: string + :param ia_access_key_id: Your IA Access Key ID. This will also look + in your boto config file for an entry in the Credentials + section called "ia_access_key_id" + + :type ia_secret_access_key: string + :param ia_secret_access_key: Your IA Secret Access Key. 
This will also + look in your boto config file for an entry in the Credentials + section called "ia_secret_access_key" + + :rtype: :class:`boto.s3.connection.S3Connection` + :return: A connection to the Internet Archive + """ + from boto.s3.connection import S3Connection + from boto.s3.connection import OrdinaryCallingFormat + + access_key = config.get('Credentials', 'ia_access_key_id', + ia_access_key_id) + secret_key = config.get('Credentials', 'ia_secret_access_key', + ia_secret_access_key) + + return S3Connection(access_key, secret_key, + host='s3.us.archive.org', + calling_format=OrdinaryCallingFormat(), + is_secure=is_secure, **kwargs) + + +def connect_dynamodb(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.dynamodb.layer2.Layer2` + :return: A connection to the Layer2 interface for DynamoDB. + """ + from boto.dynamodb.layer2 import Layer2 + return Layer2(aws_access_key_id, aws_secret_access_key, **kwargs) + + +def connect_swf(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.swf.layer1.Layer1` + :return: A connection to the Layer1 interface for SWF. + """ + from boto.swf.layer1 import Layer1 + return Layer1(aws_access_key_id, aws_secret_access_key, **kwargs) + + +def connect_cloudsearch(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.cloudsearch.layer2.Layer2` + :return: A connection to Amazon's CloudSearch service + """ + from boto.cloudsearch.layer2 import Layer2 + return Layer2(aws_access_key_id, aws_secret_access_key, + **kwargs) + + +def connect_cloudsearch2(aws_access_key_id=None, + aws_secret_access_key=None, + sign_request=False, + **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :type sign_request: bool + :param sign_request: whether or not to sign search and + upload requests + + :rtype: :class:`boto.cloudsearch2.layer2.Layer2` + :return: A connection to Amazon's CloudSearch2 service + """ + from boto.cloudsearch2.layer2 import Layer2 + return Layer2(aws_access_key_id, aws_secret_access_key, + sign_request=sign_request, + **kwargs) + + +def connect_cloudsearchdomain(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.cloudsearchdomain.layer1.CloudSearchDomainConnection` + :return: A connection to Amazon's CloudSearch Domain service + """ + from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection + return CloudSearchDomainConnection(aws_access_key_id, + aws_secret_access_key, **kwargs) + + +def connect_beanstalk(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + :type 
aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.beanstalk.layer1.Layer1` + :return: A connection to Amazon's Elastic Beanstalk service + """ + from boto.beanstalk.layer1 import Layer1 + return Layer1(aws_access_key_id, aws_secret_access_key, **kwargs) + + +def connect_elastictranscoder(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.ets.layer1.ElasticTranscoderConnection` + :return: A connection to Amazon's Elastic Transcoder service + """ + from boto.elastictranscoder.layer1 import ElasticTranscoderConnection + return ElasticTranscoderConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs) + + +def connect_opsworks(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + from boto.opsworks.layer1 import OpsWorksConnection + return OpsWorksConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs) + + +def connect_redshift(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.redshift.layer1.RedshiftConnection` + :return: A connection to Amazon's Redshift service + """ + from boto.redshift.layer1 import RedshiftConnection + return RedshiftConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def connect_support(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.support.layer1.SupportConnection` + :return: A connection to Amazon's Support service + """ + from boto.support.layer1 import SupportConnection + return SupportConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def connect_cloudtrail(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + Connect to AWS CloudTrail + + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.cloudtrail.layer1.CloudtrailConnection` + :return: A connection to the AWS Cloudtrail service + """ + from boto.cloudtrail.layer1 import CloudTrailConnection + return CloudTrailConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def connect_directconnect(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + Connect to AWS DirectConnect + + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.directconnect.layer1.DirectConnectConnection` + :return: A connection to the AWS DirectConnect 
service
+    """
+    from boto.directconnect.layer1 import DirectConnectConnection
+    return DirectConnectConnection(
+        aws_access_key_id=aws_access_key_id,
+        aws_secret_access_key=aws_secret_access_key,
+        **kwargs
+    )
+
+
+def connect_kinesis(aws_access_key_id=None,
+                    aws_secret_access_key=None,
+                    **kwargs):
+    """
+    Connect to Amazon Kinesis
+
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.kinesis.layer1.KinesisConnection`
+    :return: A connection to the Amazon Kinesis service
+    """
+    from boto.kinesis.layer1 import KinesisConnection
+    return KinesisConnection(
+        aws_access_key_id=aws_access_key_id,
+        aws_secret_access_key=aws_secret_access_key,
+        **kwargs
+    )
+
+
+def connect_logs(aws_access_key_id=None,
+                 aws_secret_access_key=None,
+                 **kwargs):
+    """
+    Connect to Amazon CloudWatch Logs
+
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.logs.layer1.CloudWatchLogsConnection`
+    :return: A connection to the Amazon CloudWatch Logs service
+    """
+    from boto.logs.layer1 import CloudWatchLogsConnection
+    return CloudWatchLogsConnection(
+        aws_access_key_id=aws_access_key_id,
+        aws_secret_access_key=aws_secret_access_key,
+        **kwargs
+    )
+
+
+def connect_route53domains(aws_access_key_id=None,
+                           aws_secret_access_key=None,
+                           **kwargs):
+    """
+    Connect to Amazon Route 53 Domains
+
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.route53.domains.layer1.Route53DomainsConnection`
+    :return: A connection to the Amazon Route 53 Domains service
+    """
+    from boto.route53.domains.layer1 import Route53DomainsConnection
+    return Route53DomainsConnection(
+        aws_access_key_id=aws_access_key_id,
+        aws_secret_access_key=aws_secret_access_key,
+        **kwargs
+    )
+
+
+def connect_cognito_identity(aws_access_key_id=None,
+                             aws_secret_access_key=None,
+                             **kwargs):
+    """
+    Connect to Amazon Cognito Identity
+
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.cognito.identity.layer1.CognitoIdentityConnection`
+    :return: A connection to the Amazon Cognito Identity service
+    """
+    from boto.cognito.identity.layer1 import CognitoIdentityConnection
+    return CognitoIdentityConnection(
+        aws_access_key_id=aws_access_key_id,
+        aws_secret_access_key=aws_secret_access_key,
+        **kwargs
+    )
+
+
+def connect_cognito_sync(aws_access_key_id=None,
+                         aws_secret_access_key=None,
+                         **kwargs):
+    """
+    Connect to Amazon Cognito Sync
+
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.cognito.sync.layer1.CognitoSyncConnection`
+    :return: A connection to the Amazon Cognito Sync service
+    """
+    from boto.cognito.sync.layer1 import CognitoSyncConnection
+    return CognitoSyncConnection(
+        aws_access_key_id=aws_access_key_id,
+        aws_secret_access_key=aws_secret_access_key,
+        **kwargs
+    )
+
+
+def connect_kms(aws_access_key_id=None,
+                aws_secret_access_key=None,
+                **kwargs):
""" + Connect to AWS Key Management Service + + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + rtype: :class:`boto.kms.layer1.KMSConnection` + :return: A connection to the AWS Key Management Service + """ + from boto.kms.layer1 import KMSConnection + return KMSConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def connect_awslambda(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + Connect to AWS Lambda + + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + rtype: :class:`boto.awslambda.layer1.AWSLambdaConnection` + :return: A connection to the AWS Lambda service + """ + from boto.awslambda.layer1 import AWSLambdaConnection + return AWSLambdaConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def connect_codedeploy(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + Connect to AWS CodeDeploy + + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + rtype: :class:`boto.cognito.sync.layer1.CodeDeployConnection` + :return: A connection to the AWS CodeDeploy service + """ + from boto.codedeploy.layer1 import CodeDeployConnection + return CodeDeployConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def connect_configservice(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + Connect to AWS Config + + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + rtype: :class:`boto.kms.layer1.ConfigServiceConnection` + :return: A connection to the AWS Config service + """ + from boto.configservice.layer1 import ConfigServiceConnection + return ConfigServiceConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def connect_cloudhsm(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + Connect to AWS CloudHSM + + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + rtype: :class:`boto.cloudhsm.layer1.CloudHSMConnection` + :return: A connection to the AWS CloudHSM service + """ + from boto.cloudhsm.layer1 import CloudHSMConnection + return CloudHSMConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def connect_ec2containerservice(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + Connect to Amazon EC2 Container Service + rtype: :class:`boto.ec2containerservice.layer1.EC2ContainerServiceConnection` + :return: A connection to the Amazon EC2 Container Service + """ + from boto.ec2containerservice.layer1 import EC2ContainerServiceConnection + return EC2ContainerServiceConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def 
connect_machinelearning(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + Connect to Amazon Machine Learning service + rtype: :class:`boto.machinelearning.layer1.MachineLearningConnection` + :return: A connection to the Amazon Machine Learning service + """ + from boto.machinelearning.layer1 import MachineLearningConnection + return MachineLearningConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def storage_uri(uri_str, default_scheme='file', debug=0, validate=True, + bucket_storage_uri_class=BucketStorageUri, + suppress_consec_slashes=True, is_latest=False): + """ + Instantiate a StorageUri from a URI string. + + :type uri_str: string + :param uri_str: URI naming bucket + optional object. + :type default_scheme: string + :param default_scheme: default scheme for scheme-less URIs. + :type debug: int + :param debug: debug level to pass in to boto connection (range 0..2). + :type validate: bool + :param validate: whether to check for bucket name validity. + :type bucket_storage_uri_class: BucketStorageUri interface. + :param bucket_storage_uri_class: Allows mocking for unit tests. + :param suppress_consec_slashes: If provided, controls whether + consecutive slashes will be suppressed in key paths. + :type is_latest: bool + :param is_latest: whether this versioned object represents the + current version. + + We allow validate to be disabled to allow caller + to implement bucket-level wildcarding (outside the boto library; + see gsutil). + + :rtype: :class:`boto.StorageUri` subclass + :return: StorageUri subclass for given URI. + + ``uri_str`` must be one of the following formats: + + * gs://bucket/name + * gs://bucket/name#ver + * s3://bucket/name + * gs://bucket + * s3://bucket + * filename (which could be a Unix path like /a/b/c or a Windows path like + C:\a\b\c) + + The last example uses the default scheme ('file', unless overridden). + """ + version_id = None + generation = None + + # Manually parse URI components instead of using urlparse because + # what we're calling URIs don't really fit the standard syntax for URIs + # (the latter includes an optional host/net location part). + end_scheme_idx = uri_str.find('://') + if end_scheme_idx == -1: + scheme = default_scheme.lower() + path = uri_str + else: + scheme = uri_str[0:end_scheme_idx].lower() + path = uri_str[end_scheme_idx + 3:] + + if scheme not in ['file', 's3', 'gs']: + raise InvalidUriError('Unrecognized scheme "%s"' % scheme) + if scheme == 'file': + # For file URIs we have no bucket name, and use the complete path + # (minus 'file://') as the object name. + is_stream = False + if path == '-': + is_stream = True + return FileStorageUri(path, debug, is_stream) + else: + path_parts = path.split('/', 1) + bucket_name = path_parts[0] + object_name = '' + # If validate enabled, ensure the bucket name is valid, to avoid + # possibly confusing other parts of the code. (For example if we didn't + # catch bucket names containing ':', when a user tried to connect to + # the server with that name they might get a confusing error about + # non-integer port numbers.) 
+ if (validate and bucket_name and + (not BUCKET_NAME_RE.match(bucket_name) + or TOO_LONG_DNS_NAME_COMP.search(bucket_name))): + raise InvalidUriError('Invalid bucket name in URI "%s"' % uri_str) + if scheme == 'gs': + match = GENERATION_RE.search(path) + if match: + md = match.groupdict() + versionless_uri_str = md['versionless_uri_str'] + path_parts = versionless_uri_str.split('/', 1) + generation = int(md['generation']) + elif scheme == 's3': + match = VERSION_RE.search(path) + if match: + md = match.groupdict() + versionless_uri_str = md['versionless_uri_str'] + path_parts = versionless_uri_str.split('/', 1) + version_id = md['version_id'] + else: + raise InvalidUriError('Unrecognized scheme "%s"' % scheme) + if len(path_parts) > 1: + object_name = path_parts[1] + return bucket_storage_uri_class( + scheme, bucket_name, object_name, debug, + suppress_consec_slashes=suppress_consec_slashes, + version_id=version_id, generation=generation, is_latest=is_latest) + + +def storage_uri_for_key(key): + """Returns a StorageUri for the given key. + + :type key: :class:`boto.s3.key.Key` or subclass + :param key: URI naming bucket + optional object. + """ + if not isinstance(key, boto.s3.key.Key): + raise InvalidUriError('Requested key (%s) is not a subclass of ' + 'boto.s3.key.Key' % str(type(key))) + prov_name = key.bucket.connection.provider.get_provider_name() + uri_str = '%s://%s/%s' % (prov_name, key.bucket.name, key.name) + return storage_uri(uri_str) + +boto.plugin.load_plugins(config) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/auth.py b/desktop/core/ext-py/boto-2.38.0/boto/auth.py new file mode 100644 index 0000000000000000000000000000000000000000..636dcfddb183121c49afe9cde7e7b657d8d56357 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/auth.py @@ -0,0 +1,1040 @@ +# Copyright 2010 Google Inc. +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+
+
+"""
+Handles the authentication required by AWS and GS.
+"""
+
+import base64
+import boto
+import boto.auth_handler
+import boto.exception
+import boto.plugin
+import boto.utils
+import copy
+import datetime
+from email.utils import formatdate
+import hmac
+import os
+import posixpath
+
+from boto.compat import urllib, encodebytes
+from boto.auth_handler import AuthHandler
+from boto.exception import BotoClientError
+
+try:
+    from hashlib import sha1 as sha
+    from hashlib import sha256 as sha256
+except ImportError:
+    import sha
+    sha256 = None
+
+
+# Region detection strings to determine if SigV4 should be used
+# by default.
+SIGV4_DETECT = [
+    '.cn-',
+    # In eu-central we support both host styles for S3
+    '.eu-central',
+    '-eu-central',
+]
+
+
+class HmacKeys(object):
+    """Key-based auth handler helper."""
+
+    def __init__(self, host, config, provider):
+        if provider.access_key is None or provider.secret_key is None:
+            raise boto.auth_handler.NotReadyToAuthenticate()
+        self.host = host
+        self.update_provider(provider)
+
+    def update_provider(self, provider):
+        self._provider = provider
+        self._hmac = hmac.new(self._provider.secret_key.encode('utf-8'),
+                              digestmod=sha)
+        if sha256:
+            self._hmac_256 = hmac.new(self._provider.secret_key.encode('utf-8'),
+                                      digestmod=sha256)
+        else:
+            self._hmac_256 = None
+
+    def algorithm(self):
+        if self._hmac_256:
+            return 'HmacSHA256'
+        else:
+            return 'HmacSHA1'
+
+    def _get_hmac(self):
+        if self._hmac_256:
+            digestmod = sha256
+        else:
+            digestmod = sha
+        return hmac.new(self._provider.secret_key.encode('utf-8'),
+                        digestmod=digestmod)
+
+    def sign_string(self, string_to_sign):
+        new_hmac = self._get_hmac()
+        new_hmac.update(string_to_sign.encode('utf-8'))
+        return encodebytes(new_hmac.digest()).decode('utf-8').strip()
+
+    def __getstate__(self):
+        pickled_dict = copy.copy(self.__dict__)
+        del pickled_dict['_hmac']
+        del pickled_dict['_hmac_256']
+        return pickled_dict
+
+    def __setstate__(self, dct):
+        self.__dict__ = dct
+        self.update_provider(self._provider)
+
+
+class AnonAuthHandler(AuthHandler, HmacKeys):
+    """
+    Implements anonymous requests.
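+
+    ``add_auth`` below is deliberately a no-op, so the request is sent
+    without any Authorization header.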
+ """ + + capability = ['anon'] + + def __init__(self, host, config, provider): + super(AnonAuthHandler, self).__init__(host, config, provider) + + def add_auth(self, http_request, **kwargs): + pass + + +class HmacAuthV1Handler(AuthHandler, HmacKeys): + """ Implements the HMAC request signing used by S3 and GS.""" + + capability = ['hmac-v1', 's3'] + + def __init__(self, host, config, provider): + AuthHandler.__init__(self, host, config, provider) + HmacKeys.__init__(self, host, config, provider) + self._hmac_256 = None + + def update_provider(self, provider): + super(HmacAuthV1Handler, self).update_provider(provider) + self._hmac_256 = None + + def add_auth(self, http_request, **kwargs): + headers = http_request.headers + method = http_request.method + auth_path = http_request.auth_path + if 'Date' not in headers: + headers['Date'] = formatdate(usegmt=True) + + if self._provider.security_token: + key = self._provider.security_token_header + headers[key] = self._provider.security_token + string_to_sign = boto.utils.canonical_string(method, auth_path, + headers, None, + self._provider) + boto.log.debug('StringToSign:\n%s' % string_to_sign) + b64_hmac = self.sign_string(string_to_sign) + auth_hdr = self._provider.auth_header + auth = ("%s %s:%s" % (auth_hdr, self._provider.access_key, b64_hmac)) + boto.log.debug('Signature:\n%s' % auth) + headers['Authorization'] = auth + + +class HmacAuthV2Handler(AuthHandler, HmacKeys): + """ + Implements the simplified HMAC authorization used by CloudFront. + """ + capability = ['hmac-v2', 'cloudfront'] + + def __init__(self, host, config, provider): + AuthHandler.__init__(self, host, config, provider) + HmacKeys.__init__(self, host, config, provider) + self._hmac_256 = None + + def update_provider(self, provider): + super(HmacAuthV2Handler, self).update_provider(provider) + self._hmac_256 = None + + def add_auth(self, http_request, **kwargs): + headers = http_request.headers + if 'Date' not in headers: + headers['Date'] = formatdate(usegmt=True) + if self._provider.security_token: + key = self._provider.security_token_header + headers[key] = self._provider.security_token + + b64_hmac = self.sign_string(headers['Date']) + auth_hdr = self._provider.auth_header + headers['Authorization'] = ("%s %s:%s" % + (auth_hdr, + self._provider.access_key, b64_hmac)) + + +class HmacAuthV3Handler(AuthHandler, HmacKeys): + """Implements the new Version 3 HMAC authorization used by Route53.""" + + capability = ['hmac-v3', 'route53', 'ses'] + + def __init__(self, host, config, provider): + AuthHandler.__init__(self, host, config, provider) + HmacKeys.__init__(self, host, config, provider) + + def add_auth(self, http_request, **kwargs): + headers = http_request.headers + if 'Date' not in headers: + headers['Date'] = formatdate(usegmt=True) + + if self._provider.security_token: + key = self._provider.security_token_header + headers[key] = self._provider.security_token + + b64_hmac = self.sign_string(headers['Date']) + s = "AWS3-HTTPS AWSAccessKeyId=%s," % self._provider.access_key + s += "Algorithm=%s,Signature=%s" % (self.algorithm(), b64_hmac) + headers['X-Amzn-Authorization'] = s + + +class HmacAuthV3HTTPHandler(AuthHandler, HmacKeys): + """ + Implements the new Version 3 HMAC authorization used by DynamoDB. 
+ """ + + capability = ['hmac-v3-http'] + + def __init__(self, host, config, provider): + AuthHandler.__init__(self, host, config, provider) + HmacKeys.__init__(self, host, config, provider) + + def headers_to_sign(self, http_request): + """ + Select the headers from the request that need to be included + in the StringToSign. + """ + headers_to_sign = {'Host': self.host} + for name, value in http_request.headers.items(): + lname = name.lower() + if lname.startswith('x-amz'): + headers_to_sign[name] = value + return headers_to_sign + + def canonical_headers(self, headers_to_sign): + """ + Return the headers that need to be included in the StringToSign + in their canonical form by converting all header keys to lower + case, sorting them in alphabetical order and then joining + them into a string, separated by newlines. + """ + l = sorted(['%s:%s' % (n.lower().strip(), + headers_to_sign[n].strip()) for n in headers_to_sign]) + return '\n'.join(l) + + def string_to_sign(self, http_request): + """ + Return the canonical StringToSign as well as a dict + containing the original version of all headers that + were included in the StringToSign. + """ + headers_to_sign = self.headers_to_sign(http_request) + canonical_headers = self.canonical_headers(headers_to_sign) + string_to_sign = '\n'.join([http_request.method, + http_request.auth_path, + '', + canonical_headers, + '', + http_request.body]) + return string_to_sign, headers_to_sign + + def add_auth(self, req, **kwargs): + """ + Add AWS3 authentication to a request. + + :type req: :class`boto.connection.HTTPRequest` + :param req: The HTTPRequest object. + """ + # This could be a retry. Make sure the previous + # authorization header is removed first. + if 'X-Amzn-Authorization' in req.headers: + del req.headers['X-Amzn-Authorization'] + req.headers['X-Amz-Date'] = formatdate(usegmt=True) + if self._provider.security_token: + req.headers['X-Amz-Security-Token'] = self._provider.security_token + string_to_sign, headers_to_sign = self.string_to_sign(req) + boto.log.debug('StringToSign:\n%s' % string_to_sign) + hash_value = sha256(string_to_sign.encode('utf-8')).digest() + b64_hmac = self.sign_string(hash_value) + s = "AWS3 AWSAccessKeyId=%s," % self._provider.access_key + s += "Algorithm=%s," % self.algorithm() + s += "SignedHeaders=%s," % ';'.join(headers_to_sign) + s += "Signature=%s" % b64_hmac + req.headers['X-Amzn-Authorization'] = s + + +class HmacAuthV4Handler(AuthHandler, HmacKeys): + """ + Implements the new Version 4 HMAC authorization. + """ + + capability = ['hmac-v4'] + + def __init__(self, host, config, provider, + service_name=None, region_name=None): + AuthHandler.__init__(self, host, config, provider) + HmacKeys.__init__(self, host, config, provider) + # You can set the service_name and region_name to override the + # values which would otherwise come from the endpoint, e.g. + # ..amazonaws.com. + self.service_name = service_name + self.region_name = region_name + + def _sign(self, key, msg, hex=False): + if not isinstance(key, bytes): + key = key.encode('utf-8') + + if hex: + sig = hmac.new(key, msg.encode('utf-8'), sha256).hexdigest() + else: + sig = hmac.new(key, msg.encode('utf-8'), sha256).digest() + return sig + + def headers_to_sign(self, http_request): + """ + Select the headers from the request that need to be included + in the StringToSign. 
+ """ + host_header_value = self.host_header(self.host, http_request) + if http_request.headers.get('Host'): + host_header_value = http_request.headers['Host'] + headers_to_sign = {'Host': host_header_value} + for name, value in http_request.headers.items(): + lname = name.lower() + if lname.startswith('x-amz'): + if isinstance(value, bytes): + value = value.decode('utf-8') + headers_to_sign[name] = value + return headers_to_sign + + def host_header(self, host, http_request): + port = http_request.port + secure = http_request.protocol == 'https' + if ((port == 80 and not secure) or (port == 443 and secure)): + return host + return '%s:%s' % (host, port) + + def query_string(self, http_request): + parameter_names = sorted(http_request.params.keys()) + pairs = [] + for pname in parameter_names: + pval = boto.utils.get_utf8_value(http_request.params[pname]) + pairs.append(urllib.parse.quote(pname, safe='') + '=' + + urllib.parse.quote(pval, safe='-_~')) + return '&'.join(pairs) + + def canonical_query_string(self, http_request): + # POST requests pass parameters in through the + # http_request.body field. + if http_request.method == 'POST': + return "" + l = [] + for param in sorted(http_request.params): + value = boto.utils.get_utf8_value(http_request.params[param]) + l.append('%s=%s' % (urllib.parse.quote(param, safe='-_.~'), + urllib.parse.quote(value, safe='-_.~'))) + return '&'.join(l) + + def canonical_headers(self, headers_to_sign): + """ + Return the headers that need to be included in the StringToSign + in their canonical form by converting all header keys to lower + case, sorting them in alphabetical order and then joining + them into a string, separated by newlines. + """ + canonical = [] + + for header in headers_to_sign: + c_name = header.lower().strip() + raw_value = str(headers_to_sign[header]) + if '"' in raw_value: + c_value = raw_value.strip() + else: + c_value = ' '.join(raw_value.strip().split()) + canonical.append('%s:%s' % (c_name, c_value)) + return '\n'.join(sorted(canonical)) + + def signed_headers(self, headers_to_sign): + l = ['%s' % n.lower().strip() for n in headers_to_sign] + l = sorted(l) + return ';'.join(l) + + def canonical_uri(self, http_request): + path = http_request.auth_path + # Normalize the path + # in windows normpath('/') will be '\\' so we chane it back to '/' + normalized = posixpath.normpath(path).replace('\\', '/') + # Then urlencode whatever's left. + encoded = urllib.parse.quote(normalized) + if len(path) > 1 and path.endswith('/'): + encoded += '/' + return encoded + + def payload(self, http_request): + body = http_request.body + # If the body is a file like object, we can use + # boto.utils.compute_hash, which will avoid reading + # the entire body into memory. 
+        if hasattr(body, 'seek') and hasattr(body, 'read'):
+            return boto.utils.compute_hash(body, hash_algorithm=sha256)[0]
+        elif not isinstance(body, bytes):
+            body = body.encode('utf-8')
+        return sha256(body).hexdigest()
+
+    def canonical_request(self, http_request):
+        cr = [http_request.method.upper()]
+        cr.append(self.canonical_uri(http_request))
+        cr.append(self.canonical_query_string(http_request))
+        headers_to_sign = self.headers_to_sign(http_request)
+        cr.append(self.canonical_headers(headers_to_sign) + '\n')
+        cr.append(self.signed_headers(headers_to_sign))
+        cr.append(self.payload(http_request))
+        return '\n'.join(cr)
+
+    def scope(self, http_request):
+        scope = [self._provider.access_key]
+        scope.append(http_request.timestamp)
+        scope.append(http_request.region_name)
+        scope.append(http_request.service_name)
+        scope.append('aws4_request')
+        return '/'.join(scope)
+
+    def split_host_parts(self, host):
+        return host.split('.')
+
+    def determine_region_name(self, host):
+        parts = self.split_host_parts(host)
+        if self.region_name is not None:
+            region_name = self.region_name
+        elif len(parts) > 1:
+            if parts[1] == 'us-gov':
+                region_name = 'us-gov-west-1'
+            else:
+                if len(parts) == 3:
+                    region_name = 'us-east-1'
+                else:
+                    region_name = parts[1]
+        else:
+            region_name = parts[0]
+
+        return region_name
+
+    def determine_service_name(self, host):
+        parts = self.split_host_parts(host)
+        if self.service_name is not None:
+            service_name = self.service_name
+        else:
+            service_name = parts[0]
+        return service_name
+
+    def credential_scope(self, http_request):
+        scope = []
+        http_request.timestamp = http_request.headers['X-Amz-Date'][0:8]
+        scope.append(http_request.timestamp)
+        # The service_name and region_name either come from:
+        # * the service_name/region_name attrs or (if these values are None)
+        # * parsing the endpoint <service>.<region>.amazonaws.com.
+        region_name = self.determine_region_name(http_request.host)
+        service_name = self.determine_service_name(http_request.host)
+        http_request.service_name = service_name
+        http_request.region_name = region_name
+
+        scope.append(http_request.region_name)
+        scope.append(http_request.service_name)
+        scope.append('aws4_request')
+        return '/'.join(scope)
+
+    def string_to_sign(self, http_request, canonical_request):
+        """
+        Return the canonical StringToSign as well as a dict
+        containing the original version of all headers that
+        were included in the StringToSign.
+        """
+        sts = ['AWS4-HMAC-SHA256']
+        sts.append(http_request.headers['X-Amz-Date'])
+        sts.append(self.credential_scope(http_request))
+        sts.append(sha256(canonical_request.encode('utf-8')).hexdigest())
+        return '\n'.join(sts)
+
+    def signature(self, http_request, string_to_sign):
+        key = self._provider.secret_key
+        k_date = self._sign(('AWS4' + key).encode('utf-8'),
+                            http_request.timestamp)
+        k_region = self._sign(k_date, http_request.region_name)
+        k_service = self._sign(k_region, http_request.service_name)
+        k_signing = self._sign(k_service, 'aws4_request')
+        return self._sign(k_signing, string_to_sign, hex=True)
+
+    def add_auth(self, req, **kwargs):
+        """
+        Add AWS4 authentication to a request.
+
+        :type req: :class:`boto.connection.HTTPRequest`
+        :param req: The HTTPRequest object.
+        """
+        # This could be a retry. Make sure the previous
+        # authorization header is removed first.
+        if 'X-Amzn-Authorization' in req.headers:
+            del req.headers['X-Amzn-Authorization']
+        now = datetime.datetime.utcnow()
+        req.headers['X-Amz-Date'] = now.strftime('%Y%m%dT%H%M%SZ')
+        if self._provider.security_token:
+            req.headers['X-Amz-Security-Token'] = self._provider.security_token
+        qs = self.query_string(req)
+
+        qs_to_post = qs
+
+        # We do not want to include any params that were mangled into
+        # the params if performing s3-sigv4, since they do not belong
+        # in the body of a POST for some requests. "Mangled" refers to
+        # items from the URL's query string being added to
+        # ``http_request.params``: those items get added to the body of
+        # the request, but the URL's query string does not belong in the
+        # body of the request. ``unmangled_req`` is the request as it
+        # was prior to the mangling; this kwarg will only appear for
+        # s3-sigv4.
+        if 'unmangled_req' in kwargs:
+            qs_to_post = self.query_string(kwargs['unmangled_req'])
+
+        if qs_to_post and req.method == 'POST':
+            # Stash request parameters into post body
+            # before we generate the signature.
+            req.body = qs_to_post
+            req.headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
+            req.headers['Content-Length'] = str(len(req.body))
+        else:
+            # Safe to modify req.path here since
+            # the signature will use req.auth_path.
+            req.path = req.path.split('?')[0]
+
+            if qs:
+                # Don't insert the '?' unless there's actually a query string
+                req.path = req.path + '?' + qs
+        canonical_request = self.canonical_request(req)
+        boto.log.debug('CanonicalRequest:\n%s' % canonical_request)
+        string_to_sign = self.string_to_sign(req, canonical_request)
+        boto.log.debug('StringToSign:\n%s' % string_to_sign)
+        signature = self.signature(req, string_to_sign)
+        boto.log.debug('Signature:\n%s' % signature)
+        headers_to_sign = self.headers_to_sign(req)
+        l = ['AWS4-HMAC-SHA256 Credential=%s' % self.scope(req)]
+        l.append('SignedHeaders=%s' % self.signed_headers(headers_to_sign))
+        l.append('Signature=%s' % signature)
+        req.headers['Authorization'] = ','.join(l)
+
+
+class S3HmacAuthV4Handler(HmacAuthV4Handler, AuthHandler):
+    """
+    Implements a variant of Version 4 HMAC authorization specific to S3.
+    """
+    capability = ['hmac-v4-s3']
+
+    def __init__(self, *args, **kwargs):
+        super(S3HmacAuthV4Handler, self).__init__(*args, **kwargs)
+
+        if self.region_name:
+            self.region_name = self.clean_region_name(self.region_name)
+
+    def clean_region_name(self, region_name):
+        if region_name.startswith('s3-'):
+            return region_name[3:]
+
+        return region_name
+
+    def canonical_uri(self, http_request):
+        # S3 does **NOT** do the path normalization that SigV4 typically does.
+        # Urlencode the path, **NOT** ``auth_path`` (because of vhosting).
+        path = urllib.parse.urlparse(http_request.path)
+        # Because some quoting may have already been applied, let's back it out.
+        unquoted = urllib.parse.unquote(path.path)
+        # Requote, this time addressing all characters.
+        encoded = urllib.parse.quote(unquoted)
+        return encoded
+
+    def canonical_query_string(self, http_request):
+        # Note that unlike the base class, we do not return an empty string
+        # for POST requests; query strings in the URL are included in the
+        # canonical query string.
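+        # Illustrative example (not from the original source): for a PUT to
+        # '/bukkit?acl&versionId=2', mangle_path_and_params() below moves
+        # 'acl' and 'versionId' into ``http_request.params``, so both appear
+        # here as 'acl=&versionId=2'.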
+ l = [] + for param in sorted(http_request.params): + value = boto.utils.get_utf8_value(http_request.params[param]) + l.append('%s=%s' % (urllib.parse.quote(param, safe='-_.~'), + urllib.parse.quote(value, safe='-_.~'))) + return '&'.join(l) + + def host_header(self, host, http_request): + port = http_request.port + secure = http_request.protocol == 'https' + if ((port == 80 and not secure) or (port == 443 and secure)): + return http_request.host + return '%s:%s' % (http_request.host, port) + + def headers_to_sign(self, http_request): + """ + Select the headers from the request that need to be included + in the StringToSign. + """ + host_header_value = self.host_header(self.host, http_request) + headers_to_sign = {'Host': host_header_value} + for name, value in http_request.headers.items(): + lname = name.lower() + # Hooray for the only difference! The main SigV4 signer only does + # ``Host`` + ``x-amz-*``. But S3 wants pretty much everything + # signed, except for authorization itself. + if lname not in ['authorization']: + headers_to_sign[name] = value + return headers_to_sign + + def determine_region_name(self, host): + # S3's different format(s) of representing region/service from the + # rest of AWS makes this hurt too. + # + # Possible domain formats: + # - s3.amazonaws.com (Classic) + # - s3-us-west-2.amazonaws.com (Specific region) + # - bukkit.s3.amazonaws.com (Vhosted Classic) + # - bukkit.s3-ap-northeast-1.amazonaws.com (Vhosted specific region) + # - s3.cn-north-1.amazonaws.com.cn - (Beijing region) + # - bukkit.s3.cn-north-1.amazonaws.com.cn - (Vhosted Beijing region) + parts = self.split_host_parts(host) + + if self.region_name is not None: + region_name = self.region_name + else: + # Classic URLs - s3-us-west-2.amazonaws.com + if len(parts) == 3: + region_name = self.clean_region_name(parts[0]) + + # Special-case for Classic. + if region_name == 's3': + region_name = 'us-east-1' + else: + # Iterate over the parts in reverse order. + for offset, part in enumerate(reversed(parts)): + part = part.lower() + + # Look for the first thing starting with 's3'. + # Until there's a ``.s3`` TLD, we should be OK. :P + if part == 's3': + # If it's by itself, the region is the previous part. + region_name = parts[-offset] + + # Unless it's Vhosted classic + if region_name == 'amazonaws': + region_name = 'us-east-1' + + break + elif part.startswith('s3-'): + region_name = self.clean_region_name(part) + break + + return region_name + + def determine_service_name(self, host): + # Should this signing mechanism ever be used for anything else, this + # will fail. Consider utilizing the logic from the parent class should + # you find yourself here. + return 's3' + + def mangle_path_and_params(self, req): + """ + Returns a copy of the request object with fixed ``auth_path/params`` + attributes from the original. + """ + modified_req = copy.copy(req) + + # Unlike the most other services, in S3, ``req.params`` isn't the only + # source of query string parameters. + # Because of the ``query_args``, we may already have a query string + # **ON** the ``path/auth_path``. + # Rip them apart, so the ``auth_path/params`` can be signed + # appropriately. + parsed_path = urllib.parse.urlparse(modified_req.auth_path) + modified_req.auth_path = parsed_path.path + + if modified_req.params is None: + modified_req.params = {} + else: + # To keep the original request object untouched. We must make + # a copy of the params dictionary. 
Because the copy of the + # original request directly refers to the params dictionary + # of the original request. + copy_params = req.params.copy() + modified_req.params = copy_params + + raw_qs = parsed_path.query + existing_qs = urllib.parse.parse_qs( + raw_qs, + keep_blank_values=True + ) + + # ``parse_qs`` will return lists. Don't do that unless there's a real, + # live list provided. + for key, value in existing_qs.items(): + if isinstance(value, (list, tuple)): + if len(value) == 1: + existing_qs[key] = value[0] + + modified_req.params.update(existing_qs) + return modified_req + + def payload(self, http_request): + if http_request.headers.get('x-amz-content-sha256'): + return http_request.headers['x-amz-content-sha256'] + + return super(S3HmacAuthV4Handler, self).payload(http_request) + + def add_auth(self, req, **kwargs): + if 'x-amz-content-sha256' not in req.headers: + if '_sha256' in req.headers: + req.headers['x-amz-content-sha256'] = req.headers.pop('_sha256') + else: + req.headers['x-amz-content-sha256'] = self.payload(req) + updated_req = self.mangle_path_and_params(req) + return super(S3HmacAuthV4Handler, self).add_auth(updated_req, + unmangled_req=req, + **kwargs) + + def presign(self, req, expires, iso_date=None): + """ + Presign a request using SigV4 query params. Takes in an HTTP request + and an expiration time in seconds and returns a URL. + + http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html + """ + if iso_date is None: + iso_date = datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%SZ') + + region = self.determine_region_name(req.host) + service = self.determine_service_name(req.host) + + params = { + 'X-Amz-Algorithm': 'AWS4-HMAC-SHA256', + 'X-Amz-Credential': '%s/%s/%s/%s/aws4_request' % ( + self._provider.access_key, + iso_date[:8], + region, + service + ), + 'X-Amz-Date': iso_date, + 'X-Amz-Expires': expires, + 'X-Amz-SignedHeaders': 'host' + } + + if self._provider.security_token: + params['X-Amz-Security-Token'] = self._provider.security_token + + headers_to_sign = self.headers_to_sign(req) + l = sorted(['%s' % n.lower().strip() for n in headers_to_sign]) + params['X-Amz-SignedHeaders'] = ';'.join(l) + + req.params.update(params) + + cr = self.canonical_request(req) + + # We need to replace the payload SHA with a constant + cr = '\n'.join(cr.split('\n')[:-1]) + '\nUNSIGNED-PAYLOAD' + + # Date header is expected for string_to_sign, but unused otherwise + req.headers['X-Amz-Date'] = iso_date + + sts = self.string_to_sign(req, cr) + signature = self.signature(req, sts) + + # Add signature to params now that we have it + req.params['X-Amz-Signature'] = signature + + return 'https://%s%s?%s' % (req.host, req.path, + urllib.parse.urlencode(req.params)) + + +class STSAnonHandler(AuthHandler): + """ + Provides pure query construction (no actual signing). + + Used for making anonymous STS request for operations like + ``assume_role_with_web_identity``. + """ + + capability = ['sts-anon'] + + def _escape_value(self, value): + # This is changed from a previous version because this string is + # being passed to the query string and query strings must + # be url encoded. In particular STS requires the saml_response to + # be urlencoded when calling assume_role_with_saml. 
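+        # For example (illustrative): a base64 SAML assertion ending in '='
+        # is emitted as '%3D', and any '+' as '%2B', by the quote() call
+        # below.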
+        return urllib.parse.quote(value)
+
+    def _build_query_string(self, params):
+        keys = list(params.keys())
+        keys.sort(key=lambda x: x.lower())
+        pairs = []
+        for key in keys:
+            val = boto.utils.get_utf8_value(params[key])
+            pairs.append(key + '=' + self._escape_value(val.decode('utf-8')))
+        return '&'.join(pairs)
+
+    def add_auth(self, http_request, **kwargs):
+        headers = http_request.headers
+        qs = self._build_query_string(
+            http_request.params
+        )
+        boto.log.debug('query_string in body: %s' % qs)
+        headers['Content-Type'] = 'application/x-www-form-urlencoded'
+        # This will be a POST, so the query string should go into the body
+        # as opposed to being in the uri.
+        http_request.body = qs
+
+
+class QuerySignatureHelper(HmacKeys):
+    """
+    Helper for Query signature based auth handlers.
+
+    Concrete subclasses need to implement the _calc_signature method.
+    """
+
+    def add_auth(self, http_request, **kwargs):
+        headers = http_request.headers
+        params = http_request.params
+        params['AWSAccessKeyId'] = self._provider.access_key
+        params['SignatureVersion'] = self.SignatureVersion
+        params['Timestamp'] = boto.utils.get_ts()
+        qs, signature = self._calc_signature(
+            http_request.params, http_request.method,
+            http_request.auth_path, http_request.host)
+        boto.log.debug('query_string: %s Signature: %s' % (qs, signature))
+        if http_request.method == 'POST':
+            headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
+            http_request.body = qs + '&Signature=' + urllib.parse.quote_plus(signature)
+            http_request.headers['Content-Length'] = str(len(http_request.body))
+        else:
+            http_request.body = ''
+            # if this is a retried request, the qs from the previous try will
+            # already be there, we need to get rid of that and rebuild it
+            http_request.path = http_request.path.split('?')[0]
+            http_request.path = (http_request.path + '?' + qs +
+                                 '&Signature=' + urllib.parse.quote_plus(signature))
+
+
+class QuerySignatureV0AuthHandler(QuerySignatureHelper, AuthHandler):
+    """Provides Signature V0 signing."""
+
+    SignatureVersion = 0
+    capability = ['sign-v0']
+
+    def _calc_signature(self, params, *args):
+        boto.log.debug('using _calc_signature_0')
+        hmac = self._get_hmac()
+        s = params['Action'] + params['Timestamp']
+        hmac.update(s.encode('utf-8'))
+        # Sort the parameter names case-insensitively.
+        keys = sorted(params.keys(), key=lambda x: x.lower())
+        pairs = []
+        for key in keys:
+            val = boto.utils.get_utf8_value(params[key])
+            pairs.append(key + '=' + urllib.parse.quote(val))
+        qs = '&'.join(pairs)
+        return (qs, base64.b64encode(hmac.digest()))
+
+
+class QuerySignatureV1AuthHandler(QuerySignatureHelper, AuthHandler):
+    """
+    Provides Query Signature V1 Authentication.
+    """
+
+    SignatureVersion = 1
+    capability = ['sign-v1', 'mturk']
+
+    def __init__(self, *args, **kw):
+        QuerySignatureHelper.__init__(self, *args, **kw)
+        AuthHandler.__init__(self, *args, **kw)
+        self._hmac_256 = None
+
+    def _calc_signature(self, params, *args):
+        boto.log.debug('using _calc_signature_1')
+        hmac = self._get_hmac()
+        # Sort the parameter names case-insensitively.
+        keys = sorted(params.keys(), key=lambda x: x.lower())
+        pairs = []
+        for key in keys:
+            hmac.update(key.encode('utf-8'))
+            val = boto.utils.get_utf8_value(params[key])
+            hmac.update(val)
+            pairs.append(key + '=' + urllib.parse.quote(val))
+        qs = '&'.join(pairs)
+        return (qs, base64.b64encode(hmac.digest()))
+
+
+class QuerySignatureV2AuthHandler(QuerySignatureHelper, AuthHandler):
+    """Provides Query Signature V2 Authentication."""
+
+    SignatureVersion = 2
+    capability = ['sign-v2', 'ec2', 'emr', 'fps', 'ecs',
+                  'sdb', 'iam', 'rds', 'sns', 'sqs', 'cloudformation']
+
+    def _calc_signature(self, params, verb, path, server_name):
+        boto.log.debug('using _calc_signature_2')
+        string_to_sign = '%s\n%s\n%s\n' % (verb, server_name.lower(), path)
+        hmac = self._get_hmac()
+        params['SignatureMethod'] = self.algorithm()
+        if self._provider.security_token:
+            params['SecurityToken'] = self._provider.security_token
+        keys = sorted(params.keys())
+        pairs = []
+        for key in keys:
+            val = boto.utils.get_utf8_value(params[key])
+            pairs.append(urllib.parse.quote(key, safe='') + '=' +
+                         urllib.parse.quote(val, safe='-_~'))
+        qs = '&'.join(pairs)
+        boto.log.debug('query string: %s' % qs)
+        string_to_sign += qs
+        boto.log.debug('string_to_sign: %s' % string_to_sign)
+        hmac.update(string_to_sign.encode('utf-8'))
+        b64 = base64.b64encode(hmac.digest())
+        boto.log.debug('len(b64)=%d' % len(b64))
+        boto.log.debug('base64 encoded digest: %s' % b64)
+        return (qs, b64)
+
+
+class POSTPathQSV2AuthHandler(QuerySignatureV2AuthHandler, AuthHandler):
+    """
+    Query Signature V2 Authentication that relocates the signed query
+    string into the path and allows POST requests with Content-Types.
+    """
+
+    capability = ['mws']
+
+    def add_auth(self, req, **kwargs):
+        req.params['AWSAccessKeyId'] = self._provider.access_key
+        req.params['SignatureVersion'] = self.SignatureVersion
+        req.params['Timestamp'] = boto.utils.get_ts()
+        qs, signature = self._calc_signature(req.params, req.method,
+                                             req.auth_path, req.host)
+        boto.log.debug('query_string: %s Signature: %s' % (qs, signature))
+        if req.method == 'POST':
+            req.headers['Content-Length'] = str(len(req.body))
+            req.headers['Content-Type'] = req.headers.get('Content-Type',
+                                                          'text/plain')
+        else:
+            req.body = ''
+        # if this is a retried req, the qs from the previous try will
+        # already be there, we need to get rid of that and rebuild it
+        req.path = req.path.split('?')[0]
+        req.path = (req.path + '?' + qs +
+                    '&Signature=' + urllib.parse.quote_plus(signature))
+
+
+def get_auth_handler(host, config, provider, requested_capability=None):
+    """Finds an AuthHandler that is ready to authenticate.
+
+    Iterates over all the registered AuthHandlers to find one that is
+    willing to handle the requested capability, config and provider.
+
+    :type host: string
+    :param host: The name of the host.
+
+    :type config: boto.pyami.Config
+    :param config: Boto configuration.
+
+    :type provider: boto.provider.Provider
+    :param provider: Provider details.
+
+    Returns:
+        An implementation of AuthHandler.
+ + Raises: + boto.exception.NoAuthHandlerFound + """ + ready_handlers = [] + auth_handlers = boto.plugin.get_plugin(AuthHandler, requested_capability) + for handler in auth_handlers: + try: + ready_handlers.append(handler(host, config, provider)) + except boto.auth_handler.NotReadyToAuthenticate: + pass + + if not ready_handlers: + checked_handlers = auth_handlers + names = [handler.__name__ for handler in checked_handlers] + raise boto.exception.NoAuthHandlerFound( + 'No handler was ready to authenticate. %d handlers were checked.' + ' %s ' + 'Check your credentials' % (len(names), str(names))) + + # We select the last ready auth handler that was loaded, to allow users to + # customize how auth works in environments where there are shared boto + # config files (e.g., /etc/boto.cfg and ~/.boto): The more general, + # system-wide shared configs should be loaded first, and the user's + # customizations loaded last. That way, for example, the system-wide + # config might include a plugin_directory that includes a service account + # auth plugin shared by all users of a Google Compute Engine instance + # (allowing sharing of non-user data between various services), and the + # user could override this with a .boto config that includes user-specific + # credentials (for access to user data). + return ready_handlers[-1] + + +def detect_potential_sigv4(func): + def _wrapper(self): + if os.environ.get('EC2_USE_SIGV4', False): + return ['hmac-v4'] + + if boto.config.get('ec2', 'use-sigv4', False): + return ['hmac-v4'] + + if hasattr(self, 'region'): + # If you're making changes here, you should also check + # ``boto/iam/connection.py``, as several things there are also + # endpoint-related. + if getattr(self.region, 'endpoint', ''): + for test in SIGV4_DETECT: + if test in self.region.endpoint: + return ['hmac-v4'] + + return func(self) + return _wrapper + + +def detect_potential_s3sigv4(func): + def _wrapper(self): + if os.environ.get('S3_USE_SIGV4', False): + return ['hmac-v4-s3'] + + if boto.config.get('s3', 'use-sigv4', False): + return ['hmac-v4-s3'] + + if hasattr(self, 'host'): + # If you're making changes here, you should also check + # ``boto/iam/connection.py``, as several things there are also + # endpoint-related. + for test in SIGV4_DETECT: + if test in self.host: + return ['hmac-v4-s3'] + + return func(self) + return _wrapper diff --git a/desktop/core/ext-py/boto-2.38.0/boto/auth_handler.py b/desktop/core/ext-py/boto-2.38.0/boto/auth_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..a8583f8aa3509f2596163666e784d45e2c801157 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/auth_handler.py @@ -0,0 +1,60 @@ +# Copyright 2010 Google Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Defines an interface which all Auth handlers need to implement. +""" + +from boto.plugin import Plugin + + +class NotReadyToAuthenticate(Exception): + pass + + +class AuthHandler(Plugin): + + capability = [] + + def __init__(self, host, config, provider): + """Constructs the handlers. + :type host: string + :param host: The host to which the request is being sent. + + :type config: boto.pyami.Config + :param config: Boto configuration. + + :type provider: boto.provider.Provider + :param provider: Provider details. + + Raises: + NotReadyToAuthenticate: if this handler is not willing to + authenticate for the given provider and config. + """ + pass + + def add_auth(self, http_request): + """Invoked to add authentication details to request. + + :type http_request: boto.connection.HTTPRequest + :param http_request: HTTP request that needs to be authenticated. + """ + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/awslambda/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/awslambda/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1e1f782caafe397852ab98c5e9a721451ecbee01 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/awslambda/__init__.py @@ -0,0 +1,40 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the AWS Lambda service. + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.awslambda.layer1 import AWSLambdaConnection + return get_regions('awslambda', + connection_cls=AWSLambdaConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/awslambda/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/awslambda/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..7e1515117f0b1b0d2ccee860601160a21298a658 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/awslambda/exceptions.py @@ -0,0 +1,38 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. 
All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.exception import BotoServerError
+
+
+class InvalidRequestContentException(BotoServerError):
+    pass
+
+
+class ResourceNotFoundException(BotoServerError):
+    pass
+
+
+class InvalidParameterValueException(BotoServerError):
+    pass
+
+
+class ServiceException(BotoServerError):
+    pass
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/awslambda/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/awslambda/layer1.py
new file mode 100644
index 0000000000000000000000000000000000000000..01603f6f02e71a2e81d3dc47950a63b07154af46
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/awslambda/layer1.py
@@ -0,0 +1,517 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import os
+
+from boto.compat import json
+from boto.exception import JSONResponseError
+from boto.connection import AWSAuthConnection
+from boto.regioninfo import RegionInfo
+from boto.awslambda import exceptions
+
+
+class AWSLambdaConnection(AWSAuthConnection):
+    """
+    AWS Lambda
+    **Overview**
+
+    This is the AWS Lambda API Reference. The AWS Lambda Developer
+    Guide provides additional information. For the service overview,
+    go to `What is AWS Lambda`_, and for information about how the
+    service works, go to `AWS Lambda: How it Works`_ in the AWS Lambda
+    Developer Guide.
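+
+    A minimal usage sketch (illustrative; assumes credentials are available
+    from the environment or boto config)::
+
+        import boto.awslambda
+        conn = boto.awslambda.connect_to_region('us-east-1')
+        print(conn.list_functions())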
+ """ + APIVersion = "2014-11-11" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "lambda.us-east-1.amazonaws.com" + ResponseError = JSONResponseError + + _faults = { + "InvalidRequestContentException": exceptions.InvalidRequestContentException, + "ResourceNotFoundException": exceptions.ResourceNotFoundException, + "InvalidParameterValueException": exceptions.InvalidParameterValueException, + "ServiceException": exceptions.ServiceException, + } + + + def __init__(self, **kwargs): + region = kwargs.get('region') + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + else: + del kwargs['region'] + kwargs['host'] = region.endpoint + super(AWSLambdaConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def add_event_source(self, event_source, function_name, role, + batch_size=None, parameters=None): + """ + Identifies an Amazon Kinesis stream as the event source for an + AWS Lambda function. AWS Lambda invokes the specified function + when records are posted to the stream. + + This is the pull model, where AWS Lambda invokes the function. + For more information, go to `AWS LambdaL How it Works`_ in the + AWS Lambda Developer Guide. + + This association between an Amazon Kinesis stream and an AWS + Lambda function is called the event source mapping. You + provide the configuration information (for example, which + stream to read from and which AWS Lambda function to invoke) + for the event source mapping in the request body. + + This operation requires permission for the `iam:PassRole` + action for the IAM role. It also requires permission for the + `lambda:AddEventSource` action. + + :type event_source: string + :param event_source: The Amazon Resource Name (ARN) of the Amazon + Kinesis stream that is the event source. Any record added to this + stream causes AWS Lambda to invoke your Lambda function. AWS Lambda + POSTs the Amazon Kinesis event, containing records, to your Lambda + function as JSON. + + :type function_name: string + :param function_name: The Lambda function to invoke when AWS Lambda + detects an event on the stream. + + :type role: string + :param role: The ARN of the IAM role (invocation role) that AWS Lambda + can assume to read from the stream and invoke the function. + + :type batch_size: integer + :param batch_size: The largest number of records that AWS Lambda will + give to your function in a single event. The default is 100 + records. + + :type parameters: map + :param parameters: A map (key-value pairs) defining the configuration + for AWS Lambda to use when reading the event source. Currently, AWS + Lambda supports only the `InitialPositionInStream` key. The valid + values are: "TRIM_HORIZON" and "LATEST". The default value is + "TRIM_HORIZON". For more information, go to `ShardIteratorType`_ in + the Amazon Kinesis Service API Reference. + + """ + + uri = '/2014-11-13/event-source-mappings/' + params = { + 'EventSource': event_source, + 'FunctionName': function_name, + 'Role': role, + } + headers = {} + query_params = {} + if batch_size is not None: + params['BatchSize'] = batch_size + if parameters is not None: + params['Parameters'] = parameters + return self.make_request('POST', uri, expected_status=200, + data=json.dumps(params), headers=headers, + params=query_params) + + def delete_function(self, function_name): + """ + Deletes the specified Lambda function code and configuration. 
+ + This operation requires permission for the + `lambda:DeleteFunction` action. + + :type function_name: string + :param function_name: The Lambda function to delete. + + """ + + uri = '/2014-11-13/functions/{0}'.format(function_name) + return self.make_request('DELETE', uri, expected_status=204) + + def get_event_source(self, uuid): + """ + Returns configuration information for the specified event + source mapping (see AddEventSource). + + This operation requires permission for the + `lambda:GetEventSource` action. + + :type uuid: string + :param uuid: The AWS Lambda assigned ID of the event source mapping. + + """ + + uri = '/2014-11-13/event-source-mappings/{0}'.format(uuid) + return self.make_request('GET', uri, expected_status=200) + + def get_function(self, function_name): + """ + Returns the configuration information of the Lambda function + and a presigned URL link to the .zip file you uploaded with + UploadFunction so you can download the .zip file. Note that + the URL is valid for up to 10 minutes. The configuration + information is the same information you provided as parameters + when uploading the function. + + This operation requires permission for the + `lambda:GetFunction` action. + + :type function_name: string + :param function_name: The Lambda function name. + + """ + + uri = '/2014-11-13/functions/{0}'.format(function_name) + return self.make_request('GET', uri, expected_status=200) + + def get_function_configuration(self, function_name): + """ + Returns the configuration information of the Lambda function. + This the same information you provided as parameters when + uploading the function by using UploadFunction. + + This operation requires permission for the + `lambda:GetFunctionConfiguration` operation. + + :type function_name: string + :param function_name: The name of the Lambda function for which you + want to retrieve the configuration information. + + """ + + uri = '/2014-11-13/functions/{0}/configuration'.format(function_name) + return self.make_request('GET', uri, expected_status=200) + + def invoke_async(self, function_name, invoke_args): + """ + Submits an invocation request to AWS Lambda. Upon receiving + the request, Lambda executes the specified function + asynchronously. To see the logs generated by the Lambda + function execution, see the CloudWatch logs console. + + This operation requires permission for the + `lambda:InvokeAsync` action. + + :type function_name: string + :param function_name: The Lambda function name. + + :type invoke_args: blob + :param invoke_args: JSON that you want to provide to your Lambda + function as input. + + """ + uri = '/2014-11-13/functions/{0}/invoke-async/'.format(function_name) + headers = {} + query_params = {} + try: + content_length = str(len(invoke_args)) + except (TypeError, AttributeError): + # If a file like object is provided and seekable, try to retrieve + # the file size via fstat. + try: + invoke_args.tell() + except (AttributeError, OSError, IOError): + raise TypeError( + "File-like object passed to parameter " + "``invoke_args`` must be seekable." + ) + content_length = str(os.fstat(invoke_args.fileno()).st_size) + headers['Content-Length'] = content_length + return self.make_request('POST', uri, expected_status=202, + data=invoke_args, headers=headers, + params=query_params) + + def list_event_sources(self, event_source_arn=None, function_name=None, + marker=None, max_items=None): + """ + Returns a list of event source mappings. 
For each mapping, the + API returns configuration information (see AddEventSource). + You can optionally specify filters to retrieve specific event + source mappings. + + This operation requires permission for the + `lambda:ListEventSources` action. + + :type event_source_arn: string + :param event_source_arn: The Amazon Resource Name (ARN) of the Amazon + Kinesis stream. + + :type function_name: string + :param function_name: The name of the AWS Lambda function. + + :type marker: string + :param marker: Optional string. An opaque pagination token returned + from a previous `ListEventSources` operation. If present, specifies + to continue the list from where the returning call left off. + + :type max_items: integer + :param max_items: Optional integer. Specifies the maximum number of + event sources to return in response. This value must be greater + than 0. + + """ + + uri = '/2014-11-13/event-source-mappings/' + params = {} + headers = {} + query_params = {} + if event_source_arn is not None: + query_params['EventSource'] = event_source_arn + if function_name is not None: + query_params['FunctionName'] = function_name + if marker is not None: + query_params['Marker'] = marker + if max_items is not None: + query_params['MaxItems'] = max_items + return self.make_request('GET', uri, expected_status=200, + data=json.dumps(params), headers=headers, + params=query_params) + + def list_functions(self, marker=None, max_items=None): + """ + Returns a list of your Lambda functions. For each function, + the response includes the function configuration information. + You must use GetFunction to retrieve the code for your + function. + + This operation requires permission for the + `lambda:ListFunctions` action. + + :type marker: string + :param marker: Optional string. An opaque pagination token returned + from a previous `ListFunctions` operation. If present, indicates + where to continue the listing. + + :type max_items: integer + :param max_items: Optional integer. Specifies the maximum number of AWS + Lambda functions to return in response. This parameter value must + be greater than 0. + + """ + + uri = '/2014-11-13/functions/' + params = {} + headers = {} + query_params = {} + if marker is not None: + query_params['Marker'] = marker + if max_items is not None: + query_params['MaxItems'] = max_items + return self.make_request('GET', uri, expected_status=200, + data=json.dumps(params), headers=headers, + params=query_params) + + def remove_event_source(self, uuid): + """ + Removes an event source mapping. This means AWS Lambda will no + longer invoke the function for events in the associated + source. + + This operation requires permission for the + `lambda:RemoveEventSource` action. + + :type uuid: string + :param uuid: The event source mapping ID. + + """ + + uri = '/2014-11-13/event-source-mappings/{0}'.format(uuid) + return self.make_request('DELETE', uri, expected_status=204) + + def update_function_configuration(self, function_name, role=None, + handler=None, description=None, + timeout=None, memory_size=None): + """ + Updates the configuration parameters for the specified Lambda + function by using the values provided in the request. You + provide only the parameters you want to change. This operation + must only be used on an existing Lambda function and cannot be + used to update the function's code. + + This operation requires permission for the + `lambda:UpdateFunctionConfiguration` action. + + :type function_name: string + :param function_name: The name of the Lambda function. 
+
+        :type role: string
+        :param role: The Amazon Resource Name (ARN) of the IAM role that Lambda
+            will assume when it executes your function.
+
+        :type handler: string
+        :param handler: The function that Lambda calls to begin executing your
+            function. For Node.js, it is the module-name.export value in your
+            function.
+
+        :type description: string
+        :param description: A short user-defined function description. Lambda
+            does not use this value. Assign a meaningful description as you see
+            fit.
+
+        :type timeout: integer
+        :param timeout: The function execution time at which Lambda should
+            terminate the function. Because the execution time has cost
+            implications, we recommend you set this value based on your
+            expected execution time. The default is 3 seconds.
+
+        :type memory_size: integer
+        :param memory_size: The amount of memory, in MB, your Lambda function
+            is given. Lambda uses this memory size to infer the amount of CPU
+            allocated to your function. Your function use-case determines your
+            CPU and memory requirements. For example, a database operation
+            might need less memory compared to an image processing function.
+            The default value is 128 MB. The value must be a multiple of 64 MB.
+
+        """
+
+        uri = '/2014-11-13/functions/{0}/configuration'.format(function_name)
+        params = {}
+        headers = {}
+        query_params = {}
+        if role is not None:
+            query_params['Role'] = role
+        if handler is not None:
+            query_params['Handler'] = handler
+        if description is not None:
+            query_params['Description'] = description
+        if timeout is not None:
+            query_params['Timeout'] = timeout
+        if memory_size is not None:
+            query_params['MemorySize'] = memory_size
+        return self.make_request('PUT', uri, expected_status=200,
+                                 data=json.dumps(params), headers=headers,
+                                 params=query_params)
+
+    def upload_function(self, function_name, function_zip, runtime, role,
+                        handler, mode, description=None, timeout=None,
+                        memory_size=None):
+        """
+        Creates a new Lambda function or updates an existing function.
+        The function metadata is created from the request parameters,
+        and the code for the function is provided by a .zip file in
+        the request body. If the function name already exists, the
+        existing Lambda function is updated with the new code and
+        metadata.
+
+        This operation requires permission for the
+        `lambda:UploadFunction` action.
+
+        :type function_name: string
+        :param function_name: The name you want to assign to the function you
+            are uploading. The function names appear in the console and are
+            returned in the ListFunctions API. Function names are used to
+            specify functions to other AWS Lambda APIs, such as InvokeAsync.
+
+        :type function_zip: blob
+        :param function_zip: A .zip file containing your packaged source code.
+            For more information about creating a .zip file, go to `AWS
+            Lambda: How it Works`_ in the AWS Lambda Developer Guide.
+
+        :type runtime: string
+        :param runtime: The runtime environment for the Lambda function you are
+            uploading. Currently, Lambda supports only "nodejs" as the runtime.
+
+        :type role: string
+        :param role: The Amazon Resource Name (ARN) of the IAM role that Lambda
+            assumes when it executes your function to access any other Amazon
+            Web Services (AWS) resources.
+
+        :type handler: string
+        :param handler: The function that Lambda calls to begin execution. For
+            Node.js, it is the module-name.export value in your function.
+
+        :type mode: string
+        :param mode: How the Lambda function will be invoked. Lambda supports
+            only the "event" mode.
+
+        :type description: string
+        :param description: A short, user-defined function description. Lambda
+            does not use this value. Assign a meaningful description as you see
+            fit.
+
+        :type timeout: integer
+        :param timeout: The function execution time at which Lambda should
+            terminate the function. Because the execution time has cost
+            implications, we recommend you set this value based on your
+            expected execution time. The default is 3 seconds.
+
+        :type memory_size: integer
+        :param memory_size: The amount of memory, in MB, your Lambda function
+            is given. Lambda uses this memory size to infer the amount of CPU
+            allocated to your function. Your function use-case determines your
+            CPU and memory requirements. For example, a database operation
+            might need less memory compared to an image processing function.
+            The default value is 128 MB. The value must be a multiple of 64 MB.
+
+        """
+        uri = '/2014-11-13/functions/{0}'.format(function_name)
+        headers = {}
+        query_params = {}
+        if runtime is not None:
+            query_params['Runtime'] = runtime
+        if role is not None:
+            query_params['Role'] = role
+        if handler is not None:
+            query_params['Handler'] = handler
+        if mode is not None:
+            query_params['Mode'] = mode
+        if description is not None:
+            query_params['Description'] = description
+        if timeout is not None:
+            query_params['Timeout'] = timeout
+        if memory_size is not None:
+            query_params['MemorySize'] = memory_size
+
+        try:
+            content_length = str(len(function_zip))
+        except (TypeError, AttributeError):
+            # If a file like object is provided and seekable, try to retrieve
+            # the file size via fstat.
+            try:
+                function_zip.tell()
+            except (AttributeError, OSError, IOError):
+                raise TypeError(
+                    "File-like object passed to parameter "
+                    "``function_zip`` must be seekable."
+                )
+            content_length = str(os.fstat(function_zip.fileno()).st_size)
+        headers['Content-Length'] = content_length
+        return self.make_request('PUT', uri, expected_status=201,
+                                 data=function_zip, headers=headers,
+                                 params=query_params)
+
+    def make_request(self, verb, resource, headers=None, data='',
+                     expected_status=None, params=None):
+        if headers is None:
+            headers = {}
+        response = AWSAuthConnection.make_request(
+            self, verb, resource, headers=headers, data=data, params=params)
+        body = response.read().decode('utf-8')
+        if body:
+            body = json.loads(body)
+        if response.status == expected_status:
+            return body
+        else:
+            error_type = response.getheader('x-amzn-ErrorType').split(':')[0]
+            error_class = self._faults.get(error_type, self.ResponseError)
+            raise error_class(response.status, response.reason, body)
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/beanstalk/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/beanstalk/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3928bcd1b57d85c3a5337a152697bc5664efdcc
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/beanstalk/__init__.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the AWS Elastic Beanstalk service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + import boto.beanstalk.layer1 + return get_regions( + 'elasticbeanstalk', + connection_cls=boto.beanstalk.layer1.Layer1 + ) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/beanstalk/exception.py b/desktop/core/ext-py/boto-2.38.0/boto/beanstalk/exception.py new file mode 100644 index 0000000000000000000000000000000000000000..0fbd4ab9fa1c9544f8aea9b0faa611eb308bc18c --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/beanstalk/exception.py @@ -0,0 +1,63 @@ +import sys +from boto.compat import json +from boto.exception import BotoServerError + + +def simple(e): + code = e.code + + if code.endswith('Exception'): + code = code[:-len('Exception')]  # rstrip('Exception') strips a char set and would mangle some codes + + try: + # Dynamically get the error class. + simple_e = getattr(sys.modules[__name__], code)(e) + except AttributeError: + # Return original exception on failure. + return e + + return simple_e + + +class SimpleException(BotoServerError): + def __init__(self, e): + super(SimpleException, self).__init__(e.status, e.reason, e.body) + self.error_message = self.message + + def __repr__(self): + return self.__class__.__name__ + ': ' + self.error_message + def __str__(self): + return self.__class__.__name__ + ': ' + self.error_message + + +class ValidationError(SimpleException): pass + +# Common beanstalk exceptions. +class IncompleteSignature(SimpleException): pass +class InternalFailure(SimpleException): pass +class InvalidAction(SimpleException): pass +class InvalidClientTokenId(SimpleException): pass +class InvalidParameterCombination(SimpleException): pass +class InvalidParameterValue(SimpleException): pass +class InvalidQueryParameter(SimpleException): pass +class MalformedQueryString(SimpleException): pass +class MissingAction(SimpleException): pass +class MissingAuthenticationToken(SimpleException): pass +class MissingParameter(SimpleException): pass +class OptInRequired(SimpleException): pass +class RequestExpired(SimpleException): pass +class ServiceUnavailable(SimpleException): pass +class Throttling(SimpleException): pass + + +# Action specific exceptions.
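+# The classes below are produced by ``simple()`` above, which maps a server +# error code such as 'TooManyApplicationsException' onto the matching class +# and returns the original exception when no match exists. Example +# (editorial sketch, not part of upstream boto; ``conn`` is an assumed, +# already-configured ``boto.beanstalk.layer1.Layer1`` connection): +# +# try: +# conn.create_application('myapp') +# except conn.ResponseError as e: +# raise simple(e)  # e.g. raises TooManyApplications +#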
+class TooManyApplications(SimpleException): pass +class InsufficientPrivileges(SimpleException): pass +class S3LocationNotInServiceRegion(SimpleException): pass +class TooManyApplicationVersions(SimpleException): pass +class TooManyConfigurationTemplates(SimpleException): pass +class TooManyEnvironments(SimpleException): pass +class S3SubscriptionRequired(SimpleException): pass +class TooManyBuckets(SimpleException): pass +class OperationInProgress(SimpleException): pass +class SourceBundleDeletion(SimpleException): pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/beanstalk/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/beanstalk/layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..e72ee23ec5fa1d2b6b4f0402f5f0a634eb89da07 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/beanstalk/layer1.py @@ -0,0 +1,1201 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# + +import boto +import boto.jsonresponse +from boto.compat import json +from boto.regioninfo import RegionInfo +from boto.connection import AWSQueryConnection + + +class Layer1(AWSQueryConnection): + + APIVersion = '2010-12-01' + DefaultRegionName = 'us-east-1' + DefaultRegionEndpoint = 'elasticbeanstalk.us-east-1.amazonaws.com' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, + proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + api_version=None, security_token=None, profile_name=None): + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + self.region = region + super(Layer1, self).__init__(aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, + self.region.endpoint, debug, + https_connection_factory, path, + security_token, profile_name=profile_name) + + def _required_auth_capability(self): + return ['hmac-v4'] + + def _encode_bool(self, v): + v = bool(v) + return {True: "true", False: "false"}[v] + + def _get_response(self, action, params, path='/', verb='GET'): + params['ContentType'] = 'JSON' + response = self.make_request(action, params, path, verb) + body = response.read().decode('utf-8') + boto.log.debug(body) + if response.status == 200: + return json.loads(body) + else: + raise self.ResponseError(response.status, response.reason, body) + + def check_dns_availability(self, cname_prefix): + """Checks if the specified CNAME is available. + + :type cname_prefix: string + :param cname_prefix: The prefix used when this CNAME is + reserved. + """ + params = {'CNAMEPrefix': cname_prefix} + return self._get_response('CheckDNSAvailability', params) + + def create_application(self, application_name, description=None): + """ + Creates an application that has one configuration template + named default and no application versions. + + :type application_name: string + :param application_name: The name of the application. + Constraint: This name must be unique within your account. If the + specified name already exists, the action returns an + InvalidParameterValue error. + + :type description: string + :param description: Describes the application. + + :raises: TooManyApplicationsException + """ + params = {'ApplicationName': application_name} + if description: + params['Description'] = description + return self._get_response('CreateApplication', params) + + def create_application_version(self, application_name, version_label, + description=None, s3_bucket=None, + s3_key=None, auto_create_application=None): + """Creates an application version for the specified application. + + :type application_name: string + :param application_name: The name of the application. If no + application is found with this name, and AutoCreateApplication is + false, returns an InvalidParameterValue error. + + :type version_label: string + :param version_label: A label identifying this version. Constraint: + Must be unique per application. If an application version already + exists with this label for the specified application, AWS Elastic + Beanstalk returns an InvalidParameterValue error. + + :type description: string + :param description: Describes this version. + + :type s3_bucket: string + :param s3_bucket: The Amazon S3 bucket where the data is located. + + :type s3_key: string + :param s3_key: The Amazon S3 key where the data is located. 
Both + s3_bucket and s3_key must be specified in order to use a specific + source bundle. If both of these values are not specified the + sample application will be used. + + :type auto_create_application: boolean + :param auto_create_application: Determines how the system behaves if + the specified application for this version does not already exist: + true: Automatically creates the specified application for this + version if it does not already exist. false: Returns an + InvalidParameterValue if the specified application for this version + does not already exist. Default: false Valid Values: true | false + + :raises: TooManyApplicationsException, + TooManyApplicationVersionsException, + InsufficientPrivilegesException, + S3LocationNotInServiceRegionException + + """ + params = {'ApplicationName': application_name, + 'VersionLabel': version_label} + if description: + params['Description'] = description + if s3_bucket and s3_key: + params['SourceBundle.S3Bucket'] = s3_bucket + params['SourceBundle.S3Key'] = s3_key + if auto_create_application: + params['AutoCreateApplication'] = self._encode_bool( + auto_create_application) + return self._get_response('CreateApplicationVersion', params) + + def create_configuration_template(self, application_name, template_name, + solution_stack_name=None, + source_configuration_application_name=None, + source_configuration_template_name=None, + environment_id=None, description=None, + option_settings=None): + """Creates a configuration template. + + Templates are associated with a specific application and are used to + deploy different versions of the application with the same + configuration settings. + + :type application_name: string + :param application_name: The name of the application to associate with + this configuration template. If no application is found with this + name, AWS Elastic Beanstalk returns an InvalidParameterValue error. + + :type template_name: string + :param template_name: The name of the configuration template. + Constraint: This name must be unique per application. Default: If + a configuration template already exists with this name, AWS Elastic + Beanstalk returns an InvalidParameterValue error. + + :type solution_stack_name: string + :param solution_stack_name: The name of the solution stack used by this + configuration. The solution stack specifies the operating system, + architecture, and application server for a configuration template. + It determines the set of configuration options as well as the + possible and default values. Use ListAvailableSolutionStacks to + obtain a list of available solution stacks. Default: If the + SolutionStackName is not specified and the source configuration + parameter is blank, AWS Elastic Beanstalk uses the default solution + stack. If not specified and the source configuration parameter is + specified, AWS Elastic Beanstalk uses the same solution stack as + the source configuration template. + + :type source_configuration_application_name: string + :param source_configuration_application_name: The name of the + application associated with the configuration. + + :type source_configuration_template_name: string + :param source_configuration_template_name: The name of the + configuration template. + + :type environment_id: string + :param environment_id: The ID of the environment used with this + configuration template. + + :type description: string + :param description: Describes this configuration. 
+ + :type option_settings: list + :param option_settings: If specified, AWS Elastic Beanstalk sets the + specified configuration option to the requested value. The new + value overrides the value obtained from the solution stack or the + source configuration template. + + :raises: InsufficientPrivilegesException, + TooManyConfigurationTemplatesException + """ + params = {'ApplicationName': application_name, + 'TemplateName': template_name} + if solution_stack_name: + params['SolutionStackName'] = solution_stack_name + if source_configuration_application_name: + params['SourceConfiguration.ApplicationName'] = source_configuration_application_name + if source_configuration_template_name: + params['SourceConfiguration.TemplateName'] = source_configuration_template_name + if environment_id: + params['EnvironmentId'] = environment_id + if description: + params['Description'] = description + if option_settings: + self._build_list_params(params, option_settings, + 'OptionSettings.member', + ('Namespace', 'OptionName', 'Value')) + return self._get_response('CreateConfigurationTemplate', params) + + def create_environment(self, application_name, environment_name, + version_label=None, template_name=None, + solution_stack_name=None, cname_prefix=None, + description=None, option_settings=None, + options_to_remove=None, tier_name=None, + tier_type=None, tier_version='1.0'): + """Launches an environment for the application using a configuration. + + :type application_name: string + :param application_name: The name of the application that contains the + version to be deployed. If no application is found with this name, + CreateEnvironment returns an InvalidParameterValue error. + + :type environment_name: string + :param environment_name: A unique name for the deployment environment. + Used in the application URL. Constraint: Must be from 4 to 23 + characters in length. The name can contain only letters, numbers, + and hyphens. It cannot start or end with a hyphen. This name must + be unique in your account. If the specified name already exists, + AWS Elastic Beanstalk returns an InvalidParameterValue error. + Default: If the CNAME parameter is not specified, the environment + name becomes part of the CNAME, and therefore part of the visible + URL for your application. + + :type version_label: string + :param version_label: The name of the application version to deploy. If + the specified application has no associated application versions, + AWS Elastic Beanstalk UpdateEnvironment returns an + InvalidParameterValue error. Default: If not specified, AWS + Elastic Beanstalk attempts to launch the most recently created + application version. + + :type template_name: string + :param template_name: The name of the configuration template to + use in deployment. If no configuration template is found with this + name, AWS Elastic Beanstalk returns an InvalidParameterValue error. + Condition: You must specify either this parameter or a + SolutionStackName, but not both. If you specify both, AWS Elastic + Beanstalk returns an InvalidParameterCombination error. If you do + not specify either, AWS Elastic Beanstalk returns a + MissingRequiredParameter error. + + :type solution_stack_name: string + :param solution_stack_name: This is an alternative to specifying a + configuration name. If specified, AWS Elastic Beanstalk sets the + configuration values to the default values associated with the + specified solution stack. Condition: You must specify either this + or a TemplateName, but not both. 
If you specify both, AWS Elastic + Beanstalk returns an InvalidParameterCombination error. If you do + not specify either, AWS Elastic Beanstalk returns a + MissingRequiredParameter error. + + :type cname_prefix: string + :param cname_prefix: If specified, the environment attempts to use this + value as the prefix for the CNAME. If not specified, the + environment uses the environment name. + + :type description: string + :param description: Describes this environment. + + :type option_settings: list + :param option_settings: If specified, AWS Elastic Beanstalk sets the + specified configuration options to the requested value in the + configuration set for the new environment. These override the + values obtained from the solution stack or the configuration + template. Each element in the list is a tuple of (Namespace, + OptionName, Value), for example:: + + [('aws:autoscaling:launchconfiguration', + 'Ec2KeyName', 'mykeypair')] + + :type options_to_remove: list + :param options_to_remove: A list of custom user-defined configuration + options to remove from the configuration set for this new + environment. + + :type tier_name: string + :param tier_name: The name of the tier. Valid values are + "WebServer" and "Worker". Defaults to "WebServer". + The ``tier_name`` and ``tier_type`` parameters are + related and the values provided must be valid. + The possible combinations are: + + * "WebServer" and "Standard" (the default) + * "Worker" and "SQS/HTTP" + + :type tier_type: string + :param tier_type: The type of the tier. Valid values are + "Standard" if ``tier_name`` is "WebServer" and "SQS/HTTP" + if ``tier_name`` is "Worker". Defaults to "Standard". + + :type tier_version: string + :param tier_version: The version of the tier. Valid values + currently are "1.0". Defaults to "1.0". + + :raises: TooManyEnvironmentsException, InsufficientPrivilegesException + + """ + params = {'ApplicationName': application_name, + 'EnvironmentName': environment_name} + if version_label: + params['VersionLabel'] = version_label + if template_name: + params['TemplateName'] = template_name + if solution_stack_name: + params['SolutionStackName'] = solution_stack_name + if cname_prefix: + params['CNAMEPrefix'] = cname_prefix + if description: + params['Description'] = description + if option_settings: + self._build_list_params(params, option_settings, + 'OptionSettings.member', + ('Namespace', 'OptionName', 'Value')) + if options_to_remove: + self.build_list_params(params, options_to_remove, + 'OptionsToRemove.member') + if tier_name and tier_type and tier_version: + params['Tier.Name'] = tier_name + params['Tier.Type'] = tier_type + params['Tier.Version'] = tier_version + return self._get_response('CreateEnvironment', params) + + def create_storage_location(self): + """ + Creates the Amazon S3 storage location for the account. This + location is used to store user log files. + + :raises: TooManyBucketsException, + S3SubscriptionRequiredException, + InsufficientPrivilegesException + + """ + return self._get_response('CreateStorageLocation', params={}) + + def delete_application(self, application_name, + terminate_env_by_force=None): + """ + Deletes the specified application along with all associated + versions and configurations. The application versions will not + be deleted from your Amazon S3 bucket. + + :type application_name: string + :param application_name: The name of the application to delete.
+ + :type terminate_env_by_force: boolean + :param terminate_env_by_force: When set to true, running + environments will be terminated before deleting the application. + + :raises: OperationInProgressException + + """ + params = {'ApplicationName': application_name} + if terminate_env_by_force: + params['TerminateEnvByForce'] = self._encode_bool( + terminate_env_by_force) + return self._get_response('DeleteApplication', params) + + def delete_application_version(self, application_name, version_label, + delete_source_bundle=None): + """Deletes the specified version from the specified application. + + :type application_name: string + :param application_name: The name of the application to delete + releases from. + + :type version_label: string + :param version_label: The label of the version to delete. + + :type delete_source_bundle: boolean + :param delete_source_bundle: Indicates whether to delete the + associated source bundle from Amazon S3. Valid Values: true | + false + + :raises: SourceBundleDeletionException, + InsufficientPrivilegesException, + OperationInProgressException, + S3LocationNotInServiceRegionException + """ + params = {'ApplicationName': application_name, + 'VersionLabel': version_label} + if delete_source_bundle: + params['DeleteSourceBundle'] = self._encode_bool( + delete_source_bundle) + return self._get_response('DeleteApplicationVersion', params) + + def delete_configuration_template(self, application_name, template_name): + """Deletes the specified configuration template. + + :type application_name: string + :param application_name: The name of the application to delete + the configuration template from. + + :type template_name: string + :param template_name: The name of the configuration template to + delete. + + :raises: OperationInProgressException + + """ + params = {'ApplicationName': application_name, + 'TemplateName': template_name} + return self._get_response('DeleteConfigurationTemplate', params) + + def delete_environment_configuration(self, application_name, + environment_name): + """ + Deletes the draft configuration associated with the running + environment. Updating a running environment with any + configuration changes creates a draft configuration set. You can + get the draft configuration using DescribeConfigurationSettings + while the update is in progress or if the update fails. The + DeploymentStatus for the draft configuration indicates whether + the deployment is in process or has failed. The draft + configuration remains in existence until it is deleted with this + action. + + :type application_name: string + :param application_name: The name of the application the + environment is associated with. + + :type environment_name: string + :param environment_name: The name of the environment to delete + the draft configuration from. + + """ + params = {'ApplicationName': application_name, + 'EnvironmentName': environment_name} + return self._get_response('DeleteEnvironmentConfiguration', params) + + def describe_application_versions(self, application_name=None, + version_labels=None): + """Returns descriptions for existing application versions. + + :type application_name: string + :param application_name: If specified, AWS Elastic Beanstalk restricts + the returned descriptions to only include ones that are associated + with the specified application. + + :type version_labels: list + :param version_labels: If specified, restricts the returned + descriptions to only include ones that have the specified version + labels. 
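+ + Example (editorial sketch, not from the upstream docstring; assumes + ``conn`` is an already-configured ``Layer1`` connection):: + + versions = conn.describe_application_versions( + application_name='myapp', version_labels=['v1', 'v2'])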
+ + """ + params = {} + if application_name: + params['ApplicationName'] = application_name + if version_labels: + self.build_list_params(params, version_labels, + 'VersionLabels.member') + return self._get_response('DescribeApplicationVersions', params) + + def describe_applications(self, application_names=None): + """Returns the descriptions of existing applications. + + :type application_names: list + :param application_names: If specified, AWS Elastic Beanstalk restricts + the returned descriptions to only include those with the specified + names. + + """ + params = {} + if application_names: + self.build_list_params(params, application_names, + 'ApplicationNames.member') + return self._get_response('DescribeApplications', params) + + def describe_configuration_options(self, application_name=None, + template_name=None, + environment_name=None, + solution_stack_name=None, options=None): + """Describes configuration options used in a template or environment. + + Describes the configuration options that are used in a + particular configuration template or environment, or that a + specified solution stack defines. The description includes the + values of the options, their default values, and an indication of + the required action on a running environment if an option value + is changed. + + :type application_name: string + :param application_name: The name of the application associated with + the configuration template or environment. Only needed if you want + to describe the configuration options associated with either the + configuration template or environment. + + :type template_name: string + :param template_name: The name of the configuration template whose + configuration options you want to describe. + + :type environment_name: string + :param environment_name: The name of the environment whose + configuration options you want to describe. + + :type solution_stack_name: string + :param solution_stack_name: The name of the solution stack whose + configuration options you want to describe. + + :type options: list + :param options: If specified, restricts the descriptions to only + the specified options. + """ + params = {} + if application_name: + params['ApplicationName'] = application_name + if template_name: + params['TemplateName'] = template_name + if environment_name: + params['EnvironmentName'] = environment_name + if solution_stack_name: + params['SolutionStackName'] = solution_stack_name + if options: + self.build_list_params(params, options, 'Options.member') + return self._get_response('DescribeConfigurationOptions', params) + + def describe_configuration_settings(self, application_name, + template_name=None, + environment_name=None): + """ + Returns a description of the settings for the specified + configuration set, that is, either a configuration template or + the configuration set associated with a running environment. + When describing the settings for the configuration set + associated with a running environment, it is possible to receive + two sets of setting descriptions. One is the deployed + configuration set, and the other is a draft configuration of an + environment that is either in the process of deployment or that + failed to deploy. + + :type application_name: string + :param application_name: The application for the environment or + configuration template. + + :type template_name: string + :param template_name: The name of the configuration template to + describe. Conditional: You must specify either this parameter or + an EnvironmentName, but not both.
If you specify both, AWS Elastic + Beanstalk returns an InvalidParameterCombination error. If you do + not specify either, AWS Elastic Beanstalk returns a + MissingRequiredParameter error. + + :type environment_name: string + :param environment_name: The name of the environment to describe. + Condition: You must specify either this or a TemplateName, but not + both. If you specify both, AWS Elastic Beanstalk returns an + InvalidParameterCombination error. If you do not specify either, + AWS Elastic Beanstalk returns MissingRequiredParameter error. + """ + params = {'ApplicationName': application_name} + if template_name: + params['TemplateName'] = template_name + if environment_name: + params['EnvironmentName'] = environment_name + return self._get_response('DescribeConfigurationSettings', params) + + def describe_environment_resources(self, environment_id=None, + environment_name=None): + """Returns AWS resources for this environment. + + :type environment_id: string + :param environment_id: The ID of the environment to retrieve AWS + resource usage data. Condition: You must specify either this or an + EnvironmentName, or both. If you do not specify either, AWS Elastic + Beanstalk returns MissingRequiredParameter error. + + :type environment_name: string + :param environment_name: The name of the environment to retrieve + AWS resource usage data. Condition: You must specify either this + or an EnvironmentId, or both. If you do not specify either, AWS + Elastic Beanstalk returns MissingRequiredParameter error. + + :raises: InsufficientPrivilegesException + """ + params = {} + if environment_id: + params['EnvironmentId'] = environment_id + if environment_name: + params['EnvironmentName'] = environment_name + return self._get_response('DescribeEnvironmentResources', params) + + def describe_environments(self, application_name=None, version_label=None, + environment_ids=None, environment_names=None, + include_deleted=None, + included_deleted_back_to=None): + """Returns descriptions for existing environments. + + :type application_name: string + :param application_name: If specified, AWS Elastic Beanstalk restricts + the returned descriptions to include only those that are associated + with this application. + + :type version_label: string + :param version_label: If specified, AWS Elastic Beanstalk restricts the + returned descriptions to include only those that are associated + with this application version. + + :type environment_ids: list + :param environment_ids: If specified, AWS Elastic Beanstalk restricts + the returned descriptions to include only those that have the + specified IDs. + + :type environment_names: list + :param environment_names: If specified, AWS Elastic Beanstalk restricts + the returned descriptions to include only those that have the + specified names. + + :type include_deleted: boolean + :param include_deleted: Indicates whether to include deleted + environments: true: Environments that have been deleted after + IncludedDeletedBackTo are displayed. false: Do not include deleted + environments. + + :type included_deleted_back_to: timestamp + :param included_deleted_back_to: If specified when IncludeDeleted is + set to true, then environments deleted after this date are + displayed. 
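+ + Example (editorial sketch; ``conn`` is an assumed, already-configured + ``Layer1`` connection):: + + envs = conn.describe_environments( + application_name='myapp', include_deleted=False)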
+ """ + params = {} + if application_name: + params['ApplicationName'] = application_name + if version_label: + params['VersionLabel'] = version_label + if environment_ids: + self.build_list_params(params, environment_ids, + 'EnvironmentIds.member') + if environment_names: + self.build_list_params(params, environment_names, + 'EnvironmentNames.member') + if include_deleted: + params['IncludeDeleted'] = self._encode_bool(include_deleted) + if included_deleted_back_to: + params['IncludedDeletedBackTo'] = included_deleted_back_to + return self._get_response('DescribeEnvironments', params) + + def describe_events(self, application_name=None, version_label=None, + template_name=None, environment_id=None, + environment_name=None, request_id=None, severity=None, + start_time=None, end_time=None, max_records=None, + next_token=None): + """Returns event descriptions matching criteria up to the last 6 weeks. + + :type application_name: string + :param application_name: If specified, AWS Elastic Beanstalk restricts + the returned descriptions to include only those associated with + this application. + + :type version_label: string + :param version_label: If specified, AWS Elastic Beanstalk restricts the + returned descriptions to those associated with this application + version. + + :type template_name: string + :param template_name: If specified, AWS Elastic Beanstalk restricts the + returned descriptions to those that are associated with this + environment configuration. + + :type environment_id: string + :param environment_id: If specified, AWS Elastic Beanstalk restricts + the returned descriptions to those associated with this + environment. + + :type environment_name: string + :param environment_name: If specified, AWS Elastic Beanstalk restricts + the returned descriptions to those associated with this + environment. + + :type request_id: string + :param request_id: If specified, AWS Elastic Beanstalk restricts the + described events to include only those associated with this request + ID. + + :type severity: string + :param severity: If specified, limits the events returned from this + call to include only those with the specified severity or higher. + + :type start_time: timestamp + :param start_time: If specified, AWS Elastic Beanstalk restricts the + returned descriptions to those that occur on or after this time. + + :type end_time: timestamp + :param end_time: If specified, AWS Elastic Beanstalk restricts the + returned descriptions to those that occur up to, but not including, + the EndTime. + + :type max_records: integer + :param max_records: Specifies the maximum number of events that can be + returned, beginning with the most recent event. + + :type next_token: string + :param next_token: Pagination token. If specified, the events return + the next batch of results. 
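+ + Example (editorial sketch; ``conn`` is an assumed ``Layer1`` + connection):: + + events = conn.describe_events( + application_name='myapp', severity='ERROR', max_records=25)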
+ """ + params = {} + if application_name: + params['ApplicationName'] = application_name + if version_label: + params['VersionLabel'] = version_label + if template_name: + params['TemplateName'] = template_name + if environment_id: + params['EnvironmentId'] = environment_id + if environment_name: + params['EnvironmentName'] = environment_name + if request_id: + params['RequestId'] = request_id + if severity: + params['Severity'] = severity + if start_time: + params['StartTime'] = start_time + if end_time: + params['EndTime'] = end_time + if max_records: + params['MaxRecords'] = max_records + if next_token: + params['NextToken'] = next_token + return self._get_response('DescribeEvents', params) + + def list_available_solution_stacks(self): + """Returns a list of the available solution stack names.""" + return self._get_response('ListAvailableSolutionStacks', params={}) + + def rebuild_environment(self, environment_id=None, environment_name=None): + """ + Deletes and recreates all of the AWS resources (for example: + the Auto Scaling group, load balancer, etc.) for a specified + environment and forces a restart. + + :type environment_id: string + :param environment_id: The ID of the environment to rebuild. + Condition: You must specify either this or an EnvironmentName, or + both. If you do not specify either, AWS Elastic Beanstalk returns + MissingRequiredParameter error. + + :type environment_name: string + :param environment_name: The name of the environment to rebuild. + Condition: You must specify either this or an EnvironmentId, or + both. If you do not specify either, AWS Elastic Beanstalk returns + MissingRequiredParameter error. + + :raises: InsufficientPrivilegesException + """ + params = {} + if environment_id: + params['EnvironmentId'] = environment_id + if environment_name: + params['EnvironmentName'] = environment_name + return self._get_response('RebuildEnvironment', params) + + def request_environment_info(self, info_type='tail', environment_id=None, + environment_name=None): + """ + Initiates a request to compile the specified type of + information of the deployed environment. Setting the InfoType + to tail compiles the last lines from the application server log + files of every Amazon EC2 instance in your environment. Use + RetrieveEnvironmentInfo to access the compiled information. + + :type info_type: string + :param info_type: The type of information to request. + + :type environment_id: string + :param environment_id: The ID of the environment of the + requested data. If no such environment is found, + RequestEnvironmentInfo returns an InvalidParameterValue error. + Condition: You must specify either this or an EnvironmentName, or + both. If you do not specify either, AWS Elastic Beanstalk returns + MissingRequiredParameter error. + + :type environment_name: string + :param environment_name: The name of the environment of the + requested data. If no such environment is found, + RequestEnvironmentInfo returns an InvalidParameterValue error. + Condition: You must specify either this or an EnvironmentId, or + both. If you do not specify either, AWS Elastic Beanstalk returns + MissingRequiredParameter error. 
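+ + Example (editorial sketch; ``conn`` is an assumed ``Layer1`` connection; + pair this with ``retrieve_environment_info`` to fetch the result):: + + conn.request_environment_info( + info_type='tail', environment_name='myapp-env')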
+ """ + params = {'InfoType': info_type} + if environment_id: + params['EnvironmentId'] = environment_id + if environment_name: + params['EnvironmentName'] = environment_name + return self._get_response('RequestEnvironmentInfo', params) + + def restart_app_server(self, environment_id=None, environment_name=None): + """ + Causes the environment to restart the application container + server running on each Amazon EC2 instance. + + :type environment_id: string + :param environment_id: The ID of the environment to restart the server + for. Condition: You must specify either this or an + EnvironmentName, or both. If you do not specify either, AWS Elastic + Beanstalk returns MissingRequiredParameter error. + + :type environment_name: string + :param environment_name: The name of the environment to restart the + server for. Condition: You must specify either this or an + EnvironmentId, or both. If you do not specify either, AWS Elastic + Beanstalk returns MissingRequiredParameter error. + """ + params = {} + if environment_id: + params['EnvironmentId'] = environment_id + if environment_name: + params['EnvironmentName'] = environment_name + return self._get_response('RestartAppServer', params) + + def retrieve_environment_info(self, info_type='tail', environment_id=None, + environment_name=None): + """ + Retrieves the compiled information from a RequestEnvironmentInfo + request. + + :type info_type: string + :param info_type: The type of information to retrieve. + + :type environment_id: string + :param environment_id: The ID of the data's environment. If no such + environment is found, returns an InvalidParameterValue error. + Condition: You must specify either this or an EnvironmentName, or + both. If you do not specify either, AWS Elastic Beanstalk returns + MissingRequiredParameter error. + + :type environment_name: string + :param environment_name: The name of the data's environment. If no such + environment is found, returns an InvalidParameterValue error. + Condition: You must specify either this or an EnvironmentId, or + both. If you do not specify either, AWS Elastic Beanstalk returns + MissingRequiredParameter error. + """ + params = {'InfoType': info_type} + if environment_id: + params['EnvironmentId'] = environment_id + if environment_name: + params['EnvironmentName'] = environment_name + return self._get_response('RetrieveEnvironmentInfo', params) + + def swap_environment_cnames(self, source_environment_id=None, + source_environment_name=None, + destination_environment_id=None, + destination_environment_name=None): + """Swaps the CNAMEs of two environments. + + :type source_environment_id: string + :param source_environment_id: The ID of the source environment. + Condition: You must specify at least the SourceEnvironmentID or the + SourceEnvironmentName. You may also specify both. If you specify + the SourceEnvironmentId, you must specify the + DestinationEnvironmentId. + + :type source_environment_name: string + :param source_environment_name: The name of the source environment. + Condition: You must specify at least the SourceEnvironmentID or the + SourceEnvironmentName. You may also specify both. If you specify + the SourceEnvironmentName, you must specify the + DestinationEnvironmentName. + + :type destination_environment_id: string + :param destination_environment_id: The ID of the destination + environment. Condition: You must specify at least the + DestinationEnvironmentID or the DestinationEnvironmentName. You may + also specify both. 
You must specify the SourceEnvironmentId with + the DestinationEnvironmentId. + + :type destination_environment_name: string + :param destination_environment_name: The name of the destination + environment. Condition: You must specify at least the + DestinationEnvironmentID or the DestinationEnvironmentName. You may + also specify both. You must specify the SourceEnvironmentName with + the DestinationEnvironmentName. + """ + params = {} + if source_environment_id: + params['SourceEnvironmentId'] = source_environment_id + if source_environment_name: + params['SourceEnvironmentName'] = source_environment_name + if destination_environment_id: + params['DestinationEnvironmentId'] = destination_environment_id + if destination_environment_name: + params['DestinationEnvironmentName'] = destination_environment_name + return self._get_response('SwapEnvironmentCNAMEs', params) + + def terminate_environment(self, environment_id=None, environment_name=None, + terminate_resources=None): + """Terminates the specified environment. + + :type environment_id: string + :param environment_id: The ID of the environment to terminate. + Condition: You must specify either this or an EnvironmentName, or + both. If you do not specify either, AWS Elastic Beanstalk returns + MissingRequiredParameter error. + + :type environment_name: string + :param environment_name: The name of the environment to terminate. + Condition: You must specify either this or an EnvironmentId, or + both. If you do not specify either, AWS Elastic Beanstalk returns + MissingRequiredParameter error. + + :type terminate_resources: boolean + :param terminate_resources: Indicates whether the associated AWS + resources should shut down when the environment is terminated: + true: (default) The user AWS resources (for example, the Auto + Scaling group, LoadBalancer, etc.) are terminated along with the + environment. false: The environment is removed from AWS + Elastic Beanstalk but the AWS resources continue to operate. For + more information, see the AWS Elastic Beanstalk User Guide. + Default: true Valid Values: true | false + + :raises: InsufficientPrivilegesException + """ + params = {} + if environment_id: + params['EnvironmentId'] = environment_id + if environment_name: + params['EnvironmentName'] = environment_name + if terminate_resources: + params['TerminateResources'] = self._encode_bool( + terminate_resources) + return self._get_response('TerminateEnvironment', params) + + def update_application(self, application_name, description=None): + """ + Updates the specified application to have the specified + properties. + + :type application_name: string + :param application_name: The name of the application to update. + If no such application is found, UpdateApplication returns an + InvalidParameterValue error. + + :type description: string + :param description: A new description for the application. Default: If + not specified, AWS Elastic Beanstalk does not update the + description. + """ + params = {'ApplicationName': application_name} + if description: + params['Description'] = description + return self._get_response('UpdateApplication', params) + + def update_application_version(self, application_name, version_label, + description=None): + """Updates the application version to have the specified properties. + + :type application_name: string + :param application_name: The name of the application associated with + this version. If no application is found with this name, + UpdateApplication returns an InvalidParameterValue error.
+ + :type version_label: string + :param version_label: The name of the version to update. If no + application version is found with this label, UpdateApplication + returns an InvalidParameterValue error. + + :type description: string + :param description: A new description for this release. + """ + params = {'ApplicationName': application_name, + 'VersionLabel': version_label} + if description: + params['Description'] = description + return self._get_response('UpdateApplicationVersion', params) + + def update_configuration_template(self, application_name, template_name, + description=None, option_settings=None, + options_to_remove=None): + """ + Updates the specified configuration template to have the + specified properties or configuration option values. + + :type application_name: string + :param application_name: The name of the application associated with + the configuration template to update. If no application is found + with this name, UpdateConfigurationTemplate returns an + InvalidParameterValue error. + + :type template_name: string + :param template_name: The name of the configuration template to update. + If no configuration template is found with this name, + UpdateConfigurationTemplate returns an InvalidParameterValue error. + + :type description: string + :param description: A new description for the configuration. + + :type option_settings: list + :param option_settings: A list of configuration option settings to + update with the new specified option value. + + :type options_to_remove: list + :param options_to_remove: A list of configuration options to remove + from the configuration set. Constraint: You can remove only + UserDefined configuration options. + + :raises: InsufficientPrivilegesException + """ + params = {'ApplicationName': application_name, + 'TemplateName': template_name} + if description: + params['Description'] = description + if option_settings: + self._build_list_params(params, option_settings, + 'OptionSettings.member', + ('Namespace', 'OptionName', 'Value')) + if options_to_remove: + self.build_list_params(params, options_to_remove, + 'OptionsToRemove.member') + return self._get_response('UpdateConfigurationTemplate', params) + + def update_environment(self, environment_id=None, environment_name=None, + version_label=None, template_name=None, + description=None, option_settings=None, + options_to_remove=None, tier_name=None, + tier_type=None, tier_version='1.0'): + """ + Updates the environment description, deploys a new application + version, updates the configuration settings to an entirely new + configuration template, or updates select configuration option + values in the running environment. Attempting to update both + the release and configuration is not allowed and AWS Elastic + Beanstalk returns an InvalidParameterCombination error. When + updating the configuration settings to a new template or + individual settings, a draft configuration is created and + DescribeConfigurationSettings for this environment returns two + setting descriptions with different DeploymentStatus values. + + :type environment_id: string + :param environment_id: The ID of the environment to update. If no + environment with this ID exists, AWS Elastic Beanstalk returns an + InvalidParameterValue error. Condition: You must specify either + this or an EnvironmentName, or both. If you do not specify either, + AWS Elastic Beanstalk returns MissingRequiredParameter error. + + :type environment_name: string + :param environment_name: The name of the environment to update. 
If no + environment with this name exists, AWS Elastic Beanstalk returns an + InvalidParameterValue error. Condition: You must specify either + this or an EnvironmentId, or both. If you do not specify either, + AWS Elastic Beanstalk returns MissingRequiredParameter error. + + :type version_label: string + :param version_label: If this parameter is specified, AWS Elastic + Beanstalk deploys the named application version to the environment. + If no such application version is found, returns an + InvalidParameterValue error. + + :type template_name: string + :param template_name: If this parameter is specified, AWS Elastic + Beanstalk deploys this configuration template to the environment. + If no such configuration template is found, AWS Elastic Beanstalk + returns an InvalidParameterValue error. + + :type description: string + :param description: If this parameter is specified, AWS Elastic + Beanstalk updates the description of this environment. + + :type option_settings: list + :param option_settings: If specified, AWS Elastic Beanstalk updates the + configuration set associated with the running environment and sets + the specified configuration options to the requested value. + + :type options_to_remove: list + :param options_to_remove: A list of custom user-defined configuration + options to remove from the configuration set for this environment. + + :type tier_name: string + :param tier_name: The name of the tier. Valid values are + "WebServer" and "Worker". Defaults to "WebServer". + The ``tier_name`` and ``tier_type`` parameters are + related and the values provided must be valid. + The possible combinations are: + + * "WebServer" and "Standard" (the default) + * "Worker" and "SQS/HTTP" + + :type tier_type: string + :param tier_type: The type of the tier. Valid values are + "Standard" if ``tier_name`` is "WebServer" and "SQS/HTTP" + if ``tier_name`` is "Worker". Defaults to "Standard". + + :type tier_version: string + :param tier_version: The version of the tier. Valid values + currently are "1.0". Defaults to "1.0". + + :raises: InsufficientPrivilegesException + """ + params = {} + if environment_id: + params['EnvironmentId'] = environment_id + if environment_name: + params['EnvironmentName'] = environment_name + if version_label: + params['VersionLabel'] = version_label + if template_name: + params['TemplateName'] = template_name + if description: + params['Description'] = description + if option_settings: + self._build_list_params(params, option_settings, + 'OptionSettings.member', + ('Namespace', 'OptionName', 'Value')) + if options_to_remove: + self.build_list_params(params, options_to_remove, + 'OptionsToRemove.member') + if tier_name and tier_type and tier_version: + params['Tier.Name'] = tier_name + params['Tier.Type'] = tier_type + params['Tier.Version'] = tier_version + return self._get_response('UpdateEnvironment', params) + + def validate_configuration_settings(self, application_name, + option_settings, template_name=None, + environment_name=None): + """ + Takes a set of configuration settings and either a + configuration template or environment, and determines whether + those values are valid. This action returns a list of messages + indicating any errors or warnings associated with the selection + of option values. + + :type application_name: string + :param application_name: The name of the application that the + configuration template or environment belongs to.
+ + :type template_name: string + :param template_name: The name of the configuration template to + validate the settings against. Condition: You cannot specify both + this and an environment name. + + :type environment_name: string + :param environment_name: The name of the environment to validate the + settings against. Condition: You cannot specify both this and a + configuration template name. + + :type option_settings: list + :param option_settings: A list of the options and desired values to + evaluate. + + :raises: InsufficientPrivilegesException + """ + params = {'ApplicationName': application_name} + self._build_list_params(params, option_settings, + 'OptionSettings.member', + ('Namespace', 'OptionName', 'Value')) + if template_name: + params['TemplateName'] = template_name + if environment_name: + params['EnvironmentName'] = environment_name + return self._get_response('ValidateConfigurationSettings', params) + + def _build_list_params(self, params, user_values, prefix, tuple_names): + # For params such as the ConfigurationOptionSettings, + # callers can specify a list of tuples where each tuple maps to a specific + # arg. For example: + # user_values = [('foo', 'bar', 'baz')] + # prefix=MyOption.member + # tuple_names=('One', 'Two', 'Three') + # would result in: + # MyOption.member.1.One = foo + # MyOption.member.1.Two = bar + # MyOption.member.1.Three = baz + for i, user_value in enumerate(user_values, 1): + current_prefix = '%s.%s' % (prefix, i) + for key, value in zip(tuple_names, user_value): + full_key = '%s.%s' % (current_prefix, key) + params[full_key] = value diff --git a/desktop/core/ext-py/boto-2.38.0/boto/beanstalk/response.py b/desktop/core/ext-py/boto-2.38.0/boto/beanstalk/response.py new file mode 100644 index 0000000000000000000000000000000000000000..8128ba1fed17f5194165aebfea5b02babc39635b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/beanstalk/response.py @@ -0,0 +1,704 @@ +"""Classify responses from layer1 and strictly type values.""" +from datetime import datetime +from boto.compat import six + + +class BaseObject(object): + + def __repr__(self): + result = self.__class__.__name__ + '{ ' + counter = 0 + for key, value in six.iteritems(self.__dict__): + # first iteration no comma + counter += 1 + if counter > 1: + result += ', ' + result += key + ': ' + result += self._repr_by_type(value) + result += ' }' + return result + + def _repr_by_type(self, value): + # Everything is either a 'Response', 'list', or 'None/str/int/bool'. + result = '' + if isinstance(value, Response): + result += value.__repr__() + elif isinstance(value, list): + result += self._repr_list(value) + else: + result += str(value) + return result + + def _repr_list(self, array): + result = '[' + for value in array: + result += ' ' + self._repr_by_type(value) + ',' + # Replace the trailing comma with a space.
+ if len(result) > 1: + result = result[:-1] + ' ' + result += ']' + return result + + +class Response(BaseObject): + def __init__(self, response): + super(Response, self).__init__() + + if response['ResponseMetadata']: + self.response_metadata = ResponseMetadata(response['ResponseMetadata']) + else: + self.response_metadata = None + + +class ResponseMetadata(BaseObject): + def __init__(self, response): + super(ResponseMetadata, self).__init__() + + self.request_id = str(response['RequestId']) + + +class ApplicationDescription(BaseObject): + def __init__(self, response): + super(ApplicationDescription, self).__init__() + + self.application_name = str(response['ApplicationName']) + self.configuration_templates = [] + if response['ConfigurationTemplates']: + for member in response['ConfigurationTemplates']: + configuration_template = str(member) + self.configuration_templates.append(configuration_template) + self.date_created = datetime.fromtimestamp(response['DateCreated']) + self.date_updated = datetime.fromtimestamp(response['DateUpdated']) + self.description = str(response['Description']) + self.versions = [] + if response['Versions']: + for member in response['Versions']: + version = str(member) + self.versions.append(version) + + +class ApplicationVersionDescription(BaseObject): + def __init__(self, response): + super(ApplicationVersionDescription, self).__init__() + + self.application_name = str(response['ApplicationName']) + self.date_created = datetime.fromtimestamp(response['DateCreated']) + self.date_updated = datetime.fromtimestamp(response['DateUpdated']) + self.description = str(response['Description']) + if response['SourceBundle']: + self.source_bundle = S3Location(response['SourceBundle']) + else: + self.source_bundle = None + self.version_label = str(response['VersionLabel']) + + +class AutoScalingGroup(BaseObject): + def __init__(self, response): + super(AutoScalingGroup, self).__init__() + + self.name = str(response['Name']) + + +class ConfigurationOptionDescription(BaseObject): + def __init__(self, response): + super(ConfigurationOptionDescription, self).__init__() + + self.change_severity = str(response['ChangeSeverity']) + self.default_value = str(response['DefaultValue']) + self.max_length = int(response['MaxLength']) if response['MaxLength'] else None + self.max_value = int(response['MaxValue']) if response['MaxValue'] else None + self.min_value = int(response['MinValue']) if response['MinValue'] else None + self.name = str(response['Name']) + self.namespace = str(response['Namespace']) + if response['Regex']: + self.regex = OptionRestrictionRegex(response['Regex']) + else: + self.regex = None + self.user_defined = str(response['UserDefined']) + self.value_options = [] + if response['ValueOptions']: + for member in response['ValueOptions']: + value_option = str(member) + self.value_options.append(value_option) + self.value_type = str(response['ValueType']) + + +class ConfigurationOptionSetting(BaseObject): + def __init__(self, response): + super(ConfigurationOptionSetting, self).__init__() + + self.namespace = str(response['Namespace']) + self.option_name = str(response['OptionName']) + self.value = str(response['Value']) + + +class ConfigurationSettingsDescription(BaseObject): + def __init__(self, response): + super(ConfigurationSettingsDescription, self).__init__() + + self.application_name = str(response['ApplicationName']) + self.date_created = datetime.fromtimestamp(response['DateCreated']) + self.date_updated = datetime.fromtimestamp(response['DateUpdated']) + 
self.deployment_status = str(response['DeploymentStatus']) + self.description = str(response['Description']) + self.environment_name = str(response['EnvironmentName']) + self.option_settings = [] + if response['OptionSettings']: + for member in response['OptionSettings']: + option_setting = ConfigurationOptionSetting(member) + self.option_settings.append(option_setting) + self.solution_stack_name = str(response['SolutionStackName']) + self.template_name = str(response['TemplateName']) + + +class EnvironmentDescription(BaseObject): + def __init__(self, response): + super(EnvironmentDescription, self).__init__() + + self.application_name = str(response['ApplicationName']) + self.cname = str(response['CNAME']) + self.date_created = datetime.fromtimestamp(response['DateCreated']) + self.date_updated = datetime.fromtimestamp(response['DateUpdated']) + self.description = str(response['Description']) + self.endpoint_url = str(response['EndpointURL']) + self.environment_id = str(response['EnvironmentId']) + self.environment_name = str(response['EnvironmentName']) + self.health = str(response['Health']) + if response['Resources']: + self.resources = EnvironmentResourcesDescription(response['Resources']) + else: + self.resources = None + self.solution_stack_name = str(response['SolutionStackName']) + self.status = str(response['Status']) + self.template_name = str(response['TemplateName']) + self.version_label = str(response['VersionLabel']) + + +class EnvironmentInfoDescription(BaseObject): + def __init__(self, response): + super(EnvironmentInfoDescription, self).__init__() + + self.ec2_instance_id = str(response['Ec2InstanceId']) + self.info_type = str(response['InfoType']) + self.message = str(response['Message']) + self.sample_timestamp = datetime.fromtimestamp(response['SampleTimestamp']) + + +class EnvironmentResourceDescription(BaseObject): + def __init__(self, response): + super(EnvironmentResourceDescription, self).__init__() + + self.auto_scaling_groups = [] + if response['AutoScalingGroups']: + for member in response['AutoScalingGroups']: + auto_scaling_group = AutoScalingGroup(member) + self.auto_scaling_groups.append(auto_scaling_group) + self.environment_name = str(response['EnvironmentName']) + self.instances = [] + if response['Instances']: + for member in response['Instances']: + instance = Instance(member) + self.instances.append(instance) + self.launch_configurations = [] + if response['LaunchConfigurations']: + for member in response['LaunchConfigurations']: + launch_configuration = LaunchConfiguration(member) + self.launch_configurations.append(launch_configuration) + self.load_balancers = [] + if response['LoadBalancers']: + for member in response['LoadBalancers']: + load_balancer = LoadBalancer(member) + self.load_balancers.append(load_balancer) + self.triggers = [] + if response['Triggers']: + for member in response['Triggers']: + trigger = Trigger(member) + self.triggers.append(trigger) + + +class EnvironmentResourcesDescription(BaseObject): + def __init__(self, response): + super(EnvironmentResourcesDescription, self).__init__() + + if response['LoadBalancer']: + self.load_balancer = LoadBalancerDescription(response['LoadBalancer']) + else: + self.load_balancer = None + + +class EventDescription(BaseObject): + def __init__(self, response): + super(EventDescription, self).__init__() + + self.application_name = str(response['ApplicationName']) + self.environment_name = str(response['EnvironmentName']) + self.event_date = datetime.fromtimestamp(response['EventDate']) + 
self.message = str(response['Message']) + self.request_id = str(response['RequestId']) + self.severity = str(response['Severity']) + self.template_name = str(response['TemplateName']) + self.version_label = str(response['VersionLabel']) + + +class Instance(BaseObject): + def __init__(self, response): + super(Instance, self).__init__() + + self.id = str(response['Id']) + + +class LaunchConfiguration(BaseObject): + def __init__(self, response): + super(LaunchConfiguration, self).__init__() + + self.name = str(response['Name']) + + +class Listener(BaseObject): + def __init__(self, response): + super(Listener, self).__init__() + + self.port = int(response['Port']) if response['Port'] else None + self.protocol = str(response['Protocol']) + + +class LoadBalancer(BaseObject): + def __init__(self, response): + super(LoadBalancer, self).__init__() + + self.name = str(response['Name']) + + +class LoadBalancerDescription(BaseObject): + def __init__(self, response): + super(LoadBalancerDescription, self).__init__() + + self.domain = str(response['Domain']) + self.listeners = [] + if response['Listeners']: + for member in response['Listeners']: + listener = Listener(member) + self.listeners.append(listener) + self.load_balancer_name = str(response['LoadBalancerName']) + + +class OptionRestrictionRegex(BaseObject): + def __init__(self, response): + super(OptionRestrictionRegex, self).__init__() + + self.label = response['Label'] + self.pattern = response['Pattern'] + + +class SolutionStackDescription(BaseObject): + def __init__(self, response): + super(SolutionStackDescription, self).__init__() + + self.permitted_file_types = [] + if response['PermittedFileTypes']: + for member in response['PermittedFileTypes']: + permitted_file_type = str(member) + self.permitted_file_types.append(permitted_file_type) + self.solution_stack_name = str(response['SolutionStackName']) + + +class S3Location(BaseObject): + def __init__(self, response): + super(S3Location, self).__init__() + + self.s3_bucket = str(response['S3Bucket']) + self.s3_key = str(response['S3Key']) + + +class Trigger(BaseObject): + def __init__(self, response): + super(Trigger, self).__init__() + + self.name = str(response['Name']) + + +class ValidationMessage(BaseObject): + def __init__(self, response): + super(ValidationMessage, self).__init__() + + self.message = str(response['Message']) + self.namespace = str(response['Namespace']) + self.option_name = str(response['OptionName']) + self.severity = str(response['Severity']) + + +# These are the response objects layer2 uses, one for each layer1 api call. +class CheckDNSAvailabilityResponse(Response): + def __init__(self, response): + response = response['CheckDNSAvailabilityResponse'] + super(CheckDNSAvailabilityResponse, self).__init__(response) + + response = response['CheckDNSAvailabilityResult'] + self.fully_qualified_cname = str(response['FullyQualifiedCNAME']) + self.available = bool(response['Available']) + + +# Our naming convention produces this class name but the api names it with more +# capitals.
+class CheckDnsAvailabilityResponse(CheckDNSAvailabilityResponse): pass + + +class CreateApplicationResponse(Response): + def __init__(self, response): + response = response['CreateApplicationResponse'] + super(CreateApplicationResponse, self).__init__(response) + + response = response['CreateApplicationResult'] + if response['Application']: + self.application = ApplicationDescription(response['Application']) + else: + self.application = None + + +class CreateApplicationVersionResponse(Response): + def __init__(self, response): + response = response['CreateApplicationVersionResponse'] + super(CreateApplicationVersionResponse, self).__init__(response) + + response = response['CreateApplicationVersionResult'] + if response['ApplicationVersion']: + self.application_version = ApplicationVersionDescription(response['ApplicationVersion']) + else: + self.application_version = None + + +class CreateConfigurationTemplateResponse(Response): + def __init__(self, response): + response = response['CreateConfigurationTemplateResponse'] + super(CreateConfigurationTemplateResponse, self).__init__(response) + + response = response['CreateConfigurationTemplateResult'] + self.application_name = str(response['ApplicationName']) + self.date_created = datetime.fromtimestamp(response['DateCreated']) + self.date_updated = datetime.fromtimestamp(response['DateUpdated']) + self.deployment_status = str(response['DeploymentStatus']) + self.description = str(response['Description']) + self.environment_name = str(response['EnvironmentName']) + self.option_settings = [] + if response['OptionSettings']: + for member in response['OptionSettings']: + option_setting = ConfigurationOptionSetting(member) + self.option_settings.append(option_setting) + self.solution_stack_name = str(response['SolutionStackName']) + self.template_name = str(response['TemplateName']) + + +class CreateEnvironmentResponse(Response): + def __init__(self, response): + response = response['CreateEnvironmentResponse'] + super(CreateEnvironmentResponse, self).__init__(response) + + response = response['CreateEnvironmentResult'] + self.application_name = str(response['ApplicationName']) + self.cname = str(response['CNAME']) + self.date_created = datetime.fromtimestamp(response['DateCreated']) + self.date_updated = datetime.fromtimestamp(response['DateUpdated']) + self.description = str(response['Description']) + self.endpoint_url = str(response['EndpointURL']) + self.environment_id = str(response['EnvironmentId']) + self.environment_name = str(response['EnvironmentName']) + self.health = str(response['Health']) + if response['Resources']: + self.resources = EnvironmentResourcesDescription(response['Resources']) + else: + self.resources = None + self.solution_stack_name = str(response['SolutionStackName']) + self.status = str(response['Status']) + self.template_name = str(response['TemplateName']) + self.version_label = str(response['VersionLabel']) + + +class CreateStorageLocationResponse(Response): + def __init__(self, response): + response = response['CreateStorageLocationResponse'] + super(CreateStorageLocationResponse, self).__init__(response) + + response = response['CreateStorageLocationResult'] + self.s3_bucket = str(response['S3Bucket']) + + +class DeleteApplicationResponse(Response): + def __init__(self, response): + response = response['DeleteApplicationResponse'] + super(DeleteApplicationResponse, self).__init__(response) + + +class DeleteApplicationVersionResponse(Response): + def __init__(self, response): + response = 
response['DeleteApplicationVersionResponse'] + super(DeleteApplicationVersionResponse, self).__init__(response) + + +class DeleteConfigurationTemplateResponse(Response): + def __init__(self, response): + response = response['DeleteConfigurationTemplateResponse'] + super(DeleteConfigurationTemplateResponse, self).__init__(response) + + +class DeleteEnvironmentConfigurationResponse(Response): + def __init__(self, response): + response = response['DeleteEnvironmentConfigurationResponse'] + super(DeleteEnvironmentConfigurationResponse, self).__init__(response) + + +class DescribeApplicationVersionsResponse(Response): + def __init__(self, response): + response = response['DescribeApplicationVersionsResponse'] + super(DescribeApplicationVersionsResponse, self).__init__(response) + + response = response['DescribeApplicationVersionsResult'] + self.application_versions = [] + if response['ApplicationVersions']: + for member in response['ApplicationVersions']: + application_version = ApplicationVersionDescription(member) + self.application_versions.append(application_version) + + +class DescribeApplicationsResponse(Response): + def __init__(self, response): + response = response['DescribeApplicationsResponse'] + super(DescribeApplicationsResponse, self).__init__(response) + + response = response['DescribeApplicationsResult'] + self.applications = [] + if response['Applications']: + for member in response['Applications']: + application = ApplicationDescription(member) + self.applications.append(application) + + +class DescribeConfigurationOptionsResponse(Response): + def __init__(self, response): + response = response['DescribeConfigurationOptionsResponse'] + super(DescribeConfigurationOptionsResponse, self).__init__(response) + + response = response['DescribeConfigurationOptionsResult'] + self.options = [] + if response['Options']: + for member in response['Options']: + option = ConfigurationOptionDescription(member) + self.options.append(option) + self.solution_stack_name = str(response['SolutionStackName']) + + +class DescribeConfigurationSettingsResponse(Response): + def __init__(self, response): + response = response['DescribeConfigurationSettingsResponse'] + super(DescribeConfigurationSettingsResponse, self).__init__(response) + + response = response['DescribeConfigurationSettingsResult'] + self.configuration_settings = [] + if response['ConfigurationSettings']: + for member in response['ConfigurationSettings']: + configuration_setting = ConfigurationSettingsDescription(member) + self.configuration_settings.append(configuration_setting) + + +class DescribeEnvironmentResourcesResponse(Response): + def __init__(self, response): + response = response['DescribeEnvironmentResourcesResponse'] + super(DescribeEnvironmentResourcesResponse, self).__init__(response) + + response = response['DescribeEnvironmentResourcesResult'] + if response['EnvironmentResources']: + self.environment_resources = EnvironmentResourceDescription(response['EnvironmentResources']) + else: + self.environment_resources = None + + +class DescribeEnvironmentsResponse(Response): + def __init__(self, response): + response = response['DescribeEnvironmentsResponse'] + super(DescribeEnvironmentsResponse, self).__init__(response) + + response = response['DescribeEnvironmentsResult'] + self.environments = [] + if response['Environments']: + for member in response['Environments']: + environment = EnvironmentDescription(member) + self.environments.append(environment) + + +class DescribeEventsResponse(Response): + def __init__(self, 
response): + response = response['DescribeEventsResponse'] + super(DescribeEventsResponse, self).__init__(response) + + response = response['DescribeEventsResult'] + self.events = [] + if response['Events']: + for member in response['Events']: + event = EventDescription(member) + self.events.append(event) + self.next_token = str(response['NextToken']) + + +class ListAvailableSolutionStacksResponse(Response): + def __init__(self, response): + response = response['ListAvailableSolutionStacksResponse'] + super(ListAvailableSolutionStacksResponse, self).__init__(response) + + response = response['ListAvailableSolutionStacksResult'] + self.solution_stack_details = [] + if response['SolutionStackDetails']: + for member in response['SolutionStackDetails']: + solution_stack_detail = SolutionStackDescription(member) + self.solution_stack_details.append(solution_stack_detail) + self.solution_stacks = [] + if response['SolutionStacks']: + for member in response['SolutionStacks']: + solution_stack = str(member) + self.solution_stacks.append(solution_stack) + + +class RebuildEnvironmentResponse(Response): + def __init__(self, response): + response = response['RebuildEnvironmentResponse'] + super(RebuildEnvironmentResponse, self).__init__(response) + + +class RequestEnvironmentInfoResponse(Response): + def __init__(self, response): + response = response['RequestEnvironmentInfoResponse'] + super(RequestEnvironmentInfoResponse, self).__init__(response) + + +class RestartAppServerResponse(Response): + def __init__(self, response): + response = response['RestartAppServerResponse'] + super(RestartAppServerResponse, self).__init__(response) + + +class RetrieveEnvironmentInfoResponse(Response): + def __init__(self, response): + response = response['RetrieveEnvironmentInfoResponse'] + super(RetrieveEnvironmentInfoResponse, self).__init__(response) + + response = response['RetrieveEnvironmentInfoResult'] + self.environment_info = [] + if response['EnvironmentInfo']: + for member in response['EnvironmentInfo']: + environment_info = EnvironmentInfoDescription(member) + self.environment_info.append(environment_info) + + +class SwapEnvironmentCNAMEsResponse(Response): + def __init__(self, response): + response = response['SwapEnvironmentCNAMEsResponse'] + super(SwapEnvironmentCNAMEsResponse, self).__init__(response) + + +class SwapEnvironmentCnamesResponse(SwapEnvironmentCNAMEsResponse): pass + + +class TerminateEnvironmentResponse(Response): + def __init__(self, response): + response = response['TerminateEnvironmentResponse'] + super(TerminateEnvironmentResponse, self).__init__(response) + + response = response['TerminateEnvironmentResult'] + self.application_name = str(response['ApplicationName']) + self.cname = str(response['CNAME']) + self.date_created = datetime.fromtimestamp(response['DateCreated']) + self.date_updated = datetime.fromtimestamp(response['DateUpdated']) + self.description = str(response['Description']) + self.endpoint_url = str(response['EndpointURL']) + self.environment_id = str(response['EnvironmentId']) + self.environment_name = str(response['EnvironmentName']) + self.health = str(response['Health']) + if response['Resources']: + self.resources = EnvironmentResourcesDescription(response['Resources']) + else: + self.resources = None + self.solution_stack_name = str(response['SolutionStackName']) + self.status = str(response['Status']) + self.template_name = str(response['TemplateName']) + self.version_label = str(response['VersionLabel']) + + +class UpdateApplicationResponse(Response): + def
__init__(self, response): + response = response['UpdateApplicationResponse'] + super(UpdateApplicationResponse, self).__init__(response) + + response = response['UpdateApplicationResult'] + if response['Application']: + self.application = ApplicationDescription(response['Application']) + else: + self.application = None + + +class UpdateApplicationVersionResponse(Response): + def __init__(self, response): + response = response['UpdateApplicationVersionResponse'] + super(UpdateApplicationVersionResponse, self).__init__(response) + + response = response['UpdateApplicationVersionResult'] + if response['ApplicationVersion']: + self.application_version = ApplicationVersionDescription(response['ApplicationVersion']) + else: + self.application_version = None + + +class UpdateConfigurationTemplateResponse(Response): + def __init__(self, response): + response = response['UpdateConfigurationTemplateResponse'] + super(UpdateConfigurationTemplateResponse, self).__init__(response) + + response = response['UpdateConfigurationTemplateResult'] + self.application_name = str(response['ApplicationName']) + self.date_created = datetime.fromtimestamp(response['DateCreated']) + self.date_updated = datetime.fromtimestamp(response['DateUpdated']) + self.deployment_status = str(response['DeploymentStatus']) + self.description = str(response['Description']) + self.environment_name = str(response['EnvironmentName']) + self.option_settings = [] + if response['OptionSettings']: + for member in response['OptionSettings']: + option_setting = ConfigurationOptionSetting(member) + self.option_settings.append(option_setting) + self.solution_stack_name = str(response['SolutionStackName']) + self.template_name = str(response['TemplateName']) + + +class UpdateEnvironmentResponse(Response): + def __init__(self, response): + response = response['UpdateEnvironmentResponse'] + super(UpdateEnvironmentResponse, self).__init__(response) + + response = response['UpdateEnvironmentResult'] + self.application_name = str(response['ApplicationName']) + self.cname = str(response['CNAME']) + self.date_created = datetime.fromtimestamp(response['DateCreated']) + self.date_updated = datetime.fromtimestamp(response['DateUpdated']) + self.description = str(response['Description']) + self.endpoint_url = str(response['EndpointURL']) + self.environment_id = str(response['EnvironmentId']) + self.environment_name = str(response['EnvironmentName']) + self.health = str(response['Health']) + if response['Resources']: + self.resources = EnvironmentResourcesDescription(response['Resources']) + else: + self.resources = None + self.solution_stack_name = str(response['SolutionStackName']) + self.status = str(response['Status']) + self.template_name = str(response['TemplateName']) + self.version_label = str(response['VersionLabel']) + + +class ValidateConfigurationSettingsResponse(Response): + def __init__(self, response): + response = response['ValidateConfigurationSettingsResponse'] + super(ValidateConfigurationSettingsResponse, self).__init__(response) + + response = response['ValidateConfigurationSettingsResult'] + self.messages = [] + if response['Messages']: + for member in response['Messages']: + message = ValidationMessage(member) + self.messages.append(message)
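The classes above give each layer1 call a typed result object. A minimal sketch of how one is consumed, assuming boto is importable and using a hand-built response dict (the request id and payload values are illustrative, not real API output); the wrapper.py added just below applies the same snake_case-to-CamelCase lookup to every Layer1 method automatically:

    import boto.beanstalk.response

    # Mirror beanstalk_wrapper(): 'describe_applications' -> 'DescribeApplicationsResponse'.
    name = 'describe_applications'
    cls_name = ''.join(part.capitalize() for part in name.split('_')) + 'Response'
    cls = getattr(boto.beanstalk.response, cls_name)

    # Hand-built payload in the shape layer1 returns; the values are made up.
    raw = {'DescribeApplicationsResponse': {
        'ResponseMetadata': {'RequestId': 'example-request-id'},
        'DescribeApplicationsResult': {'Applications': None},
    }}

    result = cls(raw)
    print(result.applications)                  # [] rather than a raw dict
    print(result.response_metadata.request_id)  # 'example-request-id'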
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/beanstalk/wrapper.py b/desktop/core/ext-py/boto-2.38.0/boto/beanstalk/wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..eea1124a9e6d70cd4350556e2fe6b37b7210a22d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/beanstalk/wrapper.py @@ -0,0 +1,29 @@ +"""Wraps layer1 api methods and converts layer1 dict responses to objects.""" +from boto.beanstalk.layer1 import Layer1 +import boto.beanstalk.response +from boto.exception import BotoServerError +import boto.beanstalk.exception as exception + + +def beanstalk_wrapper(func, name): + def _wrapped_low_level_api(*args, **kwargs): + try: + response = func(*args, **kwargs) + except BotoServerError as e: + raise exception.simple(e) + # Turn 'this_is_a_function_name' into 'ThisIsAFunctionNameResponse'. + cls_name = ''.join([part.capitalize() for part in name.split('_')]) + 'Response' + cls = getattr(boto.beanstalk.response, cls_name) + return cls(response) + return _wrapped_low_level_api + + +class Layer1Wrapper(object): + def __init__(self, *args, **kwargs): + self.api = Layer1(*args, **kwargs) + + def __getattr__(self, name): + try: + return beanstalk_wrapper(getattr(self.api, name), name) + except AttributeError: + raise AttributeError("%s has no attribute %r" % (self, name)) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cacerts/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/cacerts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1b2dec79e76ca0e13e32eb2533430c1125b3f7cd --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cacerts/__init__.py @@ -0,0 +1,22 @@ +# Copyright 2010 Google Inc. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cacerts/cacerts.txt b/desktop/core/ext-py/boto-2.38.0/boto/cacerts/cacerts.txt new file mode 100644 index 0000000000000000000000000000000000000000..3cf3f26f97281ffdc48f64ffa8a6e931ddb5acfa --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cacerts/cacerts.txt @@ -0,0 +1,3869 @@ +## +## boto/cacerts/cacerts.txt -- Bundle of CA Root Certificates +## +## Certificate data from Mozilla downloaded on: Wed Aug 20 03:12:04 2014 +## +## This is a bundle of X.509 certificates of public Certificate Authorities +## (CA). These were automatically extracted from Mozilla's root certificates +## file (certdata.txt). This file can be found in the mozilla source tree: +## http://hg.mozilla.org/releases/mozilla-release/raw-file/default/security/nss/lib/ckfw/builtins/certdata.txt +## +## It contains the certificates in PEM format and therefore +## can be directly used with curl / libcurl / php_curl, or with +## an Apache+mod_ssl webserver for SSL client authentication.
+## Just configure this file as the SSLCACertificateFile. +## +## Conversion done with mk-ca-bundle.pl version 1.22. +## SHA1: bf2c15b3019e696660321d2227d942936dc50aa7 +## + + +GTE CyberTrust Global Root +========================== +-----BEGIN CERTIFICATE----- +MIICWjCCAcMCAgGlMA0GCSqGSIb3DQEBBAUAMHUxCzAJBgNVBAYTAlVTMRgwFgYDVQQKEw9HVEUg +Q29ycG9yYXRpb24xJzAlBgNVBAsTHkdURSBDeWJlclRydXN0IFNvbHV0aW9ucywgSW5jLjEjMCEG +A1UEAxMaR1RFIEN5YmVyVHJ1c3QgR2xvYmFsIFJvb3QwHhcNOTgwODEzMDAyOTAwWhcNMTgwODEz +MjM1OTAwWjB1MQswCQYDVQQGEwJVUzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQL +Ex5HVEUgQ3liZXJUcnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0 +IEdsb2JhbCBSb290MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCVD6C28FCc6HrHiM3dFw4u +sJTQGz0O9pTAipTHBsiQl8i4ZBp6fmw8U+E3KHNgf7KXUwefU/ltWJTSr41tiGeA5u2ylc9yMcql +HHK6XALnZELn+aks1joNrI1CqiQBOeacPwGFVw1Yh0X404Wqk2kmhXBIgD8SFcd5tB8FLztimQID +AQABMA0GCSqGSIb3DQEBBAUAA4GBAG3rGwnpXtlR22ciYaQqPEh346B8pt5zohQDhT37qw4wxYMW +M4ETCJ57NE7fQMh017l93PR2VX2bY1QY6fDq81yx2YtCHrnAlU66+tXifPVoYb+O7AWXX1uw16OF +NMQkpw0PlZPvy5TYnh+dXIVtx6quTx8itc2VrbqnzPmrC3p/ +-----END CERTIFICATE----- + +Thawte Server CA +================ +-----BEGIN CERTIFICATE----- +MIIDEzCCAnygAwIBAgIBATANBgkqhkiG9w0BAQQFADCBxDELMAkGA1UEBhMCWkExFTATBgNVBAgT +DFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3dGUgQ29uc3Vs +dGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcGA1UE +AxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5j +b20wHhcNOTYwODAxMDAwMDAwWhcNMjAxMjMxMjM1OTU5WjCBxDELMAkGA1UEBhMCWkExFTATBgNV +BAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3dGUgQ29u +c3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcG +A1UEAxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0 +ZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANOkUG7I/1Zr5s9dtuoMaHVHoqrC2oQl +/Kj0R1HahbUgdJSGHg91yekIYfUGbTBuFRkC6VLAYttNmZ7iagxEOM3+vuNkCXDF/rFrKbYvScg7 +1CcEJRCXL+eQbcAoQpnXTEPew/UhbVSfXcNY4cDk2VuwuNy0e982OsK1ZiIS1ocNAgMBAAGjEzAR +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEEBQADgYEAB/pMaVz7lcxG7oWDTSEwjsrZqG9J +GubaUeNgcGyEYRGhGshIPllDfU+VPaGLtwtimHp1it2ITk6eQNuozDJ0uW8NxuOzRAvZim+aKZuZ +GCg70eNAKJpaPNW15yAbi8qkq43pUdniTCxZqdq5snUb9kLy78fyGPmJvKP/iiMucEc= +-----END CERTIFICATE----- + +Thawte Premium Server CA +======================== +-----BEGIN CERTIFICATE----- +MIIDJzCCApCgAwIBAgIBATANBgkqhkiG9w0BAQQFADCBzjELMAkGA1UEBhMCWkExFTATBgNVBAgT +DFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3dGUgQ29uc3Vs +dGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjEhMB8GA1UE +AxMYVGhhd3RlIFByZW1pdW0gU2VydmVyIENBMSgwJgYJKoZIhvcNAQkBFhlwcmVtaXVtLXNlcnZl +ckB0aGF3dGUuY29tMB4XDTk2MDgwMTAwMDAwMFoXDTIwMTIzMTIzNTk1OVowgc4xCzAJBgNVBAYT +AlpBMRUwEwYDVQQIEwxXZXN0ZXJuIENhcGUxEjAQBgNVBAcTCUNhcGUgVG93bjEdMBsGA1UEChMU +VGhhd3RlIENvbnN1bHRpbmcgY2MxKDAmBgNVBAsTH0NlcnRpZmljYXRpb24gU2VydmljZXMgRGl2 +aXNpb24xITAfBgNVBAMTGFRoYXd0ZSBQcmVtaXVtIFNlcnZlciBDQTEoMCYGCSqGSIb3DQEJARYZ +cHJlbWl1bS1zZXJ2ZXJAdGhhd3RlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA0jY2 +aovXwlue2oFBYo847kkEVdbQ7xwblRZH7xhINTpS9CtqBo87L+pW46+GjZ4X9560ZXUCTe/LCaIh +Udib0GfQug2SBhRz1JPLlyoAnFxODLz6FVL88kRu2hFKbgifLy3j+ao6hnO2RlNYyIkFvYMRuHM/ +qgeN9EJN50CdHDcCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQQFAAOBgQAm +SCwWwlj66BZ0DKqqX1Q/8tfJeGBeXm43YyJ3Nn6yF8Q0ufUIhfzJATj/Tb7yFkJD57taRvvBxhEf +8UqwKEbJw8RCfbz6q1lu1bdRiBHjpIUZa4JMpAwSremkrj/xw0llmozFyD4lt5SZu5IycQfwhl7t +UCemDaYj+bvLpgcUQg== +-----END CERTIFICATE----- + +Equifax Secure CA
+================= +-----BEGIN CERTIFICATE----- +MIIDIDCCAomgAwIBAgIENd70zzANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJVUzEQMA4GA1UE +ChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5 +MB4XDTk4MDgyMjE2NDE1MVoXDTE4MDgyMjE2NDE1MVowTjELMAkGA1UEBhMCVVMxEDAOBgNVBAoT +B0VxdWlmYXgxLTArBgNVBAsTJEVxdWlmYXggU2VjdXJlIENlcnRpZmljYXRlIEF1dGhvcml0eTCB +nzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwV2xWGcIYu6gmi0fCG2RFGiYCh7+2gRvE4RiIcPR +fM6fBeC4AfBONOziipUEZKzxa1NfBbPLZ4C/QgKO/t0BCezhABRP/PvwDN1Dulsr4R+AcJkVV5MW +8Q+XarfCaCMczE1ZMKxRHjuvK9buY0V7xdlfUNLjUA86iOe/FP3gx7kCAwEAAaOCAQkwggEFMHAG +A1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UE +CxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMBoG +A1UdEAQTMBGBDzIwMTgwODIyMTY0MTUxWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUSOZo+SvS +spXXR9gjIBBPM5iQn9QwHQYDVR0OBBYEFEjmaPkr0rKV10fYIyAQTzOYkJ/UMAwGA1UdEwQFMAMB +Af8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUAA4GBAFjOKer89961 +zgK5F7WF0bnj4JXMJTENAKaSbn+2kmOeUJXRmm/kEd5jhW6Y7qj/WsjTVbJmcVfewCHrPSqnI0kB +BIZCe/zuf6IWUrVnZ9NA2zsmWLIodz2uFHdh1voqZiegDfqnc1zqcPGUIWVEX/r87yloqaKHee95 +70+sB3c4 +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority +======================================================= +-----BEGIN CERTIFICATE----- +MIICPDCCAaUCEHC65B0Q2Sk0tjjKewPMur8wDQYJKoZIhvcNAQECBQAwXzELMAkGA1UEBhMCVVMx +FzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAzIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2MDEyOTAwMDAwMFoXDTI4MDgwMTIzNTk1OVow +XzELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAz +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDJXFme8huKARS0EN8EQNvjV69qRUCPhAwL0TPZ2RHP7gJYHyX3KqhEBarsAx94 +f56TuZoAqiN91qyFomNFx3InzPRMxnVx0jnvT0Lwdd8KkMaOIG+YD/isI19wKTakyYbnsZogy1Ol +hec9vn2a/iRFM9x2Fe0PonFkTGUugWhFpwIDAQABMA0GCSqGSIb3DQEBAgUAA4GBALtMEivPLCYA +TxQT3ab7/AoRhIzzKBxnki98tsX63/Dolbwdj2wsqFHMc9ikwFPwTtYmwHYBV4GSXiHx0bH/59Ah +WM1pF+NEHJwZRDmJXNycAA9WjQKZ7aKQRUzkuxCkPfAyAw7xzvjoyVGM5mKf5p/AfbdynMk2Omuf +Tqj/ZA1k +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority - G2 +============================================================ +-----BEGIN CERTIFICATE----- +MIIDAjCCAmsCEH3Z/gfPqB63EHln+6eJNMYwDQYJKoZIhvcNAQEFBQAwgcExCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMyBQdWJsaWMgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVz +dCBOZXR3b3JrMB4XDTk4MDUxODAwMDAwMFoXDTI4MDgwMTIzNTk1OVowgcExCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMyBQdWJsaWMgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVz +dCBOZXR3b3JrMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDMXtERXVxp0KvTuWpMmR9ZmDCO +FoUgRm1HP9SFIIThbbP4pO0M8RcPO/mn+SXXwc+EY/J8Y8+iR/LGWzOOZEAEaMGAuWQcRXfH2G71 +lSk8UOg013gfqLptQ5GVj0VXXn7F+8qkBOvqlzdUMG+7AUcyM83cV5tkaWH4mx0ciU9cZwIDAQAB +MA0GCSqGSIb3DQEBBQUAA4GBAFFNzb5cy5gZnBWyATl4Lk0PZ3BwmcYQWpSkU01UbSuvDV1Ai2TT +1+7eVmGSX6bEHRBhNtMsJzzoKQm5EWR0zLVznxxIqbxhAe7iF6YM40AIOw7n60RzKprxaZLvcRTD +Oaxxp5EJb+RxBrO6WVcmeQD2+A2iMzAo1KpYoJ2daZH9 +-----END CERTIFICATE----- + +GlobalSign Root CA +================== +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkGA1UEBhMCQkUx 
+GTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jvb3QgQ0ExGzAZBgNVBAMTEkds +b2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAwMDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNV +BAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYD +VQQDExJHbG9iYWxTaWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDa +DuaZjc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavpxy0Sy6sc +THAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp1Wrjsok6Vjk4bwY8iGlb +Kk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdGsnUOhugZitVtbNV4FpWi6cgKOOvyJBNP +c1STE4U6G7weNLWLBYy5d4ux2x8gkasJU26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrX +gzT/LCrBbBlDSgeF59N89iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0BAQUF +AAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOzyj1hTdNGCbM+w6Dj +Y1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE38NflNUVyRRBnMRddWQVDf9VMOyG +j/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymPAbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhH +hm4qxFYxldBniYUr+WymXUadDKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveC +X4XSQRjbgbMEHMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- + +GlobalSign Root CA - R2 +======================= +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4GA1UECxMXR2xv +YmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMjETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6 +ErPLv4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8eoLrvozp +s6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklqtTleiDTsvHgMCJiEbKjN +S7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzdC9XZzPnqJworc5HGnRusyMvo4KD0L5CL +TfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pazq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6C +ygPCm48CAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUm+IHV2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5nbG9i +YWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG3lm0mi3f3BmGLjAN +BgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4GsJ0/WwbgcQ3izDJr86iw8bmEbTUsp +9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu +01yiPqFbQfXf5WRDLenVOavSot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG7 +9G+dwfCMNYxdAfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 +TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== +-----END CERTIFICATE----- + +ValiCert Class 1 VA +=================== +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRp +b24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDEgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZh +bGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNTIy +MjM0OFoXDTE5MDYyNTIyMjM0OFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0 +d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDEg +UG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0 +LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDYWYJ6ibiWuqYvaG9YLqdUHAZu9OqNSLwxlBfw8068srg1knaw0KWlAdcAAxIi +GQj4/xEjm84H9b9pGib+TunRf50sQB1ZaG6m+FiwnRqP0z/x3BkGgagO4DrdyFNFCQbmD3DD+kCm +DuJWBQ8YTfwggtFzVXSNdnKgHZ0dwN0/cQIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFBoPUn0LBwG 
+lN+VYH+Wexf+T3GtZMjdd9LvWVXoP+iOBSoh8gfStadS/pyxtuJbdxdA6nLWI8sogTLDAHkY7FkX +icnGah5xyf23dKUlRWnFSKsZ4UWKJWsZ7uW7EvV/96aNUcPwnXS3qT6gpf+2SQMT2iLM7XGCK5nP +Orf1LXLI +-----END CERTIFICATE----- + +ValiCert Class 2 VA +=================== +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRp +b24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDIgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZh +bGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAw +MTk1NFoXDTE5MDYyNjAwMTk1NFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0 +d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDIg +UG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0 +LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDOOnHK5avIWZJV16vYdA757tn2VUdZZUcOBVXc65g2PFxTXdMwzzjsvUGJ7SVC +CSRrCl6zfN1SLUzm1NZ9WlmpZdRJEy0kTRxQb7XBhVQ7/nHk01xC+YDgkRoKWzk2Z/M/VXwbP7Rf +ZHM047QSv4dk+NoS/zcnwbNDu+97bi5p9wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBADt/UG9vUJSZ +SWI4OB9L+KXIPqeCgfYrx+jFzug6EILLGACOTb2oWH+heQC1u+mNr0HZDzTuIYEZoDJJKPTEjlbV +UjP9UNV+mWwD5MlM/Mtsq2azSiGM5bUMMj4QssxsodyamEwCW/POuZ6lcg5Ktz885hZo+L7tdEy8 +W9ViH0Pd +-----END CERTIFICATE----- + +RSA Root Certificate 1 +====================== +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRp +b24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZh +bGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAw +MjIzM1oXDTE5MDYyNjAwMjIzM1owgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0 +d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDMg +UG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0 +LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDjmFGWHOjVsQaBalfDcnWTq8+epvzzFlLWLU2fNUSoLgRNB0mKOCn1dzfnt6td +3zZxFJmP3MKS8edgkpfs2Ejcv8ECIMYkpChMMFp2bbFc893enhBxoYjHW5tBbcqwuI4V7q0zK89H +BFx1cQqYJJgpp0lZpd34t0NiYfPT4tBVPwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFa7AliEZwgs +3x/be0kz9dNnnfS0ChCzycUs4pJqcXgn8nCDQtM+z6lU9PHYkhaM0QTLS6vJn0WuPIqpsHEzXcjF +V9+vqDWzf4mH6eglkrh/hXqu1rweN1gqZ8mRzyqBPu3GOd/APhmcGcwTTYJBtYze4D1gCCAPRX5r +on+jjBXu +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAMu6nFL8eB8aHm8bN3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1 +EUGO+i2tKmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGukxUc +cLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBmCC+Vk7+qRy+oRpfw 
+EuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJXwzw3sJ2zq/3avL6QaaiMxTJ5Xpj +055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWuimi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +ERSWwauSCPc/L8my/uRan2Te2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5f +j267Cz3qWhMeDGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC +/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565pF4ErWjfJXir0 +xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGtTxzhT5yvDwyd93gN2PQ1VoDa +t20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== +-----END CERTIFICATE----- + +Verisign Class 4 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQDsoKeLbnVqAc/EfMwvlF7XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDQgUHVibGljIFByaW1hcnkg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAK3LpRFpxlmr8Y+1GQ9Wzsy1HyDkniYlS+BzZYlZ3tCD5PUPtbut8XzoIfzk6AzufEUiGXaS +tBO3IFsJ+mGuqPKljYXCKtbeZjbSmwL0qJJgfJxptI8kHtCGUvYynEFYHiK9zUVilQhu0GbdU6LM +8BDcVHOLBKFGMzNcF0C5nk3T875Vg+ixiY5afJqWIpA7iCXy0lOIAgwLePLmNxdLMEYH5IBtptiW +Lugs+BGzOA1mppvqySNb247i8xOOGlktqgLw7KSHZtzBP/XYufTsgsbSPZUd5cBPhMnZo0QoBmrX +Razwa2rvTl/4EYIeOGM0ZlDUPpNz+jDDZq3/ky2X7wMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +j/ola09b5KROJ1WrIhVZPMq1CtRK26vdoV9TxaBXOcLORyu+OshWv8LZJxA6sQU8wHcxuzrTBXtt +mhwwjIDLk5Mqg6sFUYICABFna/OIYUdfA5PVWw3g8dShMjWFsjrbsIKr0csKvE+MW8VLADsfKoKm +fjaF3H48ZwC15DtS4KjrXRX5xm3wrR0OhbepmnMUWluPQSjA1egtTaRezarZ7c7c2NU8Qh0XwRJd +RTjDOPP8hS6DRkiy1yBfkjaP53kPmF6Z6PDQpLv1U70qzlmwr25/bLvSHgCwIe34QWKCudiyxLtG +UPMxxY8BqHTr9Xgn2uf3ZkPznoM+IKrDNWCRzg== +-----END CERTIFICATE----- + +Entrust.net Secure Server CA +============================ +-----BEGIN CERTIFICATE----- +MIIE2DCCBEGgAwIBAgIEN0rSQzANBgkqhkiG9w0BAQUFADCBwzELMAkGA1UEBhMCVVMxFDASBgNV +BAoTC0VudHJ1c3QubmV0MTswOQYDVQQLEzJ3d3cuZW50cnVzdC5uZXQvQ1BTIGluY29ycC4gYnkg +cmVmLiAobGltaXRzIGxpYWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRl +ZDE6MDgGA1UEAxMxRW50cnVzdC5uZXQgU2VjdXJlIFNlcnZlciBDZXJ0aWZpY2F0aW9uIEF1dGhv +cml0eTAeFw05OTA1MjUxNjA5NDBaFw0xOTA1MjUxNjM5NDBaMIHDMQswCQYDVQQGEwJVUzEUMBIG +A1UEChMLRW50cnVzdC5uZXQxOzA5BgNVBAsTMnd3dy5lbnRydXN0Lm5ldC9DUFMgaW5jb3JwLiBi +eSByZWYuIChsaW1pdHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBFbnRydXN0Lm5ldCBMaW1p +dGVkMTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUgU2VydmVyIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIGdMA0GCSqGSIb3DQEBAQUAA4GLADCBhwKBgQDNKIM0VBuJ8w+vN5Ex/68xYMmo6LIQ +aO2f55M28Qpku0f1BBc/I0dNxScZgSYMVHINiC3ZH5oSn7yzcdOAGT9HZnuMNSjSuQrfJNqc1lB5 +gXpa0zf3wkrYKZImZNHkmGw6AIr1NJtl+O3jEP/9uElY3KDegjlrgbEWGWG5VLbmQwIBA6OCAdcw +ggHTMBEGCWCGSAGG+EIBAQQEAwIABzCCARkGA1UdHwSCARAwggEMMIHeoIHboIHYpIHVMIHSMQsw +CQYDVQQGEwJVUzEUMBIGA1UEChMLRW50cnVzdC5uZXQxOzA5BgNVBAsTMnd3dy5lbnRydXN0Lm5l +dC9DUFMgaW5jb3JwLiBieSByZWYuIChsaW1pdHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBF +bnRydXN0Lm5ldCBMaW1pdGVkMTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUgU2VydmVyIENl +cnRpZmljYXRpb24gQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMCmgJ6AlhiNodHRwOi8vd3d3LmVu 
+dHJ1c3QubmV0L0NSTC9uZXQxLmNybDArBgNVHRAEJDAigA8xOTk5MDUyNTE2MDk0MFqBDzIwMTkw +NTI1MTYwOTQwWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAU8BdiE1U9s/8KAGv7UISX8+1i0Bow +HQYDVR0OBBYEFPAXYhNVPbP/CgBr+1CEl/PtYtAaMAwGA1UdEwQFMAMBAf8wGQYJKoZIhvZ9B0EA +BAwwChsEVjQuMAMCBJAwDQYJKoZIhvcNAQEFBQADgYEAkNwwAvpkdMKnCqV8IY00F6j7Rw7/JXyN +Ewr75Ji174z4xRAN95K+8cPV1ZVqBLssziY2ZcgxxufuP+NXdYR6Ee9GTxj005i7qIcyunL2POI9 +n9cd2cNgQ4xYDiKWL2KjLB+6rQXvqzJ4h6BUcxm1XAX5Uj5tLUUL9wqT6u0G+bI= +-----END CERTIFICATE----- + +Entrust.net Premium 2048 Secure Server CA +========================================= +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChMLRW50cnVzdC5u +ZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBpbmNvcnAuIGJ5IHJlZi4gKGxp +bWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNV +BAMTKkVudHJ1c3QubmV0IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQx +NzUwNTFaFw0yOTA3MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3 +d3d3LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxpYWIuKTEl +MCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEGA1UEAxMqRW50cnVzdC5u +ZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgpMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEArU1LqRKGsuqjIAcVFmQqK0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOL +Gp18EzoOH1u3Hs/lJBQesYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSr +hRSGlVuXMlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVTXTzW +nLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/HoZdenoVve8AjhUi +VBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH4QIDAQABo0IwQDAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJ +KoZIhvcNAQEFBQADggEBADubj1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPy +T/4xmf3IDExoU8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf +zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5bu/8j72gZyxKT +J1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+bYQLCIt+jerXmCHG8+c8eS9e +nNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/ErfF6adulZkMV8gzURZVE= +-----END CERTIFICATE----- + +Baltimore CyberTrust Root +========================= +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJRTESMBAGA1UE +ChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYDVQQDExlCYWx0aW1vcmUgQ3li +ZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoXDTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMC +SUUxEjAQBgNVBAoTCUJhbHRpbW9yZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFs +dGltb3JlIEN5YmVyVHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKME +uyKrmD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjrIZ3AQSsB +UnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeKmpYcqWe4PwzV9/lSEy/C +G9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSuXmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9 +XbIGevOF6uvUA65ehD5f/xXtabz5OTZydc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjpr +l3RjM71oGDHweI12v/yejl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoI +VDaGezq1BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEB +BQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT929hkTI7gQCvlYpNRh +cL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3WgxjkzSswF07r51XgdIGn9w/xZchMB5 +hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsa +Y71k5h+3zvDyny67G7fyUIhzksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9H +RCwBXbsdtTLSR9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp +-----END CERTIFICATE----- + +Equifax Secure Global eBusiness CA +================================== +-----BEGIN CERTIFICATE----- 
+MIICkDCCAfmgAwIBAgIBATANBgkqhkiG9w0BAQQFADBaMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +RXF1aWZheCBTZWN1cmUgSW5jLjEtMCsGA1UEAxMkRXF1aWZheCBTZWN1cmUgR2xvYmFsIGVCdXNp +bmVzcyBDQS0xMB4XDTk5MDYyMTA0MDAwMFoXDTIwMDYyMTA0MDAwMFowWjELMAkGA1UEBhMCVVMx +HDAaBgNVBAoTE0VxdWlmYXggU2VjdXJlIEluYy4xLTArBgNVBAMTJEVxdWlmYXggU2VjdXJlIEds +b2JhbCBlQnVzaW5lc3MgQ0EtMTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAuucXkAJlsTRV +PEnCUdXfp9E3j9HngXNBUmCbnaEXJnitx7HoJpQytd4zjTov2/KaelpzmKNc6fuKcxtc58O/gGzN +qfTWK8D3+ZmqY6KxRwIP1ORROhI8bIpaVIRw28HFkM9yRcuoWcDNM50/o5brhTMhHD4ePmBudpxn +hcXIw2ECAwEAAaNmMGQwEQYJYIZIAYb4QgEBBAQDAgAHMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0j +BBgwFoAUvqigdHJQa0S3ySPY+6j/s1draGwwHQYDVR0OBBYEFL6ooHRyUGtEt8kj2Puo/7NXa2hs +MA0GCSqGSIb3DQEBBAUAA4GBADDiAVGqx+pf2rnQZQ8w1j7aDRRJbpGTJxQx78T3LUX47Me/okEN +I7SS+RkAZ70Br83gcfxaz2TE4JaY0KNA4gGK7ycH8WUBikQtBmV1UsCGECAhX2xrD2yuCRyv8qIY +NMR1pHMc8Y3c7635s3a0kr/clRAevsvIO1qEYBlWlKlV +-----END CERTIFICATE----- + +Equifax Secure eBusiness CA 1 +============================= +-----BEGIN CERTIFICATE----- +MIICgjCCAeugAwIBAgIBBDANBgkqhkiG9w0BAQQFADBTMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +RXF1aWZheCBTZWN1cmUgSW5jLjEmMCQGA1UEAxMdRXF1aWZheCBTZWN1cmUgZUJ1c2luZXNzIENB +LTEwHhcNOTkwNjIxMDQwMDAwWhcNMjAwNjIxMDQwMDAwWjBTMQswCQYDVQQGEwJVUzEcMBoGA1UE +ChMTRXF1aWZheCBTZWN1cmUgSW5jLjEmMCQGA1UEAxMdRXF1aWZheCBTZWN1cmUgZUJ1c2luZXNz +IENBLTEwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAM4vGbwXt3fek6lfWg0XTzQaDJj0ItlZ +1MRoRvC0NcWFAyDGr0WlIVFFQesWWDYyb+JQYmT5/VGcqiTZ9J2DKocKIdMSODRsjQBuWqDZQu4a +IZX5UkxVWsUPOE9G+m34LjXWHXzr4vCwdYDIqROsvojvOm6rXyo4YgKwEnv+j6YDAgMBAAGjZjBk +MBEGCWCGSAGG+EIBAQQEAwIABzAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFEp4MlIR21kW +Nl7fwRQ2QGpHfEyhMB0GA1UdDgQWBBRKeDJSEdtZFjZe38EUNkBqR3xMoTANBgkqhkiG9w0BAQQF +AAOBgQB1W6ibAxHm6VZMzfmpTMANmvPMZWnmJXbMWbfWVMMdzZmsGd20hdXgPfxiIKeES1hl8eL5 +lSE/9dR+WB5Hh1Q+WKG1tfgq73HnvMP2sUlG4tega+VWeponmHxGYhTnyfxuAxJ5gDgdSIKN/Bf+ +KpYrtWKmpj29f5JZzVoqgrI3eQ== +-----END CERTIFICATE----- + +AddTrust Low-Value Services Root +================================ +-----BEGIN CERTIFICATE----- +MIIEGDCCAwCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYDVQQDExhBZGRU +cnVzdCBDbGFzcyAxIENBIFJvb3QwHhcNMDAwNTMwMTAzODMxWhcNMjAwNTMwMTAzODMxWjBlMQsw +CQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBO +ZXR3b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCWltQhSWDia+hBBwzexODcEyPNwTXH+9ZOEQpnXvUGW2ulCDtbKRY6 +54eyNAbFvAWlA3yCyykQruGIgb3WntP+LVbBFc7jJp0VLhD7Bo8wBN6ntGO0/7Gcrjyvd7ZWxbWr +oulpOj0OM3kyP3CCkplhbY0wCI9xP6ZIVxn4JdxLZlyldI+Yrsj5wAYi56xz36Uu+1LcsRVlIPo1 +Zmne3yzxbrww2ywkEtvrNTVokMsAsJchPXQhI2U0K7t4WaPW4XY5mqRJjox0r26kmqPZm9I4XJui +GMx1I4S+6+JNM3GOGvDC+Mcdoq0Dlyz4zyXG9rgkMbFjXZJ/Y/AlyVMuH79NAgMBAAGjgdIwgc8w +HQYDVR0OBBYEFJWxtPCUtr3H2tERCSG+wa9J/RB7MAsGA1UdDwQEAwIBBjAPBgNVHRMBAf8EBTAD +AQH/MIGPBgNVHSMEgYcwgYSAFJWxtPCUtr3H2tERCSG+wa9J/RB7oWmkZzBlMQswCQYDVQQGEwJT +RTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEw +HwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBACxt +ZBsfzQ3duQH6lmM0MkhHma6X7f1yFqZzR1r0693p9db7RcwpiURdv0Y5PejuvE1Uhh4dbOMXJ0Ph +iVYrqW9yTkkz43J8KiOavD7/KCrto/8cI7pDVwlnTUtiBi34/2ydYB7YHEt9tTEv2dB8Xfjea4MY +eDdXL+gzB2ffHsdrKpV2ro9Xo/D0UrSpUwjP4E/TelOL/bscVjby/rK25Xa71SJlpz/+0WatC7xr +mYbvP33zGDLKe8bjq2RGlfgmadlVg3sslgf/WSxEo8bl6ancoWOAWiFeIc9TVPC6b4nbqKqVz4vj +ccweGyBECMB6tkD9xOQ14R0WHNC8K47Wcdk= +-----END CERTIFICATE----- + +AddTrust External Root 
+====================== +-----BEGIN CERTIFICATE----- +MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYD +VQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEw +NDgzOFowbzELMAkGA1UEBhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRU +cnVzdCBFeHRlcm5hbCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0Eg +Um9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvtH7xsD821 ++iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9uMq/NzgtHj6RQa1wVsfw +Tz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzXmk6vBbOmcZSccbNQYArHE504B4YCqOmo +aSYYkKtMsE8jqzpPhNjfzp/haW+710LXa0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy +2xSoRcRdKn23tNbE7qzNE0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv7 +7+ldU9U0WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYDVR0P +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0Jvf6xCZU7wO94CTL +VBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEmMCQGA1UECxMdQWRk +VHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsxIjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENB +IFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZl +j7DYd7usQWxHYINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5 +6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvCNr4TDea9Y355 +e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEXc4g/VhsxOBi0cQ+azcgOno4u +G+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5amnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ= +-----END CERTIFICATE----- + +AddTrust Public Services Root +============================= +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIBATANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSAwHgYDVQQDExdBZGRU +cnVzdCBQdWJsaWMgQ0EgUm9vdDAeFw0wMDA1MzAxMDQxNTBaFw0yMDA1MzAxMDQxNTBaMGQxCzAJ +BgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5l +dHdvcmsxIDAeBgNVBAMTF0FkZFRydXN0IFB1YmxpYyBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA6Rowj4OIFMEg2Dybjxt+A3S72mnTRqX4jsIMEZBRpS9mVEBV6tsfSlbu +nyNu9DnLoblv8n75XYcmYZ4c+OLspoH4IcUkzBEMP9smcnrHAZcHF/nXGCwwfQ56HmIexkvA/X1i +d9NEHif2P0tEs7c42TkfYNVRknMDtABp4/MUTu7R3AnPdzRGULD4EfL+OHn3Bzn+UZKXC1sIXzSG +Aa2Il+tmzV7R/9x98oTaunet3IAIx6eH1lWfl2royBFkuucZKT8Rs3iQhCBSWxHveNCD9tVIkNAw +HM+A+WD+eeSI8t0A65RF62WUaUC6wNW0uLp9BBGo6zEFlpROWCGOn9Bg/QIDAQABo4HRMIHOMB0G +A1UdDgQWBBSBPjfYkrAfd59ctKtzquf2NGAv+jALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zCBjgYDVR0jBIGGMIGDgBSBPjfYkrAfd59ctKtzquf2NGAv+qFopGYwZDELMAkGA1UEBhMCU0Ux +FDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRUcnVzdCBUVFAgTmV0d29yazEgMB4G +A1UEAxMXQWRkVHJ1c3QgUHVibGljIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBAAP3FUr4 +JNojVhaTdt02KLmuG7jD8WS6IBh4lSknVwW8fCr0uVFV2ocC3g8WFzH4qnkuCRO7r7IgGRLlk/lL ++YPoRNWyQSW/iHVv/xD8SlTQX/D67zZzfRs2RcYhbbQVuE7PnFylPVoAjgbjPGsye/Kf8Lb93/Ao +GEjwxrzQvzSAlsJKsW2Ox5BF3i9nrEUEo3rcVZLJR2bYGozH7ZxOmuASu7VqTITh4SINhwBk/ox9 +Yjllpu9CtoAlEmEBqCQTcAARJl/6NVDFSMwGR+gn2HCNX2TmoUQmXiLsks3/QppEIW1cxeMiHV9H +EufOX1362KqxMy3ZdvJOOjMMK7MtkAY= +-----END CERTIFICATE----- + +AddTrust Qualified Certificates Root +==================================== +-----BEGIN CERTIFICATE----- +MIIEHjCCAwagAwIBAgIBATANBgkqhkiG9w0BAQUFADBnMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSMwIQYDVQQDExpBZGRU +cnVzdCBRdWFsaWZpZWQgQ0EgUm9vdDAeFw0wMDA1MzAxMDQ0NTBaFw0yMDA1MzAxMDQ0NTBaMGcx +CzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQ 
+IE5ldHdvcmsxIzAhBgNVBAMTGkFkZFRydXN0IFF1YWxpZmllZCBDQSBSb290MIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5B6a/twJWoekn0e+EV+vhDTbYjx5eLfpMLXsDBwqxBb/4Oxx +64r1EW7tTw2R0hIYLUkVAcKkIhPHEWT/IhKauY5cLwjPcWqzZwFZ8V1G87B4pfYOQnrjfxvM0PC3 +KP0q6p6zsLkEqv32x7SxuCqg+1jxGaBvcCV+PmlKfw8i2O+tCBGaKZnhqkRFmhJePp1tUvznoD1o +L/BLcHwTOK28FSXx1s6rosAx1i+f4P8UWfyEk9mHfExUE+uf0S0R+Bg6Ot4l2ffTQO2kBhLEO+GR +wVY18BTcZTYJbqukB8c10cIDMzZbdSZtQvESa0NvS3GU+jQd7RNuyoB/mC9suWXY6QIDAQABo4HU +MIHRMB0GA1UdDgQWBBQ5lYtii1zJ1IC6WA+XPxUIQ8yYpzALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zCBkQYDVR0jBIGJMIGGgBQ5lYtii1zJ1IC6WA+XPxUIQ8yYp6FrpGkwZzELMAkGA1UE +BhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRUcnVzdCBUVFAgTmV0d29y +azEjMCEGA1UEAxMaQWRkVHJ1c3QgUXVhbGlmaWVkIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQAD +ggEBABmrder4i2VhlRO6aQTvhsoToMeqT2QbPxj2qC0sVY8FtzDqQmodwCVRLae/DLPt7wh/bDxG +GuoYQ992zPlmhpwsaPXpF/gxsxjE1kh9I0xowX67ARRvxdlu3rsEQmr49lx95dr6h+sNNVJn0J6X +dgWTP5XHAeZpVTh/EGGZyeNfpso+gmNIquIISD6q8rKFYqa0p9m9N5xotS1WfbC3P6CxB9bpT9ze +RXEwMn8bLgn5v1Kh7sKAPgZcLlVAwRv1cEWw3F369nJad9Jjzc9YiQBCYz95OdBEsIJuQRno3eDB +iFrRHnGTHyQwdOUeqN48Jzd/g66ed8/wMLH/S5noxqE= +-----END CERTIFICATE----- + +Entrust Root Certification Authority +==================================== +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMCVVMxFjAUBgNV +BAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0Lm5ldC9DUFMgaXMgaW5jb3Jw +b3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMWKGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsG +A1UEAxMkRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0 +MloXDTI2MTEyNzIwNTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMu +MTkwNwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSByZWZlcmVu +Y2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNVBAMTJEVudHJ1c3QgUm9v +dCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALaVtkNC+sZtKm9I35RMOVcF7sN5EUFoNu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYsz +A9u3g3s+IIRe7bJWKKf44LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOww +Cj0Yzfv9KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGIrb68 +j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi94DkZfs0Nw4pgHBN +rziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOBsDCBrTAOBgNVHQ8BAf8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAigA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1 +MzQyWjAfBgNVHSMEGDAWgBRokORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DH +hmak8fdLQ/uEvW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9tO1KzKtvn1ISM +Y/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6ZuaAGAT/3B+XxFNSRuzFVJ7yVTa +v52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTS +W3iDVuycNsMm4hH2Z0kdkquM++v/eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0 +tHuu2guQOHXvgR1m0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- + +RSA Security 2048 v3 +==================== +-----BEGIN CERTIFICATE----- +MIIDYTCCAkmgAwIBAgIQCgEBAQAAAnwAAAAKAAAAAjANBgkqhkiG9w0BAQUFADA6MRkwFwYDVQQK +ExBSU0EgU2VjdXJpdHkgSW5jMR0wGwYDVQQLExRSU0EgU2VjdXJpdHkgMjA0OCBWMzAeFw0wMTAy +MjIyMDM5MjNaFw0yNjAyMjIyMDM5MjNaMDoxGTAXBgNVBAoTEFJTQSBTZWN1cml0eSBJbmMxHTAb +BgNVBAsTFFJTQSBTZWN1cml0eSAyMDQ4IFYzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAt49VcdKA3XtpeafwGFAyPGJn9gqVB93mG/Oe2dJBVGutn3y+Gc37RqtBaB4Y6lXIL5F4iSj7 +Jylg/9+PjDvJSZu1pJTOAeo+tWN7fyb9Gd3AIb2E0S1PRsNO3Ng3OTsor8udGuorryGlwSMiuLgb +WhOHV4PR8CDn6E8jQrAApX2J6elhc5SYcSa8LWrg903w8bYqODGBDSnhAMFRD0xS+ARaqn1y07iH 
+KrtjEAMqs6FPDVpeRrc9DvV07Jmf+T0kgYim3WBU6JU2PcYJk5qjEoAAVZkZR73QpXzDuvsf9/UP ++Ky5tfQ3mBMY3oVbtwyCO4dvlTlYMNpuAWgXIszACwIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/ +MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQHw1EwpKrpRa41JPr/JCwz0LGdjDAdBgNVHQ4E +FgQUB8NRMKSq6UWuNST6/yQsM9CxnYwwDQYJKoZIhvcNAQEFBQADggEBAF8+hnZuuDU8TjYcHnmY +v/3VEhF5Ug7uMYm83X/50cYVIeiKAVQNOvtUudZj1LGqlk2iQk3UUx+LEN5/Zb5gEydxiKRz44Rj +0aRV4VCT5hsOedBnvEbIvz8XDZXmxpBp3ue0L96VfdASPz0+f00/FGj1EVDVwfSQpQgdMWD/YIwj +VAqv/qFuxdF6Kmh4zx6CCiC0H63lhbJqaHVOrSU3lIW+vaHU6rcMSzyd6BIA8F+sDeGscGNz9395 +nzIlQnQFgCi/vcEkllgVsRch6YlL2weIZ/QVrXA+L02FO8K32/6YaCOJ4XQP3vTFhGMpG8zLB8kA +pKnXwiJPZ9d37CAFYd4= +-----END CERTIFICATE----- + +GeoTrust Global CA +================== +-----BEGIN CERTIFICATE----- +MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVTMRYwFAYDVQQK +Ew1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9iYWwgQ0EwHhcNMDIwNTIxMDQw +MDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j +LjEbMBkGA1UEAxMSR2VvVHJ1c3QgR2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA2swYYzD99BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjo +BbdqfnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDviS2Aelet +8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU1XupGc1V3sjs0l44U+Vc +T4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+bw8HHa8sHo9gOeL6NlMTOdReJivbPagU +vTLrGAMoUgRx5aszPeE4uwc2hGKceeoWMPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBTAephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVk +DBF9qn1luMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKInZ57Q +zxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfStQWVYrmm3ok9Nns4 +d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcFPseKUgzbFbS9bZvlxrFUaKnjaZC2 +mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Unhw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6p +XE0zX5IJL4hmXXeXxx12E6nV5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvm +Mw== +-----END CERTIFICATE----- + +GeoTrust Global CA 2 +==================== +-----BEGIN CERTIFICATE----- +MIIDZjCCAk6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBEMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFsIENBIDIwHhcNMDQwMzA0MDUw +MDAwWhcNMTkwMzA0MDUwMDAwWjBEMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j +LjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFsIENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDvPE1APRDfO1MA4Wf+lGAVPoWI8YkNkMgoI5kF6CsgncbzYEbYwbLVjDHZ3CB5JIG/ +NTL8Y2nbsSpr7iFY8gjpeMtvy/wWUsiRxP89c96xPqfCfWbB9X5SJBri1WeR0IIQ13hLTytCOb1k +LUCgsBDTOEhGiKEMuzozKmKY+wCdE1l/bztyqu6mD4b5BWHqZ38MN5aL5mkWRxHCJ1kDs6ZgwiFA +Vvqgx306E+PsV8ez1q6diYD3Aecs9pYrEw15LNnA5IZ7S4wMcoKK+xfNAGw6EzywhIdLFnopsk/b +HdQL82Y3vdj2V7teJHq4PIu5+pIaGoSe2HSPqht/XvT+RSIhAgMBAAGjYzBhMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFHE4NvICMVNHK266ZUapEBVYIAUJMB8GA1UdIwQYMBaAFHE4NvICMVNH +K266ZUapEBVYIAUJMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQUFAAOCAQEAA/e1K6tdEPx7 +srJerJsOflN4WT5CBP51o62sgU7XAotexC3IUnbHLB/8gTKY0UvGkpMzNTEv/NgdRN3ggX+d6Yvh +ZJFiCzkIjKx0nVnZellSlxG5FntvRdOW2TF9AjYPnDtuzywNA0ZF66D0f0hExghAzN4bcLUprbqL +OzRldRtxIR0sFAqwlpW41uryZfspuk/qkZN0abby/+Ea0AzRdoXLiiW9l14sbxWZJue2Kf8i7MkC +x1YAzUm5s2x7UwQa4qjJqhIFI8LO57sEAszAR6LkxCkvW0VXiVHuPOtSCP8HNR6fNWpHSlaY0VqF +H4z1Ir+rzoPz4iIprn2DQKi6bA== +-----END CERTIFICATE----- + +GeoTrust Universal CA +===================== +-----BEGIN CERTIFICATE----- +MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVyc2FsIENBMB4XDTA0MDMwNDA1 
+MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IElu +Yy4xHjAcBgNVBAMTFUdlb1RydXN0IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBAKYVVaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9t +JPi8cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTTQjOgNB0e +RXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFhF7em6fgemdtzbvQKoiFs +7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2vc7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d +8Lsrlh/eezJS/R27tQahsiFepdaVaH/wmZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7V +qnJNk22CDtucvc+081xdVHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3Cga +Rr0BHdCXteGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZf9hB +Z3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfReBi9Fi1jUIxaS5BZu +KGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+nhutxx9z3SxPGWX9f5NAEC7S8O08 +ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0 +XG0D08DYj3rWMB8GA1UdIwQYMBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIB +hjANBgkqhkiG9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc +aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fXIwjhmF7DWgh2 +qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzynANXH/KttgCJwpQzgXQQpAvvL +oJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0zuzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsK +xr2EoyNB3tZ3b4XUhRxQ4K5RirqNPnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxF +KyDuSN/n3QmOGKjaQI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2 +DFKWkoRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9ER/frslK +xfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQtDF4JbAiXfKM9fJP/P6EU +p8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/SfuvmbJxPgWp6ZKy7PtXny3YuxadIwVyQD8vI +P/rmMuGNG2+k5o7Y+SlIis5z/iw= +-----END CERTIFICATE----- + +GeoTrust Universal CA 2 +======================= +-----BEGIN CERTIFICATE----- +MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwHhcNMDQwMzA0 +MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3Qg +SW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0 +DE81WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUGFF+3Qs17 +j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdqXbboW0W63MOhBW9Wjo8Q +JqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxLse4YuU6W3Nx2/zu+z18DwPw76L5GG//a +QMJS9/7jOvdqdzXQ2o3rXhhqMcceujwbKNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2 +WP0+GfPtDCapkzj4T8FdIgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP +20gaXT73y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRthAAn +ZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgocQIgfksILAAX/8sgC +SqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4Lt1ZrtmhN79UNdxzMk+MBB4zsslG +8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2 ++/CfXGJx7Tz0RzgQKzAfBgNVHSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8E +BAMCAYYwDQYJKoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z +dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQL1EuxBRa3ugZ +4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgrFg5fNuH8KrUwJM/gYwx7WBr+ +mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSoag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpq +A1Ihn0CoZ1Dy81of398j9tx4TuaYT1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpg +Y+RdM4kX2TGq2tbzGDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiP 
+pm8m1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJVOCiNUW7d +FGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH6aLcr34YEoP9VhdBLtUp +gn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwXQMAJKOSLakhT2+zNVVXxxvjpoixMptEm +X36vWkzaH6byHCx+rgIW0lbQL1dTR+iS +-----END CERTIFICATE----- + +America Online Root Certification Authority 1 +============================================= +-----BEGIN CERTIFICATE----- +MIIDpDCCAoygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +QW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBPbmxpbmUgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSAxMB4XDTAyMDUyODA2MDAwMFoXDTM3MTExOTIwNDMwMFowYzELMAkG +A1UEBhMCVVMxHDAaBgNVBAoTE0FtZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2Eg +T25saW5lIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAKgv6KRpBgNHw+kqmP8ZonCaxlCyfqXfaE0bfA+2l2h9LaaLl+lkhsmj76CG +v2BlnEtUiMJIxUo5vxTjWVXlGbR0yLQFOVwWpeKVBeASrlmLojNoWBym1BW32J/X3HGrfpq/m44z +DyL9Hy7nBzbvYjnF3cu6JRQj3gzGPTzOggjmZj7aUTsWOqMFf6Dch9Wc/HKpoH145LcxVR5lu9Rh +sCFg7RAycsWSJR74kEoYeEfffjA3PlAb2xzTa5qGUwew76wGePiEmf4hjUyAtgyC9mZweRrTT6PP +8c9GsEsPPt2IYriMqQkoO3rHl+Ee5fSfwMCuJKDIodkP1nsmgmkyPacCAwEAAaNjMGEwDwYDVR0T +AQH/BAUwAwEB/zAdBgNVHQ4EFgQUAK3Zo/Z59m50qX8zPYEX10zPM94wHwYDVR0jBBgwFoAUAK3Z +o/Z59m50qX8zPYEX10zPM94wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBBQUAA4IBAQB8itEf +GDeC4Liwo+1WlchiYZwFos3CYiZhzRAW18y0ZTTQEYqtqKkFZu90821fnZmv9ov761KyBZiibyrF +VL0lvV+uyIbqRizBs73B6UlwGBaXCBOMIOAbLjpHyx7kADCVW/RFo8AasAFOq73AI25jP4BKxQft +3OJvx8Fi8eNy1gTIdGcL+oiroQHIb/AUr9KZzVGTfu0uOMe9zkZQPXLjeSWdm4grECDdpbgyn43g +Kd8hdIaC2y+CMMbHNYaz+ZZfRtsMRf3zUMNvxsNIrUam4SdHCh0Om7bCd39j8uB9Gr784N/Xx6ds +sPmuujz9dLQR6FgNgLzTqIA6me11zEZ7 +-----END CERTIFICATE----- + +America Online Root Certification Authority 2 +============================================= +-----BEGIN CERTIFICATE----- +MIIFpDCCA4ygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +QW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBPbmxpbmUgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSAyMB4XDTAyMDUyODA2MDAwMFoXDTM3MDkyOTE0MDgwMFowYzELMAkG +A1UEBhMCVVMxHDAaBgNVBAoTE0FtZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2Eg +T25saW5lIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMjCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAMxBRR3pPU0Q9oyxQcngXssNt79Hc9PwVU3dxgz6sWYFas14tNwC206B89en +fHG8dWOgXeMHDEjsJcQDIPT/DjsS/5uN4cbVG7RtIuOx238hZK+GvFciKtZHgVdEglZTvYYUAQv8 +f3SkWq7xuhG1m1hagLQ3eAkzfDJHA1zEpYNI9FdWboE2JxhP7JsowtS013wMPgwr38oE18aO6lhO +qKSlGBxsRZijQdEt0sdtjRnxrXm3gT+9BoInLRBYBbV4Bbkv2wxrkJB+FFk4u5QkE+XRnRTf04JN +RvCAOVIyD+OEsnpD8l7eXz8d3eOyG6ChKiMDbi4BFYdcpnV1x5dhvt6G3NRI270qv0pV2uh9UPu0 +gBe4lL8BPeraunzgWGcXuVjgiIZGZ2ydEEdYMtA1fHkqkKJaEBEjNa0vzORKW6fIJ/KD3l67Xnfn +6KVuY8INXWHQjNJsWiEOyiijzirplcdIz5ZvHZIlyMbGwcEMBawmxNJ10uEqZ8A9W6Wa6897Gqid +FEXlD6CaZd4vKL3Ob5Rmg0gp2OpljK+T2WSfVVcmv2/LNzGZo2C7HK2JNDJiuEMhBnIMoVxtRsX6 +Kc8w3onccVvdtjc+31D1uAclJuW8tf48ArO3+L5DwYcRlJ4jbBeKuIonDFRH8KmzwICMoCfrHRnj +B453cMor9H124HhnAgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFE1FwWg4u3Op +aaEg5+31IqEjFNeeMB8GA1UdIwQYMBaAFE1FwWg4u3OpaaEg5+31IqEjFNeeMA4GA1UdDwEB/wQE +AwIBhjANBgkqhkiG9w0BAQUFAAOCAgEAZ2sGuV9FOypLM7PmG2tZTiLMubekJcmnxPBUlgtk87FY +T15R/LKXeydlwuXK5w0MJXti4/qftIe3RUavg6WXSIylvfEWK5t2LHo1YGwRgJfMqZJS5ivmae2p ++DYtLHe/YUjRYwu5W1LtGLBDQiKmsXeu3mnFzcccobGlHBD7GL4acN3Bkku+KVqdPzW+5X1R+FXg +JXUjhx5c3LqdsKyzadsXg8n33gy8CNyRnqjQ1xU3c6U1uPx+xURABsPr+CKAXEfOAuMRn0T//Zoy +zH1kUQ7rVyZ2OuMeIjzCpjbdGe+n/BLzJsBZMYVMnNjP36TMzCmT/5RtdlwTCJfy7aULTd3oyWgO 
+ZtMADjMSW7yV5TKQqLPGbIOtd+6Lfn6xqavT4fG2wLHqiMDn05DpKJKUe2h7lyoKZy2FAjgQ5ANh +1NolNscIWC2hp1GvMApJ9aZphwctREZ2jirlmjvXGKL8nDgQzMY70rUXOm/9riW99XJZZLF0Kjhf +GEzfz3EEWjbUvy+ZnOjZurGV5gJLIaFb1cFPj65pbVPbAZO1XB4Y3WRayhgoPmMEEf0cjQAPuDff +Z4qdZqkCapH/E8ovXYO8h5Ns3CRRFgQlZvqz2cK6Kb6aSDiCmfS/O0oxGfm/jiEzFMpPVF/7zvuP +cX/9XhmgD0uRuMRUvAawRY8mkaKO/qk= +-----END CERTIFICATE----- + +Visa eCommerce Root +=================== +-----BEGIN CERTIFICATE----- +MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBrMQswCQYDVQQG +EwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2Ug +QXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2 +WhcNMjIwNjI0MDAxNjEyWjBrMQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMm +VmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv +bW1lcmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h2mCxlCfL +F9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4ElpF7sDPwsRROEW+1QK8b +RaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdVZqW1LS7YgFmypw23RuwhY/81q6UCzyr0 +TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI +/k4+oKsGGelT84ATB+0tvz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzs +GHxBvfaLdXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEG +MB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUFAAOCAQEAX/FBfXxc +CLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcRzCSs00Rsca4BIGsDoo8Ytyk6feUW +YFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pz +zkWKsKZJ/0x9nXGIxHYdkFsd7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBu +YQa7FkKMcPcw++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt +398znM/jra6O1I7mT1GvFpLgXPYHDw== +-----END CERTIFICATE----- + +Certum Root CA +============== +-----BEGIN CERTIFICATE----- +MIIDDDCCAfSgAwIBAgIDAQAgMA0GCSqGSIb3DQEBBQUAMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQK +ExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBDQTAeFw0wMjA2MTExMDQ2Mzla +Fw0yNzA2MTExMDQ2MzlaMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8u +by4xEjAQBgNVBAMTCUNlcnR1bSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6x +wS7TT3zNJc4YPk/EjG+AanPIW1H4m9LcuwBcsaD8dQPugfCI7iNS6eYVM42sLQnFdvkrOYCJ5JdL +kKWoePhzQ3ukYbDYWMzhbGZ+nPMJXlVjhNWo7/OxLjBos8Q82KxujZlakE403Daaj4GIULdtlkIJ +89eVgw1BS7Bqa/j8D35in2fE7SZfECYPCE/wpFcozo+47UX2bu4lXapuOb7kky/ZR6By6/qmW6/K +Uz/iDsaWVhFu9+lmqSbYf5VT7QqFiLpPKaVCjF62/IUgAKpoC6EahQGcxEZjgoi2IrHu/qpGWX7P +NSzVttpd90gzFFS269lvzs2I1qsb2pY7HVkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkq +hkiG9w0BAQUFAAOCAQEAuI3O7+cUus/usESSbLQ5PqKEbq24IXfS1HeCh+YgQYHu4vgRt2PRFze+ +GXYkHAQaTOs9qmdvLdTN/mUxcMUbpgIKumB7bVjCmkn+YzILa+M6wKyrO7Do0wlRjBCDxjTgxSvg +GrZgFCdsMneMvLJymM/NzD+5yCRCFNZX/OYmQ6kd5YCQzgNUKD73P9P4Te1qCjqTE5s7FCMTY5w/ +0YcneeVMUeMBrYVdGjux1XMQpNPyvG5k9VpWkKjHDkx0Dy5xO/fIR/RpbxXyEV6DHpx8Uq79AtoS +qFlnGNu8cN2bsWntgM6JQEhqDjXKKWYVIZQs6GAqm4VKQPNriiTsBhYscw== +-----END CERTIFICATE----- + +Comodo AAA Services root +======================== +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAw +MFoXDTI4MTIzMTIzNTk1OVowezELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hl +c3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNV +BAMMGEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQuaBtDFcCLNSS1UY8y2bmhG 
+C1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe3M/vg4aijJRPn2jymJBGhCfHdr/jzDUs +i14HZGWCwEiwqJH5YZ92IFCokcdmtet4YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszW +Y19zjNoFmag4qMsXeDZRrOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjH +Ypy+g8cmez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQUoBEK +Iz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wewYDVR0f +BHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20vQUFBQ2VydGlmaWNhdGVTZXJ2aWNl +cy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29tb2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2Vz +LmNybDANBgkqhkiG9w0BAQUFAAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm +7l3sAg9g1o1QGE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2G9w84FoVxp7Z +8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsil2D4kF501KKaU73yqWjgom7C +12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- + +Comodo Secure Services root +=========================== +-----BEGIN CERTIFICATE----- +MIIEPzCCAyegAwIBAgIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEkMCIGA1UEAwwbU2VjdXJlIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAw +MDAwMFoXDTI4MTIzMTIzNTk1OVowfjELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFu +Y2hlc3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxJDAi +BgNVBAMMG1NlY3VyZSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAMBxM4KK0HDrc4eCQNUd5MvJDkKQ+d40uaG6EfQlhfPMcm3ye5drswfxdySRXyWP +9nQ95IDC+DwN879A6vfIUtFyb+/Iq0G4bi4XKpVpDM3SHpR7LZQdqnXXs5jLrLxkU0C8j6ysNstc +rbvd4JQX7NFc0L/vpZXJkMWwrPsbQ996CF23uPJAGysnnlDOXmWCiIxe004MeuoIkbY2qitC++rC +oznl2yY4rYsK7hljxxwk3wN42ubqwUcaCwtGCd0C/N7Lh1/XMGNooa7cMqG6vv5Eq2i2pRcV/b3V +p6ea5EQz6YiO/O1R65NxTq0B50SOqy3LqP4BSUjwwN3HaNiS/j0CAwEAAaOBxzCBxDAdBgNVHQ4E +FgQUPNiTiMLAggnMAZkGkyDpnnAJY08wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8w +gYEGA1UdHwR6MHgwO6A5oDeGNWh0dHA6Ly9jcmwuY29tb2RvY2EuY29tL1NlY3VyZUNlcnRpZmlj +YXRlU2VydmljZXMuY3JsMDmgN6A1hjNodHRwOi8vY3JsLmNvbW9kby5uZXQvU2VjdXJlQ2VydGlm +aWNhdGVTZXJ2aWNlcy5jcmwwDQYJKoZIhvcNAQEFBQADggEBAIcBbSMdflsXfcFhMs+P5/OKlFlm +4J4oqF7Tt/Q05qo5spcWxYJvMqTpjOev/e/C6LlLqqP05tqNZSH7uoDrJiiFGv45jN5bBAS0VPmj +Z55B+glSzAVIqMk/IQQezkhr/IXownuvf7fM+F86/TXGDe+X3EyrEeFryzHRbPtIgKvcnDe4IRRL +DXE97IMzbtFuMhbsmMcWi1mmNKsFVy2T96oTy9IT4rcuO81rUBcJaD61JlfutuC23bkpgHl9j6Pw +pCikFcSF9CfUa7/lXORlAnZUtOM3ZiTTGWHIUhDlizeauan5Hb/qmZJhlv8BzaFfDbxxvA6sCx1H +RR3B7Hzs/Sk= +-----END CERTIFICATE----- + +Comodo Trusted Services root +============================ +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIBATANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDElMCMGA1UEAwwcVHJ1c3RlZCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczAeFw0wNDAxMDEw +MDAwMDBaFw0yODEyMzEyMzU5NTlaMH8xCzAJBgNVBAYTAkdCMRswGQYDVQQIDBJHcmVhdGVyIE1h +bmNoZXN0ZXIxEDAOBgNVBAcMB1NhbGZvcmQxGjAYBgNVBAoMEUNvbW9kbyBDQSBMaW1pdGVkMSUw +IwYDVQQDDBxUcnVzdGVkIENlcnRpZmljYXRlIFNlcnZpY2VzMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA33FvNlhTWvI2VFeAxHQIIO0Yfyod5jWaHiWsnOWWfnJSoBVC21ndZHoa0Lh7 +3TkVvFVIxO06AOoxEbrycXQaZ7jPM8yoMa+j49d/vzMtTGo87IvDktJTdyR0nAducPy9C1t2ul/y +/9c3S0pgePfw+spwtOpZqqPOSC+pw7ILfhdyFgymBwwbOM/JYrc/oJOlh0Hyt3BAd9i+FHzjqMB6 +juljatEPmsbS9Is6FARW1O24zG71++IsWL1/T2sr92AkWCTOJu80kTrV44HQsvAEAtdbtz6SrGsS +ivnkBbA7kUlcsutT6vifR4buv5XAwAaf0lteERv0xwQ1KdJVXOTt6wIDAQABo4HJMIHGMB0GA1Ud 
+DgQWBBTFe1i97doladL3WRaoszLAeydb9DAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zCBgwYDVR0fBHwwejA8oDqgOIY2aHR0cDovL2NybC5jb21vZG9jYS5jb20vVHJ1c3RlZENlcnRp +ZmljYXRlU2VydmljZXMuY3JsMDqgOKA2hjRodHRwOi8vY3JsLmNvbW9kby5uZXQvVHJ1c3RlZENl +cnRpZmljYXRlU2VydmljZXMuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQDIk4E7ibSvuIQSTI3S8Ntw +uleGFTQQuS9/HrCoiWChisJ3DFBKmwCL2Iv0QeLQg4pKHBQGsKNoBXAxMKdTmw7pSqBYaWcOrp32 +pSxBvzwGa+RZzG0Q8ZZvH9/0BAKkn0U+yNj6NkZEUD+Cl5EfKNsYEYwq5GWDVxISjBc/lDb+XbDA +BHcTuPQV1T84zJQ6VdCsmPW6AF/ghhmBeC8owH7TzEIK9a5QoNE+xqFx7D+gIIxmOom0jtTYsU0l +R+4viMi14QVFwL4Ucd56/Y57fU0IlqUSc/AtyjcndBInTMu2l+nZrghtWjlA3QVHdWpaIbOjGM9O +9y5Xt5hwXsjEeLBi +-----END CERTIFICATE----- + +QuoVadis Root CA +================ +-----BEGIN CERTIFICATE----- +MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJCTTEZMBcGA1UE +ChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 +eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAz +MTkxODMzMzNaFw0yMTAzMTcxODMzMzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRp +cyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQD +EyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Ypli4kVEAkOPcahdxYTMuk +J0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2DrOpm2RgbaIr1VxqYuvXtdj182d6UajtL +F8HVj71lODqV0D1VNk7feVcxKh7YWWVJWCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeL +YzcS19Dsw3sgQUSj7cugF+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWen +AScOospUxbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCCAk4w +PQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVvdmFkaXNvZmZzaG9y +ZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREwggENMIIBCQYJKwYBBAG+WAABMIH7 +MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNlIG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmlj +YXRlIGJ5IGFueSBwYXJ0eSBhc3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJs +ZSBzdGFuZGFyZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh +Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYIKwYBBQUHAgEW +Fmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3TKbkGGew5Oanwl4Rqy+/fMIGu +BgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rqy+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkw +FwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MS4wLAYDVQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6 +tlCLMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSkfnIYj9lo +fFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf87C9TqnN7Az10buYWnuul +LsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1RcHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2x +gI4JVrmcGmD+XcHXetwReNDWXcG31a0ymQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi +5upZIof4l/UO/erMkqQWxFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi +5nrQNiOKSnQ2+Q== +-----END CERTIFICATE----- + +QuoVadis Root CA 2 +================== +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMjAeFw0wNjExMjQx +ODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQCaGMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6 +XJxgFyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55JWpzmM+Yk +lvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bBrrcCaoF6qUWD4gXmuVbB +lDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp+ARz8un+XJiM9XOva7R+zdRcAitMOeGy 
+lZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt +66/3FsvbzSUr5R/7mp/iUcw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1Jdxn +wQ5hYIizPtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og/zOh +D7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UHoycR7hYQe7xFSkyy +BNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuIyV77zGHcizN300QyNQliBJIWENie +J0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1Ud +DgQWBBQahGK8SEwzJQTU7tD2A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGU +a6FJpEcwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2fBluornFdLwUv +Z+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzng/iN/Ae42l9NLmeyhP3ZRPx3 +UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2BlfF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodm +VjB3pjd4M1IQWK4/YY7yarHvGH5KWWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK ++JDSV6IZUaUtl0HaB0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrW +IozchLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPRTUIZ3Ph1 +WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWDmbA4CD/pXvk1B+TJYm5X +f6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0ZohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II +4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8 +VCLAAVBpQ570su9t+Oza8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- + +QuoVadis Root CA 3 +================== +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMzAeFw0wNjExMjQx +OTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQDMV0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNgg +DhoB4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUrH556VOij +KTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd8lyyBTNvijbO0BNO/79K +DDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9CabwvvWhDFlaJKjdhkf2mrk7AyxRllDdLkgbv +BNDInIjbC3uBr7E9KsRlOni27tyAsdLTmZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwp +p5ijJUMv7/FfJuGITfhebtfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8 +nT8KKdjcT5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDtWAEX +MJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZc6tsgLjoC2SToJyM +Gf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A4iLItLRkT9a6fUg+qGkM17uGcclz +uD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYDVR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHT +BgkrBgEEAb5YAAMwgcUwgZMGCCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmlj +YXRlIGNvbnN0aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVudC4wLQYIKwYB +BQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2NwczALBgNVHQ8EBAMCAQYwHQYD +VR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4GA1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4 +ywLQoUmkRzBFMQswCQYDVQQGEwJCTTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UE +AxMSUXVvVmFkaXMgUm9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZV +qyM07ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSemd1o417+s +hvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd+LJ2w/w4E6oM3kJpK27z +POuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2 +Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadNt54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp +8kokUvd0/bpO5qgdAm6xDYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBC 
+bjPsMZ57k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6szHXu +g/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0jWy10QJLZYxkNc91p +vGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeTmJlglFwjz1onl14LBQaTNx47aTbr +qZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- + +Security Communication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +HhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw +8yl89f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJDKaVv0uM +DPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9Ms+k2Y7CI9eNqPPYJayX +5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/NQV3Is00qVUarH9oe4kA92819uZKAnDfd +DJZkndwi92SL32HeFZRSFaB9UslLqCHJxrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2 +JChzAgMBAAGjPzA9MB0GA1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vGkl3g +0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfrUj94nK9NrvjVT8+a +mCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5Bw+SUEmK3TGXX8npN6o7WWWXlDLJ +s58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJUJRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ +6rBK+1YWc26sTfcioU+tHXotRSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAi +FL39vmwLAw== +-----END CERTIFICATE----- + +Sonera Class 2 Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEPMA0GA1UEChMG +U29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAxMDQwNjA3Mjk0MFoXDTIxMDQw +NjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNVBAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJh +IENsYXNzMiBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3 +/Ei9vX+ALTU74W+oZ6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybT +dXnt5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s3TmVToMG +f+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2EjvOr7nQKV0ba5cTppCD8P +tOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu8nYybieDwnPz3BjotJPqdURrBGAgcVeH +nfO+oJAjPYok4doh28MCAwEAAaMzMDEwDwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITT +XjwwCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt +0jSv9zilzqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/3DEI +cbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvDFNr450kkkdAdavph +Oe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6Tk6ezAyNlNzZRZxe7EJQY670XcSx +EtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLH +llpwrN9M +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA +============================= +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgIEAJiWijANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQGEwJOTDEeMBwGA1UE +ChMVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSYwJAYDVQQDEx1TdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQTAeFw0wMjEyMTcwOTIzNDlaFw0xNTEyMTYwOTE1MzhaMFUxCzAJBgNVBAYTAk5MMR4w +HAYDVQQKExVTdGFhdCBkZXIgTmVkZXJsYW5kZW4xJjAkBgNVBAMTHVN0YWF0IGRlciBOZWRlcmxh +bmRlbiBSb290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmNK1URF6gaYUmHFt +vsznExvWJw56s2oYHLZhWtVhCb/ekBPHZ+7d89rFDBKeNVU+LCeIQGv33N0iYfXCxw719tV2U02P +jLwYdjeFnejKScfST5gTCaI+Ioicf9byEGW07l8Y1Rfj+MX94p2i71MOhXeiD+EwR+4A5zN9RGca +C1Hoi6CeUJhoNFIfLm0B8mBF8jHrqTFoKbt6QZ7GGX+UtFE5A3+y3qcym7RHjm+0Sq7lr7HcsBth 
+vJly3uSJt3omXdozSVtSnA71iq3DuD3oBmrC1SoLbHuEvVYFy4ZlkuxEK7COudxwC0barbxjiDn6 +22r+I/q85Ej0ZytqERAhSQIDAQABo4GRMIGOMAwGA1UdEwQFMAMBAf8wTwYDVR0gBEgwRjBEBgRV +HSAAMDwwOgYIKwYBBQUHAgEWLmh0dHA6Ly93d3cucGtpb3ZlcmhlaWQubmwvcG9saWNpZXMvcm9v +dC1wb2xpY3kwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSofeu8Y6R0E3QA7Jbg0zTBLL9s+DAN +BgkqhkiG9w0BAQUFAAOCAQEABYSHVXQ2YcG70dTGFagTtJ+k/rvuFbQvBgwp8qiSpGEN/KtcCFtR +EytNwiphyPgJWPwtArI5fZlmgb9uXJVFIGzmeafR2Bwp/MIgJ1HI8XxdNGdphREwxgDS1/PTfLbw +MVcoEoJz6TMvplW0C5GUR5z6u3pCMuiufi3IvKwUv9kP2Vv8wfl6leF9fpb8cbDCTMjfRTTJzg3y +nGQI0DvDKcWy7ZAEwbEpkcUwb8GpcjPM/l0WFywRaed+/sWDCN+83CI6LiBpIzlWYGeQiy52OfsR +iJf2fL1LuCAWZwWN4jvBcj+UlTfHXbme2JOhF4//DGYVwSR8MnwDHTuhWEUykw== +-----END CERTIFICATE----- + +TDC Internet Root CA +==================== +-----BEGIN CERTIFICATE----- +MIIEKzCCAxOgAwIBAgIEOsylTDANBgkqhkiG9w0BAQUFADBDMQswCQYDVQQGEwJESzEVMBMGA1UE +ChMMVERDIEludGVybmV0MR0wGwYDVQQLExRUREMgSW50ZXJuZXQgUm9vdCBDQTAeFw0wMTA0MDUx +NjMzMTdaFw0yMTA0MDUxNzAzMTdaMEMxCzAJBgNVBAYTAkRLMRUwEwYDVQQKEwxUREMgSW50ZXJu +ZXQxHTAbBgNVBAsTFFREQyBJbnRlcm5ldCBSb290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAxLhAvJHVYx/XmaCLDEAedLdInUaMArLgJF/wGROnN4NrXceO+YQwzho7+vvOi20j +xsNuZp+Jpd/gQlBn+h9sHvTQBda/ytZO5GhgbEaqHF1j4QeGDmUApy6mcca8uYGoOn0a0vnRrEvL +znWv3Hv6gXPU/Lq9QYjUdLP5Xjg6PEOo0pVOd20TDJ2PeAG3WiAfAzc14izbSysseLlJ28TQx5yc +5IogCSEWVmb/Bexb4/DPqyQkXsN/cHoSxNK1EKC2IeGNeGlVRGn1ypYcNIUXJXfi9i8nmHj9eQY6 +otZaQ8H/7AQ77hPv01ha/5Lr7K7a8jcDR0G2l8ktCkEiu7vmpwIDAQABo4IBJTCCASEwEQYJYIZI +AYb4QgEBBAQDAgAHMGUGA1UdHwReMFwwWqBYoFakVDBSMQswCQYDVQQGEwJESzEVMBMGA1UEChMM +VERDIEludGVybmV0MR0wGwYDVQQLExRUREMgSW50ZXJuZXQgUm9vdCBDQTENMAsGA1UEAxMEQ1JM +MTArBgNVHRAEJDAigA8yMDAxMDQwNTE2MzMxN1qBDzIwMjEwNDA1MTcwMzE3WjALBgNVHQ8EBAMC +AQYwHwYDVR0jBBgwFoAUbGQBx/2FbazI2p5QCIUItTxWqFAwHQYDVR0OBBYEFGxkAcf9hW2syNqe +UAiFCLU8VqhQMAwGA1UdEwQFMAMBAf8wHQYJKoZIhvZ9B0EABBAwDhsIVjUuMDo0LjADAgSQMA0G +CSqGSIb3DQEBBQUAA4IBAQBOQ8zR3R0QGwZ/t6T609lN+yOfI1Rb5osvBCiLtSdtiaHsmGnc540m +gwV5dOy0uaOXwTUA/RXaOYE6lTGQ3pfphqiZdwzlWqCE/xIWrG64jcN7ksKsLtB9KOy282A4aW8+ +2ARVPp7MVdK6/rtHBNcK2RYKNCn1WBPVT8+PVkuzHu7TmHnaCB4Mb7j4Fifvwm899qNLPg7kbWzb +O0ESm70NRyN/PErQr8Cv9u8btRXE64PECV90i9kR+8JWsTz4cMo0jUNAE4z9mQNUecYu6oah9jrU +Cbz0vGbMPVjQV0kK7iXiQe4T+Zs4NNEA9X7nlB38aQNiuJkFBT1reBK9sG9l +-----END CERTIFICATE----- + +UTN DATACorp SGC Root CA +======================== +-----BEGIN CERTIFICATE----- +MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCBkzELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZ +BgNVBAMTElVUTiAtIERBVEFDb3JwIFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBa +MIGTMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4w +HAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cudXNlcnRy +dXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ys +raP6LnD43m77VkIVni5c7yPeIbkFdicZD0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlo +wHDyUwDAXlCCpVZvNvlK4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA +9P4yPykqlXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulWbfXv +33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQABo4GrMIGoMAsGA1Ud +DwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRTMtGzz3/64PGgXYVOktKeRR20TzA9 +BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3JsLnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dD +LmNybDAqBgNVHSUEIzAhBggrBgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3 
+DQEBBQUAA4IBAQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft +Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyjj98C5OBxOvG0 +I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVHKWss5nbZqSl9Mt3JNjy9rjXx +EZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwP +DPafepE39peC4N1xaf92P2BNPM/3mfnGV/TJVTl4uix5yaaIK/QI +-----END CERTIFICATE----- + +UTN USERFirst Hardware Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCBlzELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAd +BgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgx +OTIyWjCBlzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0 +eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVz +ZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdhcmUwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlI +wrthdBKWHTxqctU8EGc6Oe0rE81m65UJM6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFd +tqdt++BxF2uiiPsA3/4aMXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8 +i4fDidNdoI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqIDsjf +Pe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9KsyoUhbAgMBAAGjgbkw +gbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFKFyXyYbKJhDlV0HN9WF +lp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNF +UkZpcnN0LUhhcmR3YXJlLmNybDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUF +BwMGBggrBgEFBQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM +//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28GpgoiskliCE7/yMgUsogW +XecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gECJChicsZUN/KHAG8HQQZexB2 +lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kn +iCrVWFCVH/A7HFe7fRQ5YiuayZSSKqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67 +nfhmqA== +-----END CERTIFICATE----- + +Camerfirma Chambers of Commerce Root +==================================== +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBADANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe +QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i +ZXJzaWduLm9yZzEiMCAGA1UEAxMZQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdDAeFw0wMzA5MzAx +NjEzNDNaFw0zNzA5MzAxNjEzNDRaMH8xCzAJBgNVBAYTAkVVMScwJQYDVQQKEx5BQyBDYW1lcmZp +cm1hIFNBIENJRiBBODI3NDMyODcxIzAhBgNVBAsTGmh0dHA6Ly93d3cuY2hhbWJlcnNpZ24ub3Jn +MSIwIAYDVQQDExlDaGFtYmVycyBvZiBDb21tZXJjZSBSb290MIIBIDANBgkqhkiG9w0BAQEFAAOC +AQ0AMIIBCAKCAQEAtzZV5aVdGDDg2olUkfzIx1L4L1DZ77F1c2VHfRtbunXF/KGIJPov7coISjlU +xFF6tdpg6jg8gbLL8bvZkSM/SAFwdakFKq0fcfPJVD0dBmpAPrMMhe5cG3nCYsS4No41XQEMIwRH +NaqbYE6gZj3LJgqcQKH0XZi/caulAGgq7YN6D6IUtdQis4CwPAxaUWktWBiP7Zme8a7ileb2R6jW +DA+wWFjbw2Y3npuRVDM30pQcakjJyfKl2qUMI/cjDpwyVV5xnIQFUZot/eZOKjRa3spAN2cMVCFV +d9oKDMyXroDclDZK9D7ONhMeU+SsTjoF7Nuucpw4i9A5O4kKPnf+dQIBA6OCAUQwggFAMBIGA1Ud +EwEB/wQIMAYBAf8CAQwwPAYDVR0fBDUwMzAxoC+gLYYraHR0cDovL2NybC5jaGFtYmVyc2lnbi5v +cmcvY2hhbWJlcnNyb290LmNybDAdBgNVHQ4EFgQU45T1sU3p26EpW1eLTXYGduHRooowDgYDVR0P +AQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzAnBgNVHREEIDAegRxjaGFtYmVyc3Jvb3RAY2hh +bWJlcnNpZ24ub3JnMCcGA1UdEgQgMB6BHGNoYW1iZXJzcm9vdEBjaGFtYmVyc2lnbi5vcmcwWAYD +VR0gBFEwTzBNBgsrBgEEAYGHLgoDATA+MDwGCCsGAQUFBwIBFjBodHRwOi8vY3BzLmNoYW1iZXJz +aWduLm9yZy9jcHMvY2hhbWJlcnNyb290Lmh0bWwwDQYJKoZIhvcNAQEFBQADggEBAAxBl8IahsAi 
+fJ/7kPMa0QOx7xP5IV8EnNrJpY0nbJaHkb5BkAFyk+cefV/2icZdp0AJPaxJRUXcLo0waLIJuvvD +L8y6C98/d3tGfToSJI6WjzwFCm/SlCgdbQzALogi1djPHRPH8EjX1wWnz8dHnjs8NMiAT9QUu/wN +UPf6s+xCX6ndbcj0dc97wXImsQEcXCz9ek60AcUFV7nnPKoF2YjpB0ZBzu9Bga5Y34OirsrXdx/n +ADydb47kMgkdTXg0eDQ8lJsm7U9xxhl6vSAiSFr+S30Dt+dYvsYyTnQeaN2oaFuzPu5ifdmA6Ap1 +erfutGWaIZDgqtCYvDi1czyL+Nw= +-----END CERTIFICATE----- + +Camerfirma Global Chambersign Root +================================== +-----BEGIN CERTIFICATE----- +MIIExTCCA62gAwIBAgIBADANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe +QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i +ZXJzaWduLm9yZzEgMB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwHhcNMDMwOTMwMTYx +NDE4WhcNMzcwOTMwMTYxNDE4WjB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMeQUMgQ2FtZXJmaXJt +YSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEg +MB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAw +ggEIAoIBAQCicKLQn0KuWxfH2H3PFIP8T8mhtxOviteePgQKkotgVvq0Mi+ITaFgCPS3CU6gSS9J +1tPfnZdan5QEcOw/Wdm3zGaLmFIoCQLfxS+EjXqXd7/sQJ0lcqu1PzKY+7e3/HKE5TWH+VX6ox8O +by4o3Wmg2UIQxvi1RMLQQ3/bvOSiPGpVeAp3qdjqGTK3L/5cPxvusZjsyq16aUXjlg9V9ubtdepl +6DJWk0aJqCWKZQbua795B9Dxt6/tLE2Su8CoX6dnfQTyFQhwrJLWfQTSM/tMtgsL+xrJxI0DqX5c +8lCrEqWhz0hQpe/SyBoT+rB/sYIcd2oPX9wLlY/vQ37mRQklAgEDo4IBUDCCAUwwEgYDVR0TAQH/ +BAgwBgEB/wIBDDA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmNoYW1iZXJzaWduLm9yZy9j +aGFtYmVyc2lnbnJvb3QuY3JsMB0GA1UdDgQWBBRDnDafsJ4wTcbOX60Qq+UDpfqpFDAOBgNVHQ8B +Af8EBAMCAQYwEQYJYIZIAYb4QgEBBAQDAgAHMCoGA1UdEQQjMCGBH2NoYW1iZXJzaWducm9vdEBj +aGFtYmVyc2lnbi5vcmcwKgYDVR0SBCMwIYEfY2hhbWJlcnNpZ25yb290QGNoYW1iZXJzaWduLm9y +ZzBbBgNVHSAEVDBSMFAGCysGAQQBgYcuCgEBMEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly9jcHMuY2hh +bWJlcnNpZ24ub3JnL2Nwcy9jaGFtYmVyc2lnbnJvb3QuaHRtbDANBgkqhkiG9w0BAQUFAAOCAQEA +PDtwkfkEVCeR4e3t/mh/YV3lQWVPMvEYBZRqHN4fcNs+ezICNLUMbKGKfKX0j//U2K0X1S0E0T9Y +gOKBWYi+wONGkyT+kL0mojAt6JcmVzWJdJYY9hXiryQZVgICsroPFOrGimbBhkVVi76SvpykBMdJ +PJ7oKXqJ1/6v/2j1pReQvayZzKWGVwlnRtvWFsJG8eSpUPWP0ZIV018+xgBJOm5YstHRJw0lyDL4 +IBHNfTIzSJRUTN3cecQwn+uOuFW114hcxWokPbLTBQNRxgfvzBRydD1ucs4YKIxKoHflCStFREes +t2d/AYoFWpO+ocH/+OcOZ6RHSXZddZAa9SaP8A== +-----END CERTIFICATE----- + +NetLock Notary (Class A) Root +============================= +-----BEGIN CERTIFICATE----- +MIIGfTCCBWWgAwIBAgICAQMwDQYJKoZIhvcNAQEEBQAwga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQI +EwdIdW5nYXJ5MREwDwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6 +dG9uc2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9j +ayBLb3pqZWd5em9pIChDbGFzcyBBKSBUYW51c2l0dmFueWtpYWRvMB4XDTk5MDIyNDIzMTQ0N1oX +DTE5MDIxOTIzMTQ0N1owga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQIEwdIdW5nYXJ5MREwDwYDVQQH +EwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6dG9uc2FnaSBLZnQuMRowGAYD +VQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9jayBLb3pqZWd5em9pIChDbGFz +cyBBKSBUYW51c2l0dmFueWtpYWRvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvHSM +D7tM9DceqQWC2ObhbHDqeLVu0ThEDaiDzl3S1tWBxdRL51uUcCbbO51qTGL3cfNk1mE7PetzozfZ +z+qMkjvN9wfcZnSX9EUi3fRc4L9t875lM+QVOr/bmJBVOMTtplVjC7B4BPTjbsE/jvxReB+SnoPC +/tmwqcm8WgD/qaiYdPv2LD4VOQ22BFWoDpggQrOxJa1+mm9dU7GrDPzr4PN6s6iz/0b2Y6LYOph7 +tqyF/7AlT3Rj5xMHpQqPBffAZG9+pyeAlt7ULoZgx2srXnN7F+eRP2QM2EsiNCubMvJIH5+hCoR6 +4sKtlz2O1cH5VqNQ6ca0+pii7pXmKgOM3wIDAQABo4ICnzCCApswDgYDVR0PAQH/BAQDAgAGMBIG +A1UdEwEB/wQIMAYBAf8CAQQwEQYJYIZIAYb4QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaC +Ak1GSUdZRUxFTSEgRXplbiB0YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFub3MgU3pv +bGdhbHRhdGFzaSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBhbGFwamFuIGtlc3p1bHQu 
+IEEgaGl0ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExvY2sgS2Z0LiB0ZXJtZWtmZWxlbG9zc2Vn +LWJpenRvc2l0YXNhIHZlZGkuIEEgZGlnaXRhbGlzIGFsYWlyYXMgZWxmb2dhZGFzYW5hayBmZWx0 +ZXRlbGUgYXogZWxvaXJ0IGVsbGVub3J6ZXNpIGVsamFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFz +IGxlaXJhc2EgbWVndGFsYWxoYXRvIGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGphbiBh +IGh0dHBzOi8vd3d3Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJoZXRvIGF6IGVsbGVu +b3J6ZXNAbmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBPUlRBTlQhIFRoZSBpc3N1YW5jZSBh +bmQgdGhlIHVzZSBvZiB0aGlzIGNlcnRpZmljYXRlIGlzIHN1YmplY3QgdG8gdGhlIE5ldExvY2sg +Q1BTIGF2YWlsYWJsZSBhdCBodHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFp +bCBhdCBjcHNAbmV0bG9jay5uZXQuMA0GCSqGSIb3DQEBBAUAA4IBAQBIJEb3ulZv+sgoA0BO5TE5 +ayZrU3/b39/zcT0mwBQOxmd7I6gMc90Bu8bKbjc5VdXHjFYgDigKDtIqpLBJUsY4B/6+CgmM0ZjP +ytoUMaFP0jn8DxEsQ8Pdq5PHVT5HfBgaANzze9jyf1JsIPQLX2lS9O74silg6+NJMSEN1rUQQeJB +CWziGppWS3cC9qCbmieH6FUpccKQn0V4GuEVZD3QDtigdp+uxdAu6tYPVuxkf1qbFFgBJ34TUMdr +KuZoPL9coAob4Q566eKAw+np9v1sEZ7Q5SgnK1QyQhSCdeZK8CtmdWOMovsEPoMOmzbwGOQmIMOM +8CgHrTwXZoi1/baI +-----END CERTIFICATE----- + +NetLock Business (Class B) Root +=============================== +-----BEGIN CERTIFICATE----- +MIIFSzCCBLSgAwIBAgIBaTANBgkqhkiG9w0BAQQFADCBmTELMAkGA1UEBhMCSFUxETAPBgNVBAcT +CEJ1ZGFwZXN0MScwJQYDVQQKEx5OZXRMb2NrIEhhbG96YXRiaXp0b25zYWdpIEtmdC4xGjAYBgNV +BAsTEVRhbnVzaXR2YW55a2lhZG9rMTIwMAYDVQQDEylOZXRMb2NrIFV6bGV0aSAoQ2xhc3MgQikg +VGFudXNpdHZhbnlraWFkbzAeFw05OTAyMjUxNDEwMjJaFw0xOTAyMjAxNDEwMjJaMIGZMQswCQYD +VQQGEwJIVTERMA8GA1UEBxMIQnVkYXBlc3QxJzAlBgNVBAoTHk5ldExvY2sgSGFsb3phdGJpenRv +bnNhZ2kgS2Z0LjEaMBgGA1UECxMRVGFudXNpdHZhbnlraWFkb2sxMjAwBgNVBAMTKU5ldExvY2sg +VXpsZXRpIChDbGFzcyBCKSBUYW51c2l0dmFueWtpYWRvMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB +iQKBgQCx6gTsIKAjwo84YM/HRrPVG/77uZmeBNwcf4xKgZjupNTKihe5In+DCnVMm8Bp2GQ5o+2S +o/1bXHQawEfKOml2mrriRBf8TKPV/riXiK+IA4kfpPIEPsgHC+b5sy96YhQJRhTKZPWLgLViqNhr +1nGTLbO/CVRY7QbrqHvcQ7GhaQIDAQABo4ICnzCCApswEgYDVR0TAQH/BAgwBgEB/wIBBDAOBgNV +HQ8BAf8EBAMCAAYwEQYJYIZIAYb4QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaCAk1GSUdZ +RUxFTSEgRXplbiB0YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFub3MgU3pvbGdhbHRh +dGFzaSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBhbGFwamFuIGtlc3p1bHQuIEEgaGl0 +ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExvY2sgS2Z0LiB0ZXJtZWtmZWxlbG9zc2VnLWJpenRv +c2l0YXNhIHZlZGkuIEEgZGlnaXRhbGlzIGFsYWlyYXMgZWxmb2dhZGFzYW5hayBmZWx0ZXRlbGUg +YXogZWxvaXJ0IGVsbGVub3J6ZXNpIGVsamFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFzIGxlaXJh +c2EgbWVndGFsYWxoYXRvIGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGphbiBhIGh0dHBz +Oi8vd3d3Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJoZXRvIGF6IGVsbGVub3J6ZXNA +bmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBPUlRBTlQhIFRoZSBpc3N1YW5jZSBhbmQgdGhl +IHVzZSBvZiB0aGlzIGNlcnRpZmljYXRlIGlzIHN1YmplY3QgdG8gdGhlIE5ldExvY2sgQ1BTIGF2 +YWlsYWJsZSBhdCBodHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFpbCBhdCBj +cHNAbmV0bG9jay5uZXQuMA0GCSqGSIb3DQEBBAUAA4GBAATbrowXr/gOkDFOzT4JwG06sPgzTEdM +43WIEJessDgVkcYplswhwG08pXTP2IKlOcNl40JwuyKQ433bNXbhoLXan3BukxowOR0w2y7jfLKR +stE3Kfq51hdcR0/jHTjrn9V7lagonhVK0dHQKwCXoOKSNitjrFgBazMpUIaD8QFI +-----END CERTIFICATE----- + +NetLock Express (Class C) Root +============================== +-----BEGIN CERTIFICATE----- +MIIFTzCCBLigAwIBAgIBaDANBgkqhkiG9w0BAQQFADCBmzELMAkGA1UEBhMCSFUxETAPBgNVBAcT +CEJ1ZGFwZXN0MScwJQYDVQQKEx5OZXRMb2NrIEhhbG96YXRiaXp0b25zYWdpIEtmdC4xGjAYBgNV +BAsTEVRhbnVzaXR2YW55a2lhZG9rMTQwMgYDVQQDEytOZXRMb2NrIEV4cHJlc3N6IChDbGFzcyBD +KSBUYW51c2l0dmFueWtpYWRvMB4XDTk5MDIyNTE0MDgxMVoXDTE5MDIyMDE0MDgxMVowgZsxCzAJ +BgNVBAYTAkhVMREwDwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6 
+dG9uc2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE0MDIGA1UEAxMrTmV0TG9j +ayBFeHByZXNzeiAoQ2xhc3MgQykgVGFudXNpdHZhbnlraWFkbzCBnzANBgkqhkiG9w0BAQEFAAOB +jQAwgYkCgYEA6+ywbGGKIyWvYCDj2Z/8kwvbXY2wobNAOoLO/XXgeDIDhlqGlZHtU/qdQPzm6N3Z +W3oDvV3zOwzDUXmbrVWg6dADEK8KuhRC2VImESLH0iDMgqSaqf64gXadarfSNnU+sYYJ9m5tfk63 +euyucYT2BDMIJTLrdKwWRMbkQJMdf60CAwEAAaOCAp8wggKbMBIGA1UdEwEB/wQIMAYBAf8CAQQw +DgYDVR0PAQH/BAQDAgAGMBEGCWCGSAGG+EIBAQQEAwIABzCCAmAGCWCGSAGG+EIBDQSCAlEWggJN +RklHWUVMRU0hIEV6ZW4gdGFudXNpdHZhbnkgYSBOZXRMb2NrIEtmdC4gQWx0YWxhbm9zIFN6b2xn +YWx0YXRhc2kgRmVsdGV0ZWxlaWJlbiBsZWlydCBlbGphcmFzb2sgYWxhcGphbiBrZXN6dWx0LiBB +IGhpdGVsZXNpdGVzIGZvbHlhbWF0YXQgYSBOZXRMb2NrIEtmdC4gdGVybWVrZmVsZWxvc3NlZy1i +aXp0b3NpdGFzYSB2ZWRpLiBBIGRpZ2l0YWxpcyBhbGFpcmFzIGVsZm9nYWRhc2FuYWsgZmVsdGV0 +ZWxlIGF6IGVsb2lydCBlbGxlbm9yemVzaSBlbGphcmFzIG1lZ3RldGVsZS4gQXogZWxqYXJhcyBs +ZWlyYXNhIG1lZ3RhbGFsaGF0byBhIE5ldExvY2sgS2Z0LiBJbnRlcm5ldCBob25sYXBqYW4gYSBo +dHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIGNpbWVuIHZhZ3kga2VyaGV0byBheiBlbGxlbm9y +emVzQG5ldGxvY2submV0IGUtbWFpbCBjaW1lbi4gSU1QT1JUQU5UISBUaGUgaXNzdWFuY2UgYW5k +IHRoZSB1c2Ugb2YgdGhpcyBjZXJ0aWZpY2F0ZSBpcyBzdWJqZWN0IHRvIHRoZSBOZXRMb2NrIENQ +UyBhdmFpbGFibGUgYXQgaHR0cHM6Ly93d3cubmV0bG9jay5uZXQvZG9jcyBvciBieSBlLW1haWwg +YXQgY3BzQG5ldGxvY2submV0LjANBgkqhkiG9w0BAQQFAAOBgQAQrX/XDDKACtiG8XmYta3UzbM2 +xJZIwVzNmtkFLp++UOv0JhQQLdRmF/iewSf98e3ke0ugbLWrmldwpu2gpO0u9f38vf5NNwgMvOOW +gyL1SRt/Syu0VMGAfJlOHdCM7tCs5ZL6dVb+ZKATj7i4Fp1hBWeAyNDYpQcCNJgEjTME1A== +-----END CERTIFICATE----- + +XRamp Global CA Root +==================== +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCBgjELMAkGA1UE +BhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2Vj +dXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDQxMTAxMTcxNDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMx +HjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkg +U2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS638eMpSe2OAtp87ZOqCwu +IR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCPKZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMx +foArtYzAQDsRhtDLooY2YKTVMIJt2W7QDxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FE +zG+gSqmUsE3a56k0enI4qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqs +AxcZZPRaJSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNViPvry +xS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASsjVy16bYbMDYGA1UdHwQvMC0wK6Ap +oCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMC +AQEwDQYJKoZIhvcNAQEFBQADggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc +/Kh4ZzXxHfARvbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLaIR9NmXmd4c8n +nxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSyi6mx5O+aGtA9aZnuqCij4Tyz +8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQO+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END CERTIFICATE----- + +Go Daddy Class 2 CA +=================== +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMY +VGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkG +A1UEBhMCVVMxITAfBgNVBAoTGFRoZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28g +RGFkZHkgQ2xhc3MgMiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQAD 
+ggENADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCAPVYYYwhv +2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6wwdhFJ2+qN1j3hybX2C32 +qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXiEqITLdiOr18SPaAIBQi2XKVlOARFmR6j +YGB0xUGlcmIbYsUfb18aQr4CUWWoriMYavx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmY +vLEHZ6IVDd2gWMZEewo+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0O +BBYEFNLEsNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h/t2o +atTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMu +MTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwG +A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wim +PQoZ+YeAEW5p5JYXMP80kWNyOO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKt +I3lpjbi2Tc7PTMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mERdEr/VxqHD3VI +Ls9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5CufReYNnyicsbkqWletNw+vHX/b +vZ8= +-----END CERTIFICATE----- + +Starfield Class 2 CA +==================== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzElMCMGA1UEChMc +U3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZpZWxkIENsYXNzIDIg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQwNjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBo +MQswCQYDVQQGEwJVUzElMCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAG +A1UECxMpU3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqG +SIb3DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf8MOh2tTY +bitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN+lq2cwQlZut3f+dZxkqZ +JRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVm +epsZGD3/cVE8MC5fvj13c7JdBmzDI1aaK4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSN +F4Azbl5KXZnJHoe0nRrA1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HF +MIHCMB0GA1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fRzt0f +hvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNo +bm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBDbGFzcyAyIENlcnRpZmljYXRpb24g +QXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGs +afPzWdqbAYcaT1epoXkJKtv3L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLM +PUxA2IGvd56Deruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynpVSJYACPq4xJD +KVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEYWQPJIrSPnNVeKtelttQKbfi3 +QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- + +StartCom Certification Authority +================================ +-----BEGIN CERTIFICATE----- +MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu +ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 +NjM2WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk +LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg +U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y +o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ +Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d +eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt +2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z 
+6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ +osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ +untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc +UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT +37uMdBNSSwIDAQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE +FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9jZXJ0LnN0YXJ0 +Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3JsLnN0YXJ0Y29tLm9yZy9zZnNj +YS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFMBgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUH +AgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRw +Oi8vY2VydC5zdGFydGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYg +U3RhcnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlhYmlsaXR5 +LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2YgdGhlIFN0YXJ0Q29tIENl +cnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFpbGFibGUgYXQgaHR0cDovL2NlcnQuc3Rh +cnRjb20ub3JnL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilT +dGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOC +AgEAFmyZ9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8jhvh +3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUWFjgKXlf2Ysd6AgXm +vB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJzewT4F+irsfMuXGRuczE6Eri8sxHk +fY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3 +fsNrarnDy0RLrHiQi+fHLB5LEUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZ +EoalHmdkrQYuL6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq +yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuCO3NJo2pXh5Tl +1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6Vum0ABj6y6koQOdjQK/W/7HW/ +lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkyShNOsF/5oirpt9P/FlUQqmMGqz9IgcgA38coro +g14= +-----END CERTIFICATE----- + +Taiwan GRCA +=========== +-----BEGIN CERTIFICATE----- +MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/MQswCQYDVQQG +EwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4X +DTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1owPzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dv +dmVybm1lbnQgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qN +w8XRIePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1qgQdW8or5 +BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKyyhwOeYHWtXBiCAEuTk8O +1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAtsF/tnyMKtsc2AtJfcdgEWFelq16TheEfO +htX7MfP6Mb40qij7cEwdScevLJ1tZqa2jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wov +J5pGfaENda1UhhXcSTvxls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7 +Q3hub/FCVGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHKYS1t +B6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoHEgKXTiCQ8P8NHuJB +O9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThNXo+EHWbNxWCWtFJaBYmOlXqYwZE8 +lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1UdDgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNV +HRMEBTADAQH/MDkGBGcqBwAEMTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg2 +09yewDL7MTqKUWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ +TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyfqzvS/3WXy6Tj +Zwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaKZEk9GhiHkASfQlK3T8v+R0F2 +Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFEJPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlU +D7gsL0u8qV1bYH+Mh6XgUmMqvtg7hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6Qz 
+DxARvBMB1uUO07+1EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+Hbk +Z6MmnD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WXudpVBrkk +7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44VbnzssQwmSNOXfJIoRIM3BKQ +CZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDeLMDDav7v3Aun+kbfYNucpllQdSNpc5Oy ++fwC00fmcc4QAu4njIT/rEUNE1yDMuAlpYYsfPQS +-----END CERTIFICATE----- + +Swisscom Root CA 1 +================== +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQXAuFXAvnWUHfV8w/f52oNjANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQG +EwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2VydGlmaWNhdGUgU2Vy +dmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3QgQ0EgMTAeFw0wNTA4MTgxMjA2MjBaFw0yNTA4 +MTgyMjA2MjBaMGQxCzAJBgNVBAYTAmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGln +aXRhbCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAxMIIC +IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0LmwqAzZuz8h+BvVM5OAFmUgdbI9m2BtRsiM +MW8Xw/qabFbtPMWRV8PNq5ZJkCoZSx6jbVfd8StiKHVFXqrWW/oLJdihFvkcxC7mlSpnzNApbjyF +NDhhSbEAn9Y6cV9Nbc5fuankiX9qUvrKm/LcqfmdmUc/TilftKaNXXsLmREDA/7n29uj/x2lzZAe +AR81sH8A25Bvxn570e56eqeqDFdvpG3FEzuwpdntMhy0XmeLVNxzh+XTF3xmUHJd1BpYwdnP2IkC +b6dJtDZd0KTeByy2dbcokdaXvij1mB7qWybJvbCXc9qukSbraMH5ORXWZ0sKbU/Lz7DkQnGMU3nn +7uHbHaBuHYwadzVcFh4rUx80i9Fs/PJnB3r1re3WmquhsUvhzDdf/X/NTa64H5xD+SpYVUNFvJbN +cA78yeNmuk6NO4HLFWR7uZToXTNShXEuT46iBhFRyePLoW4xCGQMwtI89Tbo19AOeCMgkckkKmUp +WyL3Ic6DXqTz3kvTaI9GdVyDCW4pa8RwjPWd1yAv/0bSKzjCL3UcPX7ape8eYIVpQtPM+GP+HkM5 +haa2Y0EQs3MevNP6yn0WR+Kn1dCjigoIlmJWbjTb2QK5MHXjBNLnj8KwEUAKrNVxAmKLMb7dxiNY +MUJDLXT5xp6mig/p/r+D5kNXJLrvRjSq1xIBOO0CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYw +HQYDVR0hBBYwFDASBgdghXQBUwABBgdghXQBUwABMBIGA1UdEwEB/wQIMAYBAf8CAQcwHwYDVR0j +BBgwFoAUAyUv3m+CATpcLNwroWm1Z9SM0/0wHQYDVR0OBBYEFAMlL95vggE6XCzcK6FptWfUjNP9 +MA0GCSqGSIb3DQEBBQUAA4ICAQA1EMvspgQNDQ/NwNurqPKIlwzfky9NfEBWMXrrpA9gzXrzvsMn +jgM+pN0S734edAY8PzHyHHuRMSG08NBsl9Tpl7IkVh5WwzW9iAUPWxAaZOHHgjD5Mq2eUCzneAXQ +MbFamIp1TpBcahQq4FJHgmDmHtqBsfsUC1rxn9KVuj7QG9YVHaO+htXbD8BJZLsuUBlL0iT43R4H +VtA4oJVwIHaM190e3p9xxCPvgxNcoyQVTSlAPGrEqdi3pkSlDfTgnXceQHAm/NrZNuR55LU/vJtl +vrsRls/bxig5OgjOR1tTWsWZ/l2p3e9M1MalrQLmjAcSHm8D0W+go/MpvRLHUKKwf4ipmXeascCl +OS5cfGniLLDqN2qk4Vrh9VDlg++luyqI54zb/W1elxmofmZ1a3Hqv7HHb6D0jqTsNFFbjCYDcKF3 +1QESVwA12yPeDooomf2xEG9L/zgtYE4snOtnta1J7ksfrK/7DZBaZmBwXarNeNQk7shBoJMBkpxq +nvy5JMWzFYJ+vq6VK+uxwNrjAWALXmmshFZhvnEX/h0TD/7Gh0Xp/jKgGg0TpJRVcaUWi7rKibCy +x/yP2FS1k2Kdzs9Z+z0YzirLNRWCXf9UIltxUvu3yf5gmwBBZPCqKuy2QkPOiWaByIufOVQDJdMW +NY6E0F/6MBr1mmz0DlP5OlvRHA== +-----END CERTIFICATE----- + +DigiCert Assured ID Root CA +=========================== +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQw +IgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzEx +MTEwMDAwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQL +ExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0Ew +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7cJpSIqvTO +9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYPmDI2dsze3Tyoou9q+yHy +UmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW +/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpy +oeb6pNnVFzF1roV9Iq4/AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whf +GHdPAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRF 
+66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYunpyGd823IDzANBgkq +hkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRCdWKuh+vy1dneVrOfzM4UKLkNl2Bc +EkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTffwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38Fn +SbNd67IJKusm7Xi+fT8r87cmNW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i +8b5QZ7dsvfPxH2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +DigiCert Global Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBhMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAw +HgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBDQTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAw +MDAwMDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3 +dy5kaWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsBCSDMAZOn +TjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97nh6Vfe63SKMI2tavegw5 +BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt43C/dxC//AH2hdmoRBBYMql1GNXRor5H +4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7PT19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y +7vrTC0LUq7dBMtoM1O/4gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQAB +o2MwYTAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbRTLtm +8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUwDQYJKoZIhvcNAQEF +BQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/EsrhMAtudXH/vTBH1jLuG2cenTnmCmr +EbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIt +tep3Sp+dWOIrWcBAI+0tKIJFPnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886 +UAb3LujEV0lsYSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +DigiCert High Assurance EV Root CA +================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBsMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSsw +KQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5jZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAw +MFoXDTMxMTExMDAwMDAwMFowbDELMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZ +MBcGA1UECxMQd3d3LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFu +Y2UgRVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm+9S75S0t +Mqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTWPNt0OKRKzE0lgvdKpVMS +OO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEMxChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3 +MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFBIk5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQ +NAQTXKFx01p8VdteZOE3hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUe +h10aUAsgEsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMB +Af8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaAFLE+w2kD+L9HAdSY +JhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3NecnzyIZgYIVyHbIUf4KmeqvxgydkAQ +V8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6zeM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFp +myPInngiK3BD41VHMWEZ71jFhS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkK +mNEVX58Svnw2Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep+OkuE6N36B9K +-----END CERTIFICATE----- + +Certplus Class 2 Primary CA +=========================== +-----BEGIN CERTIFICATE----- +MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAwPTELMAkGA1UE 
+BhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFzcyAyIFByaW1hcnkgQ0EwHhcN +OTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2Vy +dHBsdXMxGzAZBgNVBAMTEkNsYXNzIDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBANxQltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR +5aiRVhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyLkcAbmXuZ +Vg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCdEgETjdyAYveVqUSISnFO +YFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yasH7WLO7dDWWuwJKZtkIvEcupdM5i3y95e +e++U8Rs+yskhwcWYAqqi9lt3m/V+llU0HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRME +CDAGAQH/AgEKMAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJ +YIZIAYb4QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMuY29t +L0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/AN9WM2K191EBkOvD +P9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8yfFC82x/xXp8HVGIutIKPidd3i1R +TtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMRFcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+ +7UCmnYR0ObncHoUW2ikbhiMAybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW +//1IMwrh3KWBkJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7 +l7+ijrRU +-----END CERTIFICATE----- + +DST Root CA X3 +============== +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/MSQwIgYDVQQK +ExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMTDkRTVCBSb290IENBIFgzMB4X +DTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVowPzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1 +cmUgVHJ1c3QgQ28uMRcwFQYDVQQDEw5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmT +rE4Orz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEqOLl5CjH9 +UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9bxiqKqy69cK3FCxolkHRy +xXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40d +utolucbY38EVAjqr2m7xPi71XAicPNaDaeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQ +MA0GCSqGSIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69ikug +dB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXrAvHRAosZy5Q6XkjE +GB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZzR8srzJmwN0jP41ZL9c8PDHIyh8bw +RLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubS +fZGL+T0yjWW06XyxV3bqxbYoOb8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ +-----END CERTIFICATE----- + +DST ACES CA X6 +============== +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIQDV6ZCtadt3js2AdWO4YV2TANBgkqhkiG9w0BAQUFADBbMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QxETAPBgNVBAsTCERTVCBBQ0VT +MRcwFQYDVQQDEw5EU1QgQUNFUyBDQSBYNjAeFw0wMzExMjAyMTE5NThaFw0xNzExMjAyMTE5NTha +MFsxCzAJBgNVBAYTAlVTMSAwHgYDVQQKExdEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdDERMA8GA1UE +CxMIRFNUIEFDRVMxFzAVBgNVBAMTDkRTVCBBQ0VTIENBIFg2MIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAuT31LMmU3HWKlV1j6IR3dma5WZFcRt2SPp/5DgO0PWGSvSMmtWPuktKe1jzI +DZBfZIGxqAgNTNj50wUoUrQBJcWVHAx+PhCEdc/BGZFjz+iokYi5Q1K7gLFViYsx+tC3dr5BPTCa +pCIlF3PoHuLTrCq9Wzgh1SpL11V94zpVvddtawJXa+ZHfAjIgrrep4c9oW24MFbCswKBXy314pow +GCi4ZtPLAZZv6opFVdbgnf9nKxcCpk4aahELfrd755jWjHZvwTvbUJN+5dCOHze4vbrGn2zpfDPy +MjwmR/onJALJfh1biEITajV8fTXpLmaRcpPVMibEdPVTo7NdmvYJywIDAQABo4HIMIHFMA8GA1Ud +EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgHGMB8GA1UdEQQYMBaBFHBraS1vcHNAdHJ1c3Rkc3Qu +Y29tMGIGA1UdIARbMFkwVwYKYIZIAWUDAgEBATBJMEcGCCsGAQUFBwIBFjtodHRwOi8vd3d3LnRy +dXN0ZHN0LmNvbS9jZXJ0aWZpY2F0ZXMvcG9saWN5L0FDRVMtaW5kZXguaHRtbDAdBgNVHQ4EFgQU 
+CXIGThhDD+XWzMNqizF7eI+og7gwDQYJKoZIhvcNAQEFBQADggEBAKPYjtay284F5zLNAdMEA+V2 +5FYrnJmQ6AgwbN99Pe7lv7UkQIRJ4dEorsTCOlMwiPH1d25Ryvr/ma8kXxug/fKshMrfqfBfBC6t +Fr8hlxCBPeP/h40y3JTlR4peahPJlJU90u7INJXQgNStMgiAVDzgvVJT11J8smk/f3rPanTK+gQq +nExaBqXpIK1FZg9p8d2/6eMyi/rgwYZNcjwu2JN4Cir42NInPRmJX1p7ijvMDNpRrscL9yuwNwXs +vFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf29w4LTJxoeHtxMcfrHuBnQfO3 +oKfN5XozNmr6mis= +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 1 +============================================== +-----BEGIN CERTIFICATE----- +MIID+zCCAuOgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBtzE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGDAJUUjEP +MA0GA1UEBwwGQU5LQVJBMVYwVAYDVQQKDE0oYykgMjAwNSBUw5xSS1RSVVNUIEJpbGdpIMSwbGV0 +acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLjAeFw0wNTA1MTMx +MDI3MTdaFw0xNTAzMjIxMDI3MTdaMIG3MT8wPQYDVQQDDDZUw5xSS1RSVVNUIEVsZWt0cm9uaWsg +U2VydGlmaWthIEhpem1ldCBTYcSfbGF5xLFjxLFzxLExCzAJBgNVBAYMAlRSMQ8wDQYDVQQHDAZB +TktBUkExVjBUBgNVBAoMTShjKSAyMDA1IFTDnFJLVFJVU1QgQmlsZ2kgxLBsZXRpxZ9pbSB2ZSBC +aWxpxZ9pbSBHw7x2ZW5sacSfaSBIaXptZXRsZXJpIEEuxZ4uMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAylIF1mMD2Bxf3dJ7XfIMYGFbazt0K3gNfUW9InTojAPBxhEqPZW8qZSwu5GX +yGl8hMW0kWxsE2qkVa2kheiVfrMArwDCBRj1cJ02i67L5BuBf5OI+2pVu32Fks66WJ/bMsW9Xe8i +Si9BB35JYbOG7E6mQW6EvAPs9TscyB/C7qju6hJKjRTP8wrgUDn5CDX4EVmt5yLqS8oUBt5CurKZ +8y1UiBAG6uEaPj1nH/vO+3yC6BFdSsG5FOpU2WabfIl9BJpiyelSPJ6c79L1JuTm5Rh8i27fbMx4 +W09ysstcP4wFjdFMjK2Sx+F4f2VsSQZQLJ4ywtdKxnWKWU51b0dewQIDAQABoxAwDjAMBgNVHRME +BTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAV9VX/N5aAWSGk/KEVTCD21F/aAyT8z5Aa9CEKmu46 +sWrv7/hg0Uw2ZkUd82YCdAR7kjCo3gp2D++Vbr3JN+YaDayJSFvMgzbC9UZcWYJWtNX+I7TYVBxE +q8Sn5RTOPEFhfEPmzcSBCYsk+1Ql1haolgxnB2+zUEfjHCQo3SqYpGH+2+oSN7wBGjSFvW5P55Fy +B0SFHljKVETd96y5y4khctuPwGkplyqjrhgjlxxBKot8KsF8kOipKMDTkcatKIdAaLX/7KfS0zgY +nNN9aV3wxqUeJBujR/xpB2jn5Jq07Q+hh4cCzofSSE7hvP/L8XKSRGQDJereW26fyfJOrN3H +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 2 +============================================== +-----BEGIN CERTIFICATE----- +MIIEPDCCAySgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEP +MA0GA1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUg +QmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwHhcN +MDUxMTA3MTAwNzU3WhcNMTUwOTE2MTAwNzU3WjCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBFbGVr +dHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEPMA0G +A1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmls +acWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCpNn7DkUNMwxmYCMjHWHtPFoylzkkBH3MOrHUTpvqe +LCDe2JAOCtFp0if7qnefJ1Il4std2NiDUBd9irWCPwSOtNXwSadktx4uXyCcUHVPr+G1QRT0mJKI +x+XlZEdhR3n9wFHxwZnn3M5q+6+1ATDcRhzviuyV79z/rxAc653YsKpqhRgNF8k+v/Gb0AmJQv2g +QrSdiVFVKc8bcLyEVK3BEx+Y9C52YItdP5qtygy/p1Zbj3e41Z55SZI/4PGXJHpsmxcPbe9TmJEr +5A++WXkHeLuXlfSfadRYhwqp48y2WBmfJiGxxFmNskF1wK1pzpwACPI2/z7woQ8arBT9pmAPAgMB +AAGjQzBBMB0GA1UdDgQWBBTZN7NOBf3Zz58SFq62iS/rJTqIHDAPBgNVHQ8BAf8EBQMDBwYAMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAHJglrfJ3NgpXiOFX7KzLXb7iNcX/ntt +Rbj2hWyfIvwqECLsqrkw9qtY1jkQMZkpAL2JZkH7dN6RwRgLn7Vhy506vvWolKMiVW4XSf/SKfE4 +Jl3vpao6+XF75tpYHdN0wgH6PmlYX63LaL4ULptswLbcoCb6dxriJNoaN+BnrdFzgw2lGh1uEpJ+ +hGIAF728JRhX8tepb1mIvDS3LoV4nZbcFMMsilKbloxSZj2GFotHuFEJjOp9zYhys2AzsfAKRO8P 
+9Qk3iCQOLGsgOqL6EfJANZxEaGM7rDNvY7wsu/LSy3Z9fYjYHcgFHW68lKlmjHdxx/qR+i9Rnuk5 +UrbnBEI= +-----END CERTIFICATE----- + +SwissSign Gold CA - G2 +====================== +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNVBAYTAkNIMRUw +EwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2lnbiBHb2xkIENBIC0gRzIwHhcN +MDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBFMQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dp +c3NTaWduIEFHMR8wHQYDVQQDExZTd2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUq +t2/876LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+bbqBHH5C +jCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c6bM8K8vzARO/Ws/BtQpg +vd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqEemA8atufK+ze3gE/bk3lUIbLtK/tREDF +ylqM2tIrfKjuvqblCqoOpd8FUrdVxyJdMmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvR +AiTysybUa9oEVeXBCsdtMDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuend +jIj3o02yMszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69yFGkO +peUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPiaG59je883WX0XaxR +7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxMgI93e2CaHt+28kgeDrpOVG2Y4OGi +GqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUWyV7lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64 +OfPAeGZe6Drn8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe645R88a7A3hfm +5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczOUYrHUDFu4Up+GC9pWbY9ZIEr +44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOf +Mke6UiI0HTJ6CVanfCU2qT1L2sCCbwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6m +Gu6uLftIdxf+u+yvGPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxp +mo/a77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCChdiDyyJk +vC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid392qgQmwLOM7XdVAyksLf +KzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEppLd6leNcG2mqeSz53OiATIgHQv2ieY2Br +NU0LbbqhPcCT4H8js1WtciVORvnSFu+wZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6Lqj +viOvrv1vA+ACOzB2+httQc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- + +SwissSign Silver CA - G2 +======================== +-----BEGIN CERTIFICATE----- +MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCQ0gxFTAT +BgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMB4X +DTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0NlowRzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3 +aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG +9w0BAQEFAAOCAg8AMIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644 +N0MvFz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7brYT7QbNHm ++/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieFnbAVlDLaYQ1HTWBCrpJH +6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH6ATK72oxh9TAtvmUcXtnZLi2kUpCe2Uu +MGoM9ZDulebyzYLs2aFK7PayS+VFheZteJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5h +qAaEuSh6XzjZG6k4sIN/c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5 +FZGkECwJMoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRHHTBs +ROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTfjNFusB3hB48IHpmc +celM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb65i/4z3GcRm25xBWNOHkDRUjvxF3X +CO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUF6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRB 
+tjpbO8tFnb0cwpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 +cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBAHPGgeAn0i0P +4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShpWJHckRE1qTodvBqlYJ7YH39F +kWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L +3XWgwF15kIwb4FDm3jH+mHtwX6WQ2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx +/uNncqCxv1yL5PqZIseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFa +DGi8aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2Xem1ZqSqP +e97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQRdAtq/gsD/KNVV4n+Ssuu +WxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJ +DIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ub +DgEj8Z+7fNzcbBGXJbLytGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority +======================================== +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQG +EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMoR2VvVHJ1c3QgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjExMjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgx +CzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQ +cmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9AWbK7hWN +b6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjAZIVcFU2Ix7e64HXprQU9 +nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE07e9GceBrAqg1cmuXm2bgyxx5X9gaBGge +RwLmnWDiNpcB3841kt++Z8dtd1k7j53WkBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGt +tm/81w7a4DSwDRp35+MImO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJKoZI +hvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ16CePbJC/kRYkRj5K +Ts4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl4b7UVXGYNTq+k+qurUKykG/g/CFN +NWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6KoKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHa +Floxt/m0cYASSJlyc1pZU8FjUjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG +1riR/aYNKxoUAT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk= +-----END CERTIFICATE----- + +thawte Primary Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCBqTELMAkGA1UE +BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 +aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3 +MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwg +SW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMv +KGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMT +FnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCs +oPD7gFnUnMekz52hWXMJEEUMDSxuaPFsW0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ +1CRfBsDMRJSUjQJib+ta3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGc +q/gcfomk6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6Sk/K +aAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94JNqR32HuHUETVPm4p +afs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XPr87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUF +AAOCAQEAeRHAS7ORtvzw6WfUDW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeE 
+uzLlQRHAd9mzYJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX +xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2/qxAeeWsEG89 +jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/LHbTY5xZ3Y+m4Q6gLkH3LpVH +z7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7jVaMaA== +-----END CERTIFICATE----- + +VeriSign Class 3 Public Primary Certification Authority - G5 +============================================================ +-----BEGIN CERTIFICATE----- +MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCByjELMAkGA1UE +BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO +ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk +IHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCB +yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2ln +biBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBh +dXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmlt +YXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCvJAgIKXo1nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKz +j/i5Vbext0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIzSdhD +Y2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQGBO+QueQA5N06tRn/ +Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+rCpSx4/VBEnkjWNHiDxpg8v+R70r +fk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/ +BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2Uv +Z2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy +aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKvMzEzMA0GCSqG +SIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzEp6B4Eq1iDkVwZMXnl2YtmAl+ +X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKE +KQsTb47bDN0lAtukixlE0kF6BWlKWE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiC +Km0oHw0LxOXnGiYZ4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vE +ZV8NhnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq +-----END CERTIFICATE----- + +SecureTrust CA +============== +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBIMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xFzAVBgNVBAMTDlNlY3VyZVRy +dXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIzMTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAe +BgNVBAoTF1NlY3VyZVRydXN0IENvcnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQX +OZEzZum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO0gMdA+9t +DWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIaowW8xQmxSPmjL8xk037uH +GFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b +01k/unK8RCSc43Oz969XL0Imnal0ugBS8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmH +ursCAwEAAaOBnTCBmjATBgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCegJYYj +aHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt36Z3q059c4EVlew3KW+JwULKUBRSu +SceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHf +mbx8IVQr5Fiiu1cprp6poxkmD5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZ +nMUFdAvnZyPSCPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR 
+3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- + +Secure Global CA +================ +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBH +bG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkxMjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEg +MB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwg +Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jx +YDiJiQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa/FHtaMbQ +bqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJjnIFHovdRIWCQtBJwB1g +8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnIHmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYV +HDGA76oYa8J719rO+TMg1fW9ajMtgQT7sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi +0XPnj3pDAgMBAAGjgZ0wgZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCswKaAn +oCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsGAQQBgjcVAQQDAgEA +MA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0LURYD7xh8yOOvaliTFGCRsoTciE6+ +OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXOH0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cn +CDpOGR86p1hcF895P4vkp9MmI50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/5 +3CYNv6ZHdAbYiNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- + +COMODO Certification Authority +============================== +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCBgTELMAkGA1UE +BhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgG +A1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNVBAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1 +dGhvcml0eTAeFw0wNjEyMDEwMDAwMDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEb +MBkGA1UECBMSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFD +T01PRE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3UcEbVASY06m/weaKXTuH ++7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI2GqGd0S7WWaXUF601CxwRM/aN5VCaTww +xHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV +4EajcNxo2f8ESIl33rXp+2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA +1KGzqSX+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5OnKVI +rLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW/zAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6gPKA6hjhodHRwOi8vY3JsLmNvbW9k +b2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOC +AQEAPpiem/Yb6dc5t3iuHXIYSdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CP +OGEIqB6BCsAvIC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4zJVSk/BwJVmc +IGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5ddBA6+C4OmF4O5MBKgxTMVBbkN ++8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IBZQ== +-----END CERTIFICATE----- + +Network Solutions Certificate Authority +======================================= +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBiMQswCQYDVQQG +EwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydOZXR3b3Jr +IFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMx +MjM1OTU5WjBiMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu 
+MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwzc7MEL7xx +jOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPPOCwGJgl6cvf6UDL4wpPT +aaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rlmGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXT +crA/vGp97Eh/jcOrqnErU2lBUzS1sLnFBgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc +/Qzpf14Dl847ABSHJ3A4qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMB +AAGjgZcwgZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwubmV0c29sc3NsLmNv +bS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3JpdHkuY3JsMA0GCSqGSIb3DQEBBQUA +A4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc86fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q +4LqILPxFzBiwmZVRDuwduIj/h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/ +GGUsyfJj4akH/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv +wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHNpGxlaKFJdlxD +ydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey +-----END CERTIFICATE----- + +WellsSecure Public Root Certificate Authority +============================================= +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoM +F1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYw +NAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcN +MDcxMjEzMTcwNzU0WhcNMjIxMjE0MDAwNzU0WjCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dl +bGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYD +VQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDub7S9eeKPCCGeOARBJe+rWxxTkqxtnt3CxC5FlAM1 +iGd0V+PfjLindo8796jE2yljDpFoNoqXjopxaAkH5OjUDk/41itMpBb570OYj7OeUt9tkTmPOL13 +i0Nj67eT/DBMHAGTthP796EfvyXhdDcsHqRePGj4S78NuR4uNuip5Kf4D8uCdXw1LSLWwr8L87T8 +bJVhHlfXBIEyg1J55oNjz7fLY4sR4r1e6/aN7ZVyKLSsEmLpSjPmgzKuBXWVvYSV2ypcm44uDLiB +K0HmOFafSZtsdvqKXfcBeYF8wYNABf5x/Qw/zE5gCQ5lRxAvAcAFP4/4s0HvWkJ+We/SlwxlAgMB +AAGjggE0MIIBMDAPBgNVHRMBAf8EBTADAQH/MDkGA1UdHwQyMDAwLqAsoCqGKGh0dHA6Ly9jcmwu +cGtpLndlbGxzZmFyZ28uY29tL3dzcHJjYS5jcmwwDgYDVR0PAQH/BAQDAgHGMB0GA1UdDgQWBBQm +lRkQ2eihl5H/3BnZtQQ+0nMKajCBsgYDVR0jBIGqMIGngBQmlRkQ2eihl5H/3BnZtQQ+0nMKaqGB +i6SBiDCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRww +GgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMg +Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHmCAQEwDQYJKoZIhvcNAQEFBQADggEBALkVsUSRzCPI +K0134/iaeycNzXK7mQDKfGYZUMbVmO2rvwNa5U3lHshPcZeG1eMd/ZDJPHV3V3p9+N701NX3leZ0 +bh08rnyd2wIDBSxxSyU+B+NemvVmFymIGjifz6pBA4SXa5M4esowRBskRDPQ5NHcKDj0E0M1NSlj +qHyita04pO2t/caaH/+Xc/77szWnk4bGdpEA5qxRFsQnMlzbc9qlk1eOPm01JghZ1edE13YgY+es +E2fDbbFwRnzVlhE9iW9dqKHrjQrawx0zbKPqZxmamX9LPYNRKh3KL4YMon4QLSvUFpULB6ouFJJJ +tylv2G0xffX8oRAHh84vWdw+WNs= +-----END CERTIFICATE----- + +COMODO ECC Certification Authority +================================== +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTELMAkGA1UEBhMC +R0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UE +ChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDgwMzA2MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0Ix +GzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRo 
+b3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSRFtSrYpn1PlILBs5BAH+X +4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0JcfRK9ChQtP6IHG4/bC8vCVlbpVsLM5ni +wz2J+Wos77LTBumjQjBAMB0GA1UdDgQWBBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VG +FAkK+qDmfQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdvGDeA +U/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +IGC/A +===== +-----BEGIN CERTIFICATE----- +MIIEAjCCAuqgAwIBAgIFORFFEJQwDQYJKoZIhvcNAQEFBQAwgYUxCzAJBgNVBAYTAkZSMQ8wDQYD +VQQIEwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVE +Q1NTSTEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZy +MB4XDTAyMTIxMzE0MjkyM1oXDTIwMTAxNzE0MjkyMlowgYUxCzAJBgNVBAYTAkZSMQ8wDQYDVQQI +EwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVEQ1NT +STEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZyMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsh/R0GLFMzvABIaIs9z4iPf930Pfeo2aSVz2 +TqrMHLmh6yeJ8kbpO0px1R2OLc/mratjUMdUC24SyZA2xtgv2pGqaMVy/hcKshd+ebUyiHDKcMCW +So7kVc0dJ5S/znIq7Fz5cyD+vfcuiWe4u0dzEvfRNWk68gq5rv9GQkaiv6GFGvm/5P9JhfejcIYy +HF2fYPepraX/z9E0+X1bF8bc1g4oa8Ld8fUzaJ1O/Id8NhLWo4DoQw1VYZTqZDdH6nfK0LJYBcNd +frGoRpAxVs5wKpayMLh35nnAvSk7/ZR3TL0gzUEl4C7HG7vupARB0l2tEmqKm0f7yd1GQOGdPDPQ +tQIDAQABo3cwdTAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBRjAVBgNVHSAEDjAMMAoGCCqB +egF5AQEBMB0GA1UdDgQWBBSjBS8YYFDCiQrdKyFP/45OqDAxNjAfBgNVHSMEGDAWgBSjBS8YYFDC +iQrdKyFP/45OqDAxNjANBgkqhkiG9w0BAQUFAAOCAQEABdwm2Pp3FURo/C9mOnTgXeQp/wYHE4RK +q89toB9RlPhJy3Q2FLwV3duJL92PoF189RLrn544pEfMs5bZvpwlqwN+Mw+VgQ39FuCIvjfwbF3Q +MZsyK10XZZOYYLxuj7GoPB7ZHPOpJkL5ZB3C55L29B5aqhlSXa/oovdgoPaN8In1buAKBQGVyYsg +Crpa/JosPL3Dt8ldeCUFP1YUmwza+zpI/pdpXsoQhvdOlgQITeywvl3cO45Pwf2aNjSaTFR+FwNI +lQgRHAdvhQh+XU3Endv7rs6y0bO4g2wdsrN58dhwmX7wEwLOXt1R0982gaEbeC9xs/FZTEYYKKuF +0mBWWg== +-----END CERTIFICATE----- + +Security Communication EV RootCA1 +================================= +-----BEGIN CERTIFICATE----- +MIIDfTCCAmWgAwIBAgIBADANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEqMCgGA1UECxMhU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBFViBSb290Q0ExMB4XDTA3MDYwNjAyMTIzMloXDTM3MDYwNjAyMTIzMlowYDELMAkGA1UE +BhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKjAoBgNVBAsTIVNl +Y3VyaXR5IENvbW11bmljYXRpb24gRVYgUm9vdENBMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBALx/7FebJOD+nLpCeamIivqA4PUHKUPqjgo0No0c+qe1OXj/l3X3L+SqawSERMqm4miO +/VVQYg+kcQ7OBzgtQoVQrTyWb4vVog7P3kmJPdZkLjjlHmy1V4qe70gOzXppFodEtZDkBp2uoQSX +WHnvIEqCa4wiv+wfD+mEce3xDuS4GBPMVjZd0ZoeUWs5bmB2iDQL87PRsJ3KYeJkHcFGB7hj3R4z +ZbOOCVVSPbW9/wfrrWFVGCypaZhKqkDFMxRldAD5kd6vA0jFQFTcD4SQaCDFkpbcLuUCRarAX1T4 +bepJz11sS6/vmsJWXMY1VkJqMF/Cq/biPT+zyRGPMUzXn0kCAwEAAaNCMEAwHQYDVR0OBBYEFDVK +9U2vP9eCOKyrcWUXdYydVZPmMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqG +SIb3DQEBBQUAA4IBAQCoh+ns+EBnXcPBZsdAS5f8hxOQWsTvoMpfi7ent/HWtWS3irO4G8za+6xm +iEHO6Pzk2x6Ipu0nUBsCMCRGef4Eh3CXQHPRwMFXGZpppSeZq51ihPZRwSzJIxXYKLerJRO1RuGG +Av8mjMSIkh1W/hln8lXkgKNrnKt34VFxDSDbEJrbvXZ5B3eZKK2aXtqxT0QsNY6llsf9g/BYxnnW +mHyojf6GPgcWkuF75x3sM3Z+Qi5KhfmRiWiEA4Glm5q+4zfFVKtWOxgtQaQM+ELbmaDgcm+7XeEW +T1MKZPlO9L9OVL14bIjqv5wTJMJwaaJ/D8g8rQjJsJhAoyrniIPtd490 +-----END CERTIFICATE----- + +OISTE WISeKey Global Root GA CA +=============================== +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCBijELMAkGA1UE +BhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHlyaWdodCAoYykgMjAwNTEiMCAG 
+A1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBH +bG9iYWwgUm9vdCBHQSBDQTAeFw0wNTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYD +VQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIw +IAYDVQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5 +IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAy0+zAJs9 +Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxRVVuuk+g3/ytr6dTqvirdqFEr12bDYVxg +Asj1znJ7O7jyTmUIms2kahnBAbtzptf2w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbD +d50kc3vkDIzh2TbhmYsFmQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ +/yxViJGg4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t94B3R +LoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOxSPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vIm +MMkQyh2I+3QZH4VFvbBsUfk2ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4 ++vg1YFkCExh8vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa +hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZiFj4A4xylNoEY +okxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ/L7fCg0= +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA +========================= +-----BEGIN CERTIFICATE----- +MIIHqDCCBpCgAwIBAgIRAMy4579OKRr9otxmpRwsDxEwDQYJKoZIhvcNAQEFBQAwcjELMAkGA1UE +BhMCSFUxETAPBgNVBAcTCEJ1ZGFwZXN0MRYwFAYDVQQKEw1NaWNyb3NlYyBMdGQuMRQwEgYDVQQL +EwtlLVN6aWdubyBDQTEiMCAGA1UEAxMZTWljcm9zZWMgZS1Temlnbm8gUm9vdCBDQTAeFw0wNTA0 +MDYxMjI4NDRaFw0xNzA0MDYxMjI4NDRaMHIxCzAJBgNVBAYTAkhVMREwDwYDVQQHEwhCdWRhcGVz +dDEWMBQGA1UEChMNTWljcm9zZWMgTHRkLjEUMBIGA1UECxMLZS1Temlnbm8gQ0ExIjAgBgNVBAMT +GU1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDtyADVgXvNOABHzNuEwSFpLHSQDCHZU4ftPkNEU6+r+ICbPHiN1I2uuO/TEdyB5s87lozWbxXG +d36hL+BfkrYn13aaHUM86tnsL+4582pnS4uCzyL4ZVX+LMsvfUh6PXX5qqAnu3jCBspRwn5mS6/N +oqdNAoI/gqyFxuEPkEeZlApxcpMqyabAvjxWTHOSJ/FrtfX9/DAFYJLG65Z+AZHCabEeHXtTRbjc +QR/Ji3HWVBTji1R4P770Yjtb9aPs1ZJ04nQw7wHb4dSrmZsqa/i9phyGI0Jf7Enemotb9HI6QMVJ +PqW+jqpx62z69Rrkav17fVVA71hu5tnVvCSrwe+3AgMBAAGjggQ3MIIEMzBnBggrBgEFBQcBAQRb +MFkwKAYIKwYBBQUHMAGGHGh0dHBzOi8vcmNhLmUtc3ppZ25vLmh1L29jc3AwLQYIKwYBBQUHMAKG +IWh0dHA6Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNydDAPBgNVHRMBAf8EBTADAQH/MIIBcwYD +VR0gBIIBajCCAWYwggFiBgwrBgEEAYGoGAIBAQEwggFQMCgGCCsGAQUFBwIBFhxodHRwOi8vd3d3 +LmUtc3ppZ25vLmh1L1NaU1ovMIIBIgYIKwYBBQUHAgIwggEUHoIBEABBACAAdABhAG4A+gBzAO0A +dAB2AOEAbgB5ACAA6QByAHQAZQBsAG0AZQB6AOkAcwDpAGgAZQB6ACAA6QBzACAAZQBsAGYAbwBn +AGEAZADhAHMA4QBoAG8AegAgAGEAIABTAHoAbwBsAGcA4QBsAHQAYQB0APMAIABTAHoAbwBsAGcA +4QBsAHQAYQB0AOEAcwBpACAAUwB6AGEAYgDhAGwAeQB6AGEAdABhACAAcwB6AGUAcgBpAG4AdAAg +AGsAZQBsAGwAIABlAGwAagDhAHIAbgBpADoAIABoAHQAdABwADoALwAvAHcAdwB3AC4AZQAtAHMA +egBpAGcAbgBvAC4AaAB1AC8AUwBaAFMAWgAvMIHIBgNVHR8EgcAwgb0wgbqggbeggbSGIWh0dHA6 +Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNybIaBjmxkYXA6Ly9sZGFwLmUtc3ppZ25vLmh1L0NO +PU1pY3Jvc2VjJTIwZS1Temlnbm8lMjBSb290JTIwQ0EsT1U9ZS1Temlnbm8lMjBDQSxPPU1pY3Jv +c2VjJTIwTHRkLixMPUJ1ZGFwZXN0LEM9SFU/Y2VydGlmaWNhdGVSZXZvY2F0aW9uTGlzdDtiaW5h +cnkwDgYDVR0PAQH/BAQDAgEGMIGWBgNVHREEgY4wgYuBEGluZm9AZS1zemlnbm8uaHWkdzB1MSMw +IQYDVQQDDBpNaWNyb3NlYyBlLVN6aWduw7MgUm9vdCBDQTEWMBQGA1UECwwNZS1TemlnbsOzIEhT +WjEWMBQGA1UEChMNTWljcm9zZWMgS2Z0LjERMA8GA1UEBxMIQnVkYXBlc3QxCzAJBgNVBAYTAkhV +MIGsBgNVHSMEgaQwgaGAFMegSXUWYYTbMUuE0vE3QJDvTtz3oXakdDByMQswCQYDVQQGEwJIVTER +MA8GA1UEBxMIQnVkYXBlc3QxFjAUBgNVBAoTDU1pY3Jvc2VjIEx0ZC4xFDASBgNVBAsTC2UtU3pp 
+Z25vIENBMSIwIAYDVQQDExlNaWNyb3NlYyBlLVN6aWdubyBSb290IENBghEAzLjnv04pGv2i3Gal +HCwPETAdBgNVHQ4EFgQUx6BJdRZhhNsxS4TS8TdAkO9O3PcwDQYJKoZIhvcNAQEFBQADggEBANMT +nGZjWS7KXHAM/IO8VbH0jgdsZifOwTsgqRy7RlRw7lrMoHfqaEQn6/Ip3Xep1fvj1KcExJW4C+FE +aGAHQzAxQmHl7tnlJNUb3+FKG6qfx1/4ehHqE5MAyopYse7tDk2016g2JnzgOsHVV4Lxdbb9iV/a +86g4nzUGCM4ilb7N1fy+W955a9x6qWVmvrElWl/tftOsRm1M9DKHtCAE4Gx4sHfRhUZLphK3dehK +yVZs15KrnfVJONJPU+NVkBHbmJbGSfI+9J8b4PeI3CVimUTYc78/MPMMNz7UwiiAc7EBt51alhQB +S6kRnSlqLtBdgcDPsiBDxwPgN05dCtxZICU= +-----END CERTIFICATE----- + +Certigna +======== +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNVBAYTAkZSMRIw +EAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4XDTA3MDYyOTE1MTMwNVoXDTI3 +MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwI +Q2VydGlnbmEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7q +XOEm7RFHYeGifBZ4QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyH +GxnygQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbwzBfsV1/p +ogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q130yGLMLLGq/jj8UEYkg +DncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKf +Irjxwo1p3Po6WAbfAgMBAAGjgbwwgbkwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQ +tCRZvgHyUtVF9lo53BEwZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJ +BgNVBAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzjAQ/J +SP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG9w0BAQUFAAOCAQEA +hQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8hbV6lUmPOEvjvKtpv6zf+EwLHyzs+ +ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFncfca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1klu +PBS1xp81HlDQwY9qcEQCYsuuHWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY +1gkIl2PlwS6wt0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- + +AC Ra\xC3\xADz Certic\xC3\xA1mara S.A. 
+====================================== +-----BEGIN CERTIFICATE----- +MIIGZjCCBE6gAwIBAgIPB35Sk3vgFeNX8GmMy+wMMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNVBAYT +AkNPMUcwRQYDVQQKDD5Tb2NpZWRhZCBDYW1lcmFsIGRlIENlcnRpZmljYWNpw7NuIERpZ2l0YWwg +LSBDZXJ0aWPDoW1hcmEgUy5BLjEjMCEGA1UEAwwaQUMgUmHDrXogQ2VydGljw6FtYXJhIFMuQS4w +HhcNMDYxMTI3MjA0NjI5WhcNMzAwNDAyMjE0MjAyWjB7MQswCQYDVQQGEwJDTzFHMEUGA1UECgw+ +U29jaWVkYWQgQ2FtZXJhbCBkZSBDZXJ0aWZpY2FjacOzbiBEaWdpdGFsIC0gQ2VydGljw6FtYXJh +IFMuQS4xIzAhBgNVBAMMGkFDIFJhw616IENlcnRpY8OhbWFyYSBTLkEuMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAq2uJo1PMSCMI+8PPUZYILrgIem08kBeGqentLhM0R7LQcNzJPNCN +yu5LF6vQhbCnIwTLqKL85XXbQMpiiY9QngE9JlsYhBzLfDe3fezTf3MZsGqy2IiKLUV0qPezuMDU +2s0iiXRNWhU5cxh0T7XrmafBHoi0wpOQY5fzp6cSsgkiBzPZkc0OnB8OIMfuuzONj8LSWKdf/WU3 +4ojC2I+GdV75LaeHM/J4Ny+LvB2GNzmxlPLYvEqcgxhaBvzz1NS6jBUJJfD5to0EfhcSM2tXSExP +2yYe68yQ54v5aHxwD6Mq0Do43zeX4lvegGHTgNiRg0JaTASJaBE8rF9ogEHMYELODVoqDA+bMMCm +8Ibbq0nXl21Ii/kDwFJnmxL3wvIumGVC2daa49AZMQyth9VXAnow6IYm+48jilSH5L887uvDdUhf +HjlvgWJsxS3EF1QZtzeNnDeRyPYL1epjb4OsOMLzP96a++EjYfDIJss2yKHzMI+ko6Kh3VOz3vCa +Mh+DkXkwwakfU5tTohVTP92dsxA7SH2JD/ztA/X7JWR1DhcZDY8AFmd5ekD8LVkH2ZD6mq093ICK +5lw1omdMEWux+IBkAC1vImHFrEsm5VoQgpukg3s0956JkSCXjrdCx2bD0Omk1vUgjcTDlaxECp1b +czwmPS9KvqfJpxAe+59QafMCAwEAAaOB5jCB4zAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE +AwIBBjAdBgNVHQ4EFgQU0QnQ6dfOeXRU+Tows/RtLAMDG2gwgaAGA1UdIASBmDCBlTCBkgYEVR0g +ADCBiTArBggrBgEFBQcCARYfaHR0cDovL3d3dy5jZXJ0aWNhbWFyYS5jb20vZHBjLzBaBggrBgEF +BQcCAjBOGkxMaW1pdGFjaW9uZXMgZGUgZ2FyYW507WFzIGRlIGVzdGUgY2VydGlmaWNhZG8gc2Ug +cHVlZGVuIGVuY29udHJhciBlbiBsYSBEUEMuMA0GCSqGSIb3DQEBBQUAA4ICAQBclLW4RZFNjmEf +AygPU3zmpFmps4p6xbD/CHwso3EcIRNnoZUSQDWDg4902zNc8El2CoFS3UnUmjIz75uny3XlesuX +EpBcunvFm9+7OSPI/5jOCk0iAUgHforA1SBClETvv3eiiWdIG0ADBaGJ7M9i4z0ldma/Jre7Ir5v +/zlXdLp6yQGVwZVR6Kss+LGGIOk/yzVb0hfpKv6DExdA7ohiZVvVO2Dpezy4ydV/NgIlqmjCMRW3 +MGXrfx1IebHPOeJCgBbT9ZMj/EyXyVo3bHwi2ErN0o42gzmRkBDI8ck1fj+404HGIGQatlDCIaR4 +3NAvO2STdPCWkPHv+wlaNECW8DYSwaN0jJN+Qd53i+yG2dIPPy3RzECiiWZIHiCznCNZc6lEc7wk +eZBWN7PGKX6jD/EpOe9+XCgycDWs2rjIdWb8m0w5R44bb5tNAlQiM+9hup4phO9OSzNHdpdqy35f +/RWmnkJDW2ZaiogN9xa5P1FlK2Zqi9E4UqLWRhH6/JocdJ6PlwsCT2TG9WjTSy3/pDceiz+/RL5h +RqGEPQgnTIEgd4kI6mdAXmwIUV80WoyWaM3X94nCHNMyAK9Sy9NgWyo6R35rMDOhYil/SrnhLecU +Iw4OGEfhefwVVdCx/CVxY3UzHCMrr1zZ7Ud3YA47Dx7SwNxkBYn8eNZcLCZDqQ== +-----END CERTIFICATE----- + +TC TrustCenter Class 2 CA II +============================ +-----BEGIN CERTIFICATE----- +MIIEqjCCA5KgAwIBAgIOLmoAAQACH9dSISwRXDswDQYJKoZIhvcNAQEFBQAwdjELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNVBAsTGVRDIFRydXN0Q2VudGVy +IENsYXNzIDIgQ0ExJTAjBgNVBAMTHFRDIFRydXN0Q2VudGVyIENsYXNzIDIgQ0EgSUkwHhcNMDYw +MTEyMTQzODQzWhcNMjUxMjMxMjI1OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1 +c3RDZW50ZXIgR21iSDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQTElMCMGA1UE +AxMcVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAKuAh5uO8MN8h9foJIIRszzdQ2Lu+MNF2ujhoF/RKrLqk2jftMjWQ+nEdVl//OEd+DFw +IxuInie5e/060smp6RQvkL4DUsFJzfb95AhmC1eKokKguNV/aVyQMrKXDcpK3EY+AlWJU+MaWss2 +xgdW94zPEfRMuzBwBJWl9jmM/XOBCH2JXjIeIqkiRUuwZi4wzJ9l/fzLganx4Duvo4bRierERXlQ +Xa7pIXSSTYtZgo+U4+lK8edJsBTj9WLL1XK9H7nSn6DNqPoByNkN39r8R52zyFTfSUrxIan+GE7u +SNQZu+995OKdy1u2bv/jzVrndIIFuoAlOMvkaZ6vQaoahPUCAwEAAaOCATQwggEwMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTjq1RMgKHbVkO3kUrL84J6E1wIqzCB +7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRydXN0Y2VudGVyLmRlL2NybC92Mi90 +Y19jbGFzc18yX2NhX0lJLmNybIaBn2xkYXA6Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBU 
+cnVzdENlbnRlciUyMENsYXNzJTIwMiUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21i +SCxPVT1yb290Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u +TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEAjNfffu4bgBCzg/XbEeprS6iSGNn3Bzn1LL4G +dXpoUxUc6krtXvwjshOg0wn/9vYua0Fxec3ibf2uWWuFHbhOIprtZjluS5TmVfwLG4t3wVMTZonZ +KNaL80VKY7f9ewthXbhtvsPcW3nS7Yblok2+XnR8au0WOB9/WIFaGusyiC2y8zl3gK9etmF1Kdsj +TYjKUCjLhdLTEKJZbtOTVAB6okaVhgWcqRmY5TFyDADiZ9lA4CQze28suVyrZZ0srHbqNZn1l7kP +JOzHdiEoZa5X6AeIdUpWoNIFOqTmjZKILPPy4cHGYdtBxceb9w4aUUXCYWvcZCcXjFq32nQozZfk +vQ== +-----END CERTIFICATE----- + +TC TrustCenter Class 3 CA II +============================ +-----BEGIN CERTIFICATE----- +MIIEqjCCA5KgAwIBAgIOSkcAAQAC5aBd1j8AUb8wDQYJKoZIhvcNAQEFBQAwdjELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNVBAsTGVRDIFRydXN0Q2VudGVy +IENsYXNzIDMgQ0ExJTAjBgNVBAMTHFRDIFRydXN0Q2VudGVyIENsYXNzIDMgQ0EgSUkwHhcNMDYw +MTEyMTQ0MTU3WhcNMjUxMjMxMjI1OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1 +c3RDZW50ZXIgR21iSDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQTElMCMGA1UE +AxMcVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBALTgu1G7OVyLBMVMeRwjhjEQY0NVJz/GRcekPewJDRoeIMJWHt4bNwcwIi9v8Qbxq63W +yKthoy9DxLCyLfzDlml7forkzMA5EpBCYMnMNWju2l+QVl/NHE1bWEnrDgFPZPosPIlY2C8u4rBo +6SI7dYnWRBpl8huXJh0obazovVkdKyT21oQDZogkAHhg8fir/gKya/si+zXmFtGt9i4S5Po1auUZ +uV3bOx4a+9P/FRQI2AlqukWdFHlgfa9Aigdzs5OW03Q0jTo3Kd5c7PXuLjHCINy+8U9/I1LZW+Jk +2ZyqBwi1Rb3R0DHBq1SfqdLDYmAD8bs5SpJKPQq5ncWg/jcCAwEAAaOCATQwggEwMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTUovyfs8PYA9NXXAek0CSnwPIA1DCB +7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRydXN0Y2VudGVyLmRlL2NybC92Mi90 +Y19jbGFzc18zX2NhX0lJLmNybIaBn2xkYXA6Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBU +cnVzdENlbnRlciUyMENsYXNzJTIwMyUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21i +SCxPVT1yb290Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u +TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEANmDkcPcGIEPZIxpC8vijsrlNirTzwppVMXzE +O2eatN9NDoqTSheLG43KieHPOh6sHfGcMrSOWXaiQYUlN6AT0PV8TtXqluJucsG7Kv5sbviRmEb8 +yRtXW+rIGjs/sFGYPAfaLFkB2otE6OF0/ado3VS6g0bsyEa1+K+XwDsJHI/OcpY9M1ZwvJbL2NV9 +IJqDnxrcOfHFcqMRA/07QlIp2+gB95tejNaNhk4Z+rwcvsUhpYeeeC422wlxo3I0+GzjBgnyXlal +092Y+tTmBvTwtiBjS+opvaqCZh77gaqnN60TGOaSw4HBM7uIHqHn4rS9MWwOUT1v+5ZWgOI2F9Hc +5A== +-----END CERTIFICATE----- + +TC TrustCenter Universal CA I +============================= +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIOHaIAAQAC7LdggHiNtgYwDQYJKoZIhvcNAQEFBQAweTELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNVBAsTG1RDIFRydXN0Q2VudGVy +IFVuaXZlcnNhbCBDQTEmMCQGA1UEAxMdVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBIEkwHhcN +MDYwMzIyMTU1NDI4WhcNMjUxMjMxMjI1OTU5WjB5MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMg +VHJ1c3RDZW50ZXIgR21iSDEkMCIGA1UECxMbVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBMSYw +JAYDVQQDEx1UQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0EgSTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAKR3I5ZEr5D0MacQ9CaHnPM42Q9e3s9B6DGtxnSRJJZ4Hgmgm5qVSkr1YnwC +qMqs+1oEdjneX/H5s7/zA1hV0qq34wQi0fiU2iIIAI3TfCZdzHd55yx4Oagmcw6iXSVphU9VDprv +xrlE4Vc93x9UIuVvZaozhDrzznq+VZeujRIPFDPiUHDDSYcTvFHe15gSWu86gzOSBnWLknwSaHtw +ag+1m7Z3W0hZneTvWq3zwZ7U10VOylY0Ibw+F1tvdwxIAUMpsN0/lm7mlaoMwCC2/T42J5zjXM9O +gdwZu5GQfezmlwQek8wiSdeXhrYTCjxDI3d+8NzmzSQfO4ObNDqDNOMCAwEAAaNjMGEwHwYDVR0j +BBgwFoAUkqR1LKSevoFE63n8isWVpesQdXMwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AYYwHQYDVR0OBBYEFJKkdSyknr6BROt5/IrFlaXrEHVzMA0GCSqGSIb3DQEBBQUAA4IBAQAo0uCG +1eb4e/CX3CJrO5UUVg8RMKWaTzqwOuAGy2X17caXJ/4l8lfmXpWMPmRgFVp/Lw0BxbFg/UU1z/Cy 
+vwbZ71q+s2IhtNerNXxTPqYn8aEt2hojnczd7Dwtnic0XQ/CNnm8yUpiLe1r2X1BQ3y2qsrtYbE3 +ghUJGooWMNjsydZHcnhLEEYUjl8Or+zHL6sQ17bxbuyGssLoDZJz3KL0Dzq/YSMQiZxIQG5wALPT +ujdEWBF6AmqI8Dc08BnprNRlc/ZpjGSUOnmFKbAWKwyCPwacx/0QK54PLLae4xW/2TYcuiUaUj0a +7CIMHOCkoj3w6DnPgcB77V0fb8XQC9eY +-----END CERTIFICATE----- + +Deutsche Telekom Root CA 2 +========================== +-----BEGIN CERTIFICATE----- +MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMT +RGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEG +A1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENBIDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5 +MjM1OTAwWjBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0G +A1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBS +b290IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEUha88EOQ5 +bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhCQN/Po7qCWWqSG6wcmtoI +KyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1MjwrrFDa1sPeg5TKqAyZMg4ISFZbavva4VhY +AUlfckE8FQYBjl2tqriTtM2e66foai1SNNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aK +Se5TBY8ZTNXeWHmb0mocQqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTV +jlsB9WoHtxa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAPBgNV +HRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAlGRZrTlk5ynr +E/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756AbrsptJh6sTtU6zkXR34ajgv8HzFZMQSy +zhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpaIzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8 +rZ7/gFnkm0W09juwzTkZmDLl6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4G +dyd1Lx+4ivn+xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU +Cm26OWMohpLzGITY+9HPBVZkVw== +-----END CERTIFICATE----- + +ComSign Secured CA +================== +-----BEGIN CERTIFICATE----- +MIIDqzCCApOgAwIBAgIRAMcoRwmzuGxFjB36JPU2TukwDQYJKoZIhvcNAQEFBQAwPDEbMBkGA1UE +AxMSQ29tU2lnbiBTZWN1cmVkIENBMRAwDgYDVQQKEwdDb21TaWduMQswCQYDVQQGEwJJTDAeFw0w +NDAzMjQxMTM3MjBaFw0yOTAzMTYxNTA0NTZaMDwxGzAZBgNVBAMTEkNvbVNpZ24gU2VjdXJlZCBD +QTEQMA4GA1UEChMHQ29tU2lnbjELMAkGA1UEBhMCSUwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDGtWhfHZQVw6QIVS3joFd67+l0Kru5fFdJGhFeTymHDEjWaueP1H5XJLkGieQcPOqs +49ohgHMhCu95mGwfCP+hUH3ymBvJVG8+pSjsIQQPRbsHPaHA+iqYHU4Gk/v1iDurX8sWv+bznkqH +7Rnqwp9D5PGBpX8QTz7RSmKtUxvLg/8HZaWSLWapW7ha9B20IZFKF3ueMv5WJDmyVIRD9YTC2LxB +kMyd1mja6YJQqTtoz7VdApRgFrFD2UNd3V2Hbuq7s8lr9gOUCXDeFhF6K+h2j0kQmHe5Y1yLM5d1 +9guMsqtb3nQgJT/j8xH5h2iGNXHDHYwt6+UarA9z1YJZQIDTAgMBAAGjgacwgaQwDAYDVR0TBAUw +AwEB/zBEBgNVHR8EPTA7MDmgN6A1hjNodHRwOi8vZmVkaXIuY29tc2lnbi5jby5pbC9jcmwvQ29t +U2lnblNlY3VyZWRDQS5jcmwwDgYDVR0PAQH/BAQDAgGGMB8GA1UdIwQYMBaAFMFL7XC29z58ADsA +j8c+DkWfHl3sMB0GA1UdDgQWBBTBS+1wtvc+fAA7AI/HPg5Fnx5d7DANBgkqhkiG9w0BAQUFAAOC +AQEAFs/ukhNQq3sUnjO2QiBq1BW9Cav8cujvR3qQrFHBZE7piL1DRYHjZiM/EoZNGeQFsOY3wo3a +BijJD4mkU6l1P7CW+6tMM1X5eCZGbxs2mPtCdsGCuY7e+0X5YxtiOzkGynd6qDwJz2w2PQ8KRUtp +FhpFfTMDZflScZAmlaxMDPWLkz/MdXSFmLr/YnpNH4n+rr2UAJm/EaXc4HnFFgt9AmEd6oX5AhVP +51qJThRv4zdLhfXBPGHg/QVBspJ/wx2g0K5SZGBrGMYmnNj1ZOQ2GmKfig8+/21OGVZOIJFsnzQz +OjRXUDpvgV4GxvU+fE6OK85lBi5d0ipTdF7Tbieejw== +-----END CERTIFICATE----- + +Cybertrust Global Root +====================== +-----BEGIN CERTIFICATE----- +MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYGA1UEChMPQ3li +ZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBSb290MB4XDTA2MTIxNTA4 +MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQD +ExZDeWJlcnRydXN0IEdsb2JhbCBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA ++Mi8vRRQZhP/8NN57CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW 
+0ozSJ8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2yHLtgwEZL +AfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iPt3sMpTjr3kfb1V05/Iin +89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNzFtApD0mpSPCzqrdsxacwOUBdrsTiXSZT +8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAYXSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2 +MDSgMqAwhi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3JsMB8G +A1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUAA4IBAQBW7wojoFRO +lZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMjWqd8BfP9IjsO0QbE2zZMcwSO5bAi +5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUxXOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2 +hO0j9n0Hq0V+09+zv+mKts2oomcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+T +X3EJIrduPuocA06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW +WL1WMRJOEcgh4LMRkWXbtKaIOM5V +-----END CERTIFICATE----- + +ePKI Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQG +EwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0ZC4xKjAoBgNVBAsMIWVQS0kg +Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMx +MjdaMF4xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEq +MCgGA1UECwwhZVBLSSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAHSyZbCUNs +IZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAhijHyl3SJCRImHJ7K2RKi +lTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3XDZoTM1PRYfl61dd4s5oz9wCGzh1NlDiv +qOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX +12ruOzjjK9SXDrkb5wdJfzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0O +WQqraffAsgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uUWH1+ +ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLSnT0IFaUQAS2zMnao +lQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pHdmX2Os+PYhcZewoozRrSgx4hxyy/ +vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJipNiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXi +Zo1jDiVN1Rmy5nk3pyKdVDECAwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/Qkqi +MAwGA1UdEwQFMAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH +ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGBuvl2ICO1J2B0 +1GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6YlPwZpVnPDimZI+ymBV3QGypzq +KOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkPJXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdV +xrsStZf0X4OFunHB2WyBEXYKCrC/gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEP +NXubrjlpC2JgQCA2j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+r +GNm65ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUBo2M3IUxE +xJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS/jQ6fbjpKdx2qcgw+BRx +gMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2zGp1iro2C6pSe3VkQw63d4k3jMdXH7Ojy +sP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTEW9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmOD +BCEIZ43ygknQW/2xzQ+DhNQ+IIX3Sj0rnP0qCglN6oH4EZw= +-----END CERTIFICATE----- + +T\xc3\x9c\x42\xC4\xB0TAK UEKAE K\xC3\xB6k Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 - S\xC3\xBCr\xC3\xBCm 3 +============================================================================================================================= +-----BEGIN CERTIFICATE----- +MIIFFzCCA/+gAwIBAgIBETANBgkqhkiG9w0BAQUFADCCASsxCzAJBgNVBAYTAlRSMRgwFgYDVQQH +DA9HZWJ6ZSAtIEtvY2FlbGkxRzBFBgNVBAoMPlTDvHJraXllIEJpbGltc2VsIHZlIFRla25vbG9q 
+aWsgQXJhxZ90xLFybWEgS3VydW11IC0gVMOcQsSwVEFLMUgwRgYDVQQLDD9VbHVzYWwgRWxla3Ry +b25payB2ZSBLcmlwdG9sb2ppIEFyYcWfdMSxcm1hIEVuc3RpdMO8c8O8IC0gVUVLQUUxIzAhBgNV +BAsMGkthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppMUowSAYDVQQDDEFUw5xCxLBUQUsgVUVLQUUg +S8O2ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSAtIFPDvHLDvG0gMzAeFw0wNzA4 +MjQxMTM3MDdaFw0xNzA4MjExMTM3MDdaMIIBKzELMAkGA1UEBhMCVFIxGDAWBgNVBAcMD0dlYnpl +IC0gS29jYWVsaTFHMEUGA1UECgw+VMO8cmtpeWUgQmlsaW1zZWwgdmUgVGVrbm9sb2ppayBBcmHF +n3TEsXJtYSBLdXJ1bXUgLSBUw5xCxLBUQUsxSDBGBgNVBAsMP1VsdXNhbCBFbGVrdHJvbmlrIHZl +IEtyaXB0b2xvamkgQXJhxZ90xLFybWEgRW5zdGl0w7xzw7wgLSBVRUtBRTEjMCEGA1UECwwaS2Ft +dSBTZXJ0aWZpa2FzeW9uIE1lcmtlemkxSjBIBgNVBAMMQVTDnELEsFRBSyBVRUtBRSBLw7ZrIFNl +cnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIC0gU8O8csO8bSAzMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEAim1L/xCIOsP2fpTo6iBkcK4hgb46ezzb8R1Sf1n68yJMlaCQvEhO +Eav7t7WNeoMojCZG2E6VQIdhn8WebYGHV2yKO7Rm6sxA/OOqbLLLAdsyv9Lrhc+hDVXDWzhXcLh1 +xnnRFDDtG1hba+818qEhTsXOfJlfbLm4IpNQp81McGq+agV/E5wrHur+R84EpW+sky58K5+eeROR +6Oqeyjh1jmKwlZMq5d/pXpduIF9fhHpEORlAHLpVK/swsoHvhOPc7Jg4OQOFCKlUAwUp8MmPi+oL +hmUZEdPpCSPeaJMDyTYcIW7OjGbxmTDY17PDHfiBLqi9ggtm/oLL4eAagsNAgQIDAQABo0IwQDAd +BgNVHQ4EFgQUvYiHyY/2pAoLquvF/pEjnatKijIwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAB18+kmPNOm3JpIWmgV050vQbTlswyb2zrgxvMTfvCr4 +N5EY3ATIZJkrGG2AA1nJrvhY0D7twyOfaTyGOBye79oneNGEN3GKPEs5z35FBtYt2IpNeBLWrcLT +y9LQQfMmNkqblWwM7uXRQydmwYj3erMgbOqwaSvHIOgMA8RBBZniP+Rr+KCGgceExh/VS4ESshYh +LBOhgLJeDEoTniDYYkCrkOpkSi+sDQESeUWoL4cZaMjihccwsnX5OD+ywJO0a+IDRM5noN+J1q2M +dqMTw5RhK2vZbMEHCiIHhWyFJEapvj+LeISCfiQMnf2BN+MlqO02TpUsyZyQ2uypQjyttgI= +-----END CERTIFICATE----- + +Buypass Class 2 CA 1 +==================== +-----BEGIN CERTIFICATE----- +MIIDUzCCAjugAwIBAgIBATANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3MgQ2xhc3MgMiBDQSAxMB4XDTA2 +MTAxMzEwMjUwOVoXDTE2MTAxMzEwMjUwOVowSzELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBh +c3MgQVMtOTgzMTYzMzI3MR0wGwYDVQQDDBRCdXlwYXNzIENsYXNzIDIgQ0EgMTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAIs8B0XY9t/mx8q6jUPFR42wWsE425KEHK8T1A9vNkYgxC7M +cXA0ojTTNy7Y3Tp3L8DrKehc0rWpkTSHIln+zNvnma+WwajHQN2lFYxuyHyXA8vmIPLXl18xoS83 +0r7uvqmtqEyeIWZDO6i88wmjONVZJMHCR3axiFyCO7srpgTXjAePzdVBHfCuuCkslFJgNJQ72uA4 +0Z0zPhX0kzLFANq1KWYOOngPIVJfAuWSeyXTkh4vFZ2B5J2O6O+JzhRMVB0cgRJNcKi+EAUXfh/R +uFdV7c27UsKwHnjCTTZoy1YmwVLBvXb3WNVyfh9EdrsAiR0WnVE1703CVu9r4Iw7DekCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUP42aWYv8e3uco684sDntkHGA1sgwDgYDVR0P +AQH/BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQAVGn4TirnoB6NLJzKyQJHyIdFkhb5jatLPgcIV +1Xp+DCmsNx4cfHZSldq1fyOhKXdlyTKdqC5Wq2B2zha0jX94wNWZUYN/Xtm+DKhQ7SLHrQVMdvvt +7h5HZPb3J31cKA9FxVxiXqaakZG3Uxcu3K1gnZZkOb1naLKuBctN518fV4bVIJwo+28TOPX2EZL2 +fZleHwzoq0QkKXJAPTZSr4xYkHPB7GEseaHsh7U/2k3ZIQAw3pDaDtMaSKk+hQsUi4y8QZ5q9w5w +wDX3OaJdZtB7WZ+oRxKaJyOkLY4ng5IgodcVf/EuGO70SH8vf/GhGLWhC5SgYiAynB321O+/TIho +-----END CERTIFICATE----- + +Buypass Class 3 CA 1 +==================== +-----BEGIN CERTIFICATE----- +MIIDUzCCAjugAwIBAgIBAjANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3MgQ2xhc3MgMyBDQSAxMB4XDTA1 +MDUwOTE0MTMwM1oXDTE1MDUwOTE0MTMwM1owSzELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBh +c3MgQVMtOTgzMTYzMzI3MR0wGwYDVQQDDBRCdXlwYXNzIENsYXNzIDMgQ0EgMTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAKSO13TZKWTeXx+HgJHqTjnmGcZEC4DVC69TB4sSveZn8AKx +ifZgisRbsELRwCGoy+Gb72RRtqfPFfV0gGgEkKBYouZ0plNTVUhjP5JW3SROjvi6K//zNIqeKNc0 +n6wv1g/xpC+9UrJJhW05NfBEMJNGJPO251P7vGGvqaMU+8IXF4Rs4HyI+MkcVyzwPX6UvCWThOia 
+AJpFBUJXgPROztmuOfbIUxAMZTpHe2DC1vqRycZxbL2RhzyRhkmr8w+gbCZ2Xhysm3HljbybIR6c +1jh+JIAVMYKWsUnTYjdbiAwKYjT+p0h+mbEwi5A3lRyoH6UsjfRVyNvdWQrCrXig9IsCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUOBTmyPCppAP0Tj4io1vy1uCtQHQwDgYDVR0P +AQH/BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQABZ6OMySU9E2NdFm/soT4JXJEVKirZgCFPBdy7 +pYmrEzMqnji3jG8CcmPHc3ceCQa6Oyh7pEfJYWsICCD8igWKH7y6xsL+z27sEzNxZy5p+qksP2bA +EllNC1QCkoS72xLvg3BweMhT+t/Gxv/ciC8HwEmdMldg0/L2mSlf56oBzKwzqBwKu5HEA6BvtjT5 +htOzdlSY9EqBs1OdTUDs5XcTRa9bqh/YL0yCe/4qxFi7T/ye/QNlGioOw6UgFpRreaaiErS7GqQj +el/wroQk5PMr+4okoyeYZdowdXb8GZHo2+ubPzK/QJcHJrrM85SFSnonk8+QQtS4Wxam58tAA915 +-----END CERTIFICATE----- + +EBG Elektronik Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 +========================================================================== +-----BEGIN CERTIFICATE----- +MIIF5zCCA8+gAwIBAgIITK9zQhyOdAIwDQYJKoZIhvcNAQEFBQAwgYAxODA2BgNVBAMML0VCRyBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMTcwNQYDVQQKDC5FQkcg +QmlsacWfaW0gVGVrbm9sb2ppbGVyaSB2ZSBIaXptZXRsZXJpIEEuxZ4uMQswCQYDVQQGEwJUUjAe +Fw0wNjA4MTcwMDIxMDlaFw0xNjA4MTQwMDMxMDlaMIGAMTgwNgYDVQQDDC9FQkcgRWxla3Ryb25p +ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTE3MDUGA1UECgwuRUJHIEJpbGnFn2lt +IFRla25vbG9qaWxlcmkgdmUgSGl6bWV0bGVyaSBBLsWeLjELMAkGA1UEBhMCVFIwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQDuoIRh0DpqZhAy2DE4f6en5f2h4fuXd7hxlugTlkaDT7by +X3JWbhNgpQGR4lvFzVcfd2NR/y8927k/qqk153nQ9dAktiHq6yOU/im/+4mRDGSaBUorzAzu8T2b +gmmkTPiab+ci2hC6X5L8GCcKqKpE+i4stPtGmggDg3KriORqcsnlZR9uKg+ds+g75AxuetpX/dfr +eYteIAbTdgtsApWjluTLdlHRKJ2hGvxEok3MenaoDT2/F08iiFD9rrbskFBKW5+VQarKD7JK/oCZ +TqNGFav4c0JqwmZ2sQomFd2TkuzbqV9UIlKRcF0T6kjsbgNs2d1s/OsNA/+mgxKb8amTD8UmTDGy +Y5lhcucqZJnSuOl14nypqZoaqsNW2xCaPINStnuWt6yHd6i58mcLlEOzrz5z+kI2sSXFCjEmN1Zn +uqMLfdb3ic1nobc6HmZP9qBVFCVMLDMNpkGMvQQxahByCp0OLna9XvNRiYuoP1Vzv9s6xiQFlpJI +qkuNKgPlV5EQ9GooFW5Hd4RcUXSfGenmHmMWOeMRFeNYGkS9y8RsZteEBt8w9DeiQyJ50hBs37vm +ExH8nYQKE3vwO9D8owrXieqWfo1IhR5kX9tUoqzVegJ5a9KK8GfaZXINFHDk6Y54jzJ0fFfy1tb0 +Nokb+Clsi7n2l9GkLqq+CxnCRelwXQIDAJ3Zo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB +/wQEAwIBBjAdBgNVHQ4EFgQU587GT/wWZ5b6SqMHwQSny2re2kcwHwYDVR0jBBgwFoAU587GT/wW +Z5b6SqMHwQSny2re2kcwDQYJKoZIhvcNAQEFBQADggIBAJuYml2+8ygjdsZs93/mQJ7ANtyVDR2t +FcU22NU57/IeIl6zgrRdu0waypIN30ckHrMk2pGI6YNw3ZPX6bqz3xZaPt7gyPvT/Wwp+BVGoGgm +zJNSroIBk5DKd8pNSe/iWtkqvTDOTLKBtjDOWU/aWR1qeqRFsIImgYZ29fUQALjuswnoT4cCB64k +XPBfrAowzIpAoHMEwfuJJPaaHFy3PApnNgUIMbOv2AFoKuB4j3TeuFGkjGwgPaL7s9QJ/XvCgKqT +bCmYIai7FvOpEl90tYeY8pUm3zTvilORiF0alKM/fCL414i6poyWqD1SNGKfAB5UVUJnxk1Gj7sU +RT0KlhaOEKGXmdXTMIXM3rRyt7yKPBgpaP3ccQfuJDlq+u2lrDgv+R4QDgZxGhBM/nV+/x5XOULK +1+EVoVZVWRvRo68R2E7DpSvvkL/A7IITW43WciyTTo9qKd+FPNMN4KIYEsxVL0e3p5sC/kH2iExt +2qkBR4NkJ2IQgtYSe14DHzSpyZH+r11thie3I6p1GMog57AP14kOpmciY/SDQSsGS7tY1dHXt7kQ +Y9iJSrSq3RZj9W6+YKH47ejWkE8axsWgKdOnIaj1Wjz3x0miIZpKlVIglnKaZsv30oZDfCK+lvm9 +AahH3eU7QPl1K5srRmSGjR70j/sHd9DqSaIcjVIUpgqT +-----END CERTIFICATE----- + +certSIGN ROOT CA +================ +-----BEGIN CERTIFICATE----- +MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYTAlJPMREwDwYD +VQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTAeFw0wNjA3MDQxNzIwMDRa +Fw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UE +CxMQY2VydFNJR04gUk9PVCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7I +JUqOtdu0KBuqV5Do0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHH +rfAQUySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5dRdY4zTW2 +ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQOA7+j0xbm0bqQfWwCHTD 
+0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwvJoIQ4uNllAoEwF73XVv4EOLQunpL+943 +AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8B +Af8EBAMCAcYwHQYDVR0OBBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IB +AQA+0hyJLjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecYMnQ8 +SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ44gx+FkagQnIl6Z0 +x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6IJd1hJyMctTEHBDa0GpC9oHRxUIlt +vBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNwi/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7Nz +TogVZ96edhBiIL5VaZVDADlN9u6wWk5JRFRYX0KD +-----END CERTIFICATE----- + +CNNIC ROOT +========== +-----BEGIN CERTIFICATE----- +MIIDVTCCAj2gAwIBAgIESTMAATANBgkqhkiG9w0BAQUFADAyMQswCQYDVQQGEwJDTjEOMAwGA1UE +ChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1QwHhcNMDcwNDE2MDcwOTE0WhcNMjcwNDE2MDcw +OTE0WjAyMQswCQYDVQQGEwJDTjEOMAwGA1UEChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1Qw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDTNfc/c3et6FtzF8LRb+1VvG7q6KR5smzD +o+/hn7E7SIX1mlwhIhAsxYLO2uOabjfhhyzcuQxauohV3/2q2x8x6gHx3zkBwRP9SFIhxFXf2tiz +VHa6dLG3fdfA6PZZxU3Iva0fFNrfWEQlMhkqx35+jq44sDB7R3IJMfAw28Mbdim7aXZOV/kbZKKT +VrdvmW7bCgScEeOAH8tjlBAKqeFkgjH5jCftppkA9nCTGPihNIaj3XrCGHn2emU1z5DrvTOTn1Or +czvmmzQgLx3vqR1jGqCA2wMv+SYahtKNu6m+UjqHZ0gNv7Sg2Ca+I19zN38m5pIEo3/PIKe38zrK +y5nLAgMBAAGjczBxMBEGCWCGSAGG+EIBAQQEAwIABzAfBgNVHSMEGDAWgBRl8jGtKvf33VKWCscC +wQ7vptU7ETAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIB/jAdBgNVHQ4EFgQUZfIxrSr3991S +lgrHAsEO76bVOxEwDQYJKoZIhvcNAQEFBQADggEBAEs17szkrr/Dbq2flTtLP1se31cpolnKOOK5 +Gv+e5m4y3R6u6jW39ZORTtpC4cMXYFDy0VwmuYK36m3knITnA3kXr5g9lNvHugDnuL8BV8F3RTIM +O/G0HAiw/VGgod2aHRM2mm23xzy54cXZF/qD1T0VoDy7HgviyJA/qIYM/PmLXoXLT1tLYhFHxUV8 +BS9BsZ4QaRuZluBVeftOhpm4lNqGOGqTo+fLbuXf6iFViZx9fX+Y9QCJ7uOEwFyWtcVG6kbghVW2 +G8kS1sHNzYDzAgE8yGnLRUhj2JTQ7IUOO04RZfSCjKY9ri4ilAnIXOo8gV0WKgOXFlUJ24pBgp5m +mxE= +-----END CERTIFICATE----- + +ApplicationCA - Japanese Government +=================================== +-----BEGIN CERTIFICATE----- +MIIDoDCCAoigAwIBAgIBMTANBgkqhkiG9w0BAQUFADBDMQswCQYDVQQGEwJKUDEcMBoGA1UEChMT +SmFwYW5lc2UgR292ZXJubWVudDEWMBQGA1UECxMNQXBwbGljYXRpb25DQTAeFw0wNzEyMTIxNTAw +MDBaFw0xNzEyMTIxNTAwMDBaMEMxCzAJBgNVBAYTAkpQMRwwGgYDVQQKExNKYXBhbmVzZSBHb3Zl +cm5tZW50MRYwFAYDVQQLEw1BcHBsaWNhdGlvbkNBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAp23gdE6Hj6UG3mii24aZS2QNcfAKBZuOquHMLtJqO8F6tJdhjYq+xpqcBrSGUeQ3DnR4 +fl+Kf5Sk10cI/VBaVuRorChzoHvpfxiSQE8tnfWuREhzNgaeZCw7NCPbXCbkcXmP1G55IrmTwcrN +wVbtiGrXoDkhBFcsovW8R0FPXjQilbUfKW1eSvNNcr5BViCH/OlQR9cwFO5cjFW6WY2H/CPek9AE +jP3vbb3QesmlOmpyM8ZKDQUXKi17safY1vC+9D/qDihtQWEjdnjDuGWk81quzMKq2edY3rZ+nYVu +nyoKb58DKTCXKB28t89UKU5RMfkntigm/qJj5kEW8DOYRwIDAQABo4GeMIGbMB0GA1UdDgQWBBRU +WssmP3HMlEYNllPqa0jQk/5CdTAOBgNVHQ8BAf8EBAMCAQYwWQYDVR0RBFIwUKROMEwxCzAJBgNV +BAYTAkpQMRgwFgYDVQQKDA/ml6XmnKzlm73mlL/lupwxIzAhBgNVBAsMGuOCouODl+ODquOCseOD +vOOCt+ODp+ODs0NBMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADlqRHZ3ODrs +o2dGD/mLBqj7apAxzn7s2tGJfHrrLgy9mTLnsCTWw//1sogJhyzjVOGjprIIC8CFqMjSnHH2HZ9g +/DgzE+Ge3Atf2hZQKXsvcJEPmbo0NI2VdMV+eKlmXb3KIXdCEKxmJj3ekav9FfBv7WxfEPjzFvYD +io+nEhEMy/0/ecGc/WLuo89UDNErXxc+4z6/wCs+CZv+iKZ+tJIX/COUgb1up8WMwusRRdv4QcmW +dupwX3kSa+SjB1oF7ydJzyGfikwJcGapJsErEU4z0g781mzSDjJkaP+tBXhfAx2o45CsJOAPQKdL +rosot4LKGAfmt1t06SAZf7IbiVQ= +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority - G3 +============================================= +-----BEGIN CERTIFICATE----- +MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UE 
+BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA4IEdlb1RydXN0 +IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIz +NTk1OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAo +YykgMjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMT +LUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz+uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5j +K/BGvESyiaHAKAxJcCGVn2TAppMSAmUmhsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdE +c5IiaacDiGydY8hS2pgn5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3C +IShwiP/WJmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exALDmKu +dlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZChuOl1UcCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMR5yo6hTgMdHNxr +2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IBAQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9 +cr5HqQ6XErhK8WTTOd8lNNTBzU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbE +Ap7aDHdlDkQNkv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD +AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUHSJsMC8tJP33s +t/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2Gspki4cErx5z481+oghLrGREt +-----END CERTIFICATE----- + +thawte Primary Root CA - G2 +=========================== +-----BEGIN CERTIFICATE----- +MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDELMAkGA1UEBhMC +VVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMpIDIwMDcgdGhhd3RlLCBJbmMu +IC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3Qg +Q0EgLSBHMjAeFw0wNzExMDUwMDAwMDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEV +MBMGA1UEChMMdGhhd3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBG +b3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAt +IEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/BebfowJPDQfGAFG6DAJS +LSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6papu+7qzcMBniKI11KOasf2twu8x+qi5 +8/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU +mtgAMADna3+FGO6Lts6KDPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUN +G4k8VIZ3KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41oxXZ3K +rr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg== +-----END CERTIFICATE----- + +thawte Primary Root CA - G3 +=========================== +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCBrjELMAkGA1UE +BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 +aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0w +ODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh +d3RlLCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9uMTgwNgYD +VQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIG +A1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAsr8nLPvb2FvdeHsbnndmgcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2At +P0LMqmsywCPLLEHd5N/8YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC ++BsUa0Lfb1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS99irY +7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2SzhkGcuYMXDhpxwTW +vGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUkOQIDAQABo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJ 
+KoZIhvcNAQELBQADggEBABpA2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweK +A3rD6z8KLFIWoCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu +t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7cKUGRIjxpp7sC +8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fMm7v/OeZWYdMKp8RcTGB7BXcm +er/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZuMdRAGmI0Nj81Aa6sY6A= +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority - G2 +============================================= +-----BEGIN CERTIFICATE----- +MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA3IEdlb1RydXN0IElu +Yy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1 +OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg +MjAwNyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMTLUdl +b1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjB2MBAGByqGSM49AgEG +BSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcLSo17VDs6bl8VAsBQps8lL33KSLjHUGMc +KiEIfJo22Av+0SbFWDEwKCXzXV2juLaltJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+ +EVXVMAoGCCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGTqQ7m +ndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBuczrD6ogRLQy7rQkgu2 +npaqBA+K +-----END CERTIFICATE----- + +VeriSign Universal Root Certification Authority +=============================================== +-----BEGIN CERTIFICATE----- +MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCBvTELMAkGA1UE +BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO +ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk +IHVzZSBvbmx5MTgwNgYDVQQDEy9WZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9u +IEF1dGhvcml0eTAeFw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNhbCBSb290IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj +1mCOkdeQmIN65lgZOIzF9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGP +MiJhgsWHH26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+HLL72 +9fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN/BMReYTtXlT2NJ8I +AfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPTrJ9VAMf2CGqUuV/c4DPxhGD5WycR +tPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0G +CCsGAQUFBwEMBGEwX6FdoFswWTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2O +a8PPgGrUSBgsexkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud +DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4sAPmLGd75JR3 +Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+seQxIcaBlVZaDrHC1LGmWazx +Y8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTx +P/jgdFcrGJ2BtMQo2pSXpXDrrB2+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+P +wGZsY6rp2aQW9IHRlRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4 +mJO37M2CYfE45k+XmCpajQ== +-----END CERTIFICATE----- + +VeriSign Class 3 Public Primary Certification Authority - G4 +============================================================ +-----BEGIN CERTIFICATE----- +MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjELMAkGA1UEBhMC 
+VVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3 +b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVz +ZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBU +cnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRo +b3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8 +Utpkmw4tXNherJI9/gHmGUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGz +rl0Bp3vefLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEw +HzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVyaXNpZ24u +Y29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMWkf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMD +A2gAMGUCMGYhDBgmYFo4e1ZC4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIx +AJw9SDkjOVgaFRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA== +-----END CERTIFICATE----- + +NetLock Arany (Class Gold) Főtanúsítvány +============================================ +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQGEwJIVTERMA8G +A1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3MDUGA1UECwwuVGFuw7pzw610 +dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBB +cmFueSAoQ2xhc3MgR29sZCkgRsWRdGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgx +MjA2MTUwODIxWjCBpzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxO +ZXRMb2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlmaWNhdGlv +biBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNzIEdvbGQpIEbFkXRhbsO6 +c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxCRec75LbRTDofTjl5Bu +0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrTlF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw +/HpYzY6b7cNGbIRwXdrzAZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAk +H3B5r9s5VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRGILdw +fzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2BJtr+UBdADTHLpl1 +neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2MU9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwW +qZw8UQCgwBEIBaeZ5m8BiFRhbvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTta +YtOUZcTh5m2C+C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC +bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2FuLjbvrW5Kfna +NwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2XjG4Kvte9nHfRCaexOYNkbQu +dZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA - G2 +================================== +-----BEGIN CERTIFICATE----- +MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJOTDEeMBwGA1UE +CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oXDTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMC +TkwxHjAcBgNVBAoMFVN0YWF0IGRlciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5l +ZGVybGFuZGVuIFJvb3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ +5291qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8SpuOUfiUtn +vWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPUZ5uW6M7XxgpT0GtJlvOj +CwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvEpMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiil 
+e7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCR +OME4HYYEhLoaJXhena/MUGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpI +CT0ugpTNGmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy5V65 +48r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv6q012iDTiIJh8BIi +trzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEKeN5KzlW/HdXZt1bv8Hb/C3m1r737 +qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMB +AAGjgZcwgZQwDwYDVR0TAQH/BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcC +ARYxaHR0cDovL3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqGSIb3DQEBCwUA +A4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLySCZa59sCrI2AGeYwRTlHSeYAz ++51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwj +f/ST7ZwaUb7dRUG/kSS0H4zpX897IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaN +kqbG9AclVMwWVxJKgnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfk +CpYL+63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxLvJxxcypF +URmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkmbEgeqmiSBeGCc1qb3Adb +CG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvkN1trSt8sV4pAWja63XVECDdCcAz+3F4h +oKOKwJCcaNpQ5kUQR3i2TtJlycM33+FCY7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoV +IPVVYpbtbZNQvOSqeK3Zywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm +66+KAQ== +-----END CERTIFICATE----- + +CA Disig +======== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBATANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMK +QnJhdGlzbGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwHhcNMDYw +MzIyMDEzOTM0WhcNMTYwMzIyMDEzOTM0WjBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMKQnJhdGlz +bGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCS9jHBfYj9mQGp2HvycXXxMcbzdWb6UShGhJd4NLxs/LxFWYgm +GErENx+hSkS943EE9UQX4j/8SFhvXJ56CbpRNyIjZkMhsDxkovhqFQ4/61HhVKndBpnXmjxUizkD +Pw/Fzsbrg3ICqB9x8y34dQjbYkzo+s7552oftms1grrijxaSfQUMbEYDXcDtab86wYqg6I7ZuUUo +hwjstMoVvoLdtUSLLa2GDGhibYVW8qwUYzrG0ZmsNHhWS8+2rT+MitcE5eN4TPWGqvWP+j1scaMt +ymfraHtuM6kMgiioTGohQBUgDCZbg8KpFhXAJIJdKxatymP2dACw30PEEGBWZ2NFAgMBAAGjgf8w +gfwwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUjbJJaJ1yCCW5wCf1UJNWSEZx+Y8wDgYDVR0P +AQH/BAQDAgEGMDYGA1UdEQQvMC2BE2Nhb3BlcmF0b3JAZGlzaWcuc2uGFmh0dHA6Ly93d3cuZGlz +aWcuc2svY2EwZgYDVR0fBF8wXTAtoCugKYYnaHR0cDovL3d3dy5kaXNpZy5zay9jYS9jcmwvY2Ff +ZGlzaWcuY3JsMCygKqAohiZodHRwOi8vY2EuZGlzaWcuc2svY2EvY3JsL2NhX2Rpc2lnLmNybDAa +BgNVHSAEEzARMA8GDSuBHpGT5goAAAABAQEwDQYJKoZIhvcNAQEFBQADggEBAF00dGFMrzvY/59t +WDYcPQuBDRIrRhCA/ec8J9B6yKm2fnQwM6M6int0wHl5QpNt/7EpFIKrIYwvF/k/Ji/1WcbvgAa3 +mkkp7M5+cTxqEEHA9tOasnxakZzArFvITV734VP/Q3f8nktnbNfzg9Gg4H8l37iYC5oyOGwwoPP/ +CBUz91BKez6jPiCp3C9WgArtQVCwyfTssuMmRAAOb54GvCKWU3BlxFAKRmukLyeBEicTXxChds6K +ezfqwzlhA5WYOudsiCUI/HloDYd9Yvi0X/vF2Ey9WLw/Q1vUHgFNPGO+I++MzVpQuGhU+QqZMxEA +4Z7CRneC9VkGjCFMhwnN5ag= +-----END CERTIFICATE----- + +Juur-SK +======= +-----BEGIN CERTIFICATE----- +MIIE5jCCA86gAwIBAgIEO45L/DANBgkqhkiG9w0BAQUFADBdMRgwFgYJKoZIhvcNAQkBFglwa2lA +c2suZWUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKExlBUyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMRAw +DgYDVQQDEwdKdXVyLVNLMB4XDTAxMDgzMDE0MjMwMVoXDTE2MDgyNjE0MjMwMVowXTEYMBYGCSqG +SIb3DQEJARYJcGtpQHNrLmVlMQswCQYDVQQGEwJFRTEiMCAGA1UEChMZQVMgU2VydGlmaXRzZWVy +aW1pc2tlc2t1czEQMA4GA1UEAxMHSnV1ci1TSzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAIFxNj4zB9bjMI0TfncyRsvPGbJgMUaXhvSYRqTCZUXP00B841oiqBB4M8yIsdOBSvZiF3tf +TQou0M+LI+5PAk676w7KvRhj6IAcjeEcjT3g/1tf6mTll+g/mX8MCgkzABpTpyHhOEvWgxutr2TC 
++Rx6jGZITWYfGAriPrsfB2WThbkasLnE+w0R9vXW+RvHLCu3GFH+4Hv2qEivbDtPL+/40UceJlfw +UR0zlv/vWT3aTdEVNMfqPxZIe5EcgEMPPbgFPtGzlc3Yyg/CQ2fbt5PgIoIuvvVoKIO5wTtpeyDa +Tpxt4brNj3pssAki14sL2xzVWiZbDcDq5WDQn/413z8CAwEAAaOCAawwggGoMA8GA1UdEwEB/wQF +MAMBAf8wggEWBgNVHSAEggENMIIBCTCCAQUGCisGAQQBzh8BAQEwgfYwgdAGCCsGAQUFBwICMIHD +HoHAAFMAZQBlACAAcwBlAHIAdABpAGYAaQBrAGEAYQB0ACAAbwBuACAAdgDkAGwAagBhAHMAdABh +AHQAdQBkACAAQQBTAC0AaQBzACAAUwBlAHIAdABpAGYAaQB0AHMAZQBlAHIAaQBtAGkAcwBrAGUA +cwBrAHUAcwAgAGEAbABhAG0ALQBTAEsAIABzAGUAcgB0AGkAZgBpAGsAYQBhAHQAaQBkAGUAIABr +AGkAbgBuAGkAdABhAG0AaQBzAGUAawBzMCEGCCsGAQUFBwIBFhVodHRwOi8vd3d3LnNrLmVlL2Nw +cy8wKwYDVR0fBCQwIjAgoB6gHIYaaHR0cDovL3d3dy5zay5lZS9qdXVyL2NybC8wHQYDVR0OBBYE +FASqekej5ImvGs8KQKcYP2/v6X2+MB8GA1UdIwQYMBaAFASqekej5ImvGs8KQKcYP2/v6X2+MA4G +A1UdDwEB/wQEAwIB5jANBgkqhkiG9w0BAQUFAAOCAQEAe8EYlFOiCfP+JmeaUOTDBS8rNXiRTHyo +ERF5TElZrMj3hWVcRrs7EKACr81Ptcw2Kuxd/u+gkcm2k298gFTsxwhwDY77guwqYHhpNjbRxZyL +abVAyJRld/JXIWY7zoVAtjNjGr95HvxcHdMdkxuLDF2FvZkwMhgJkVLpfKG6/2SSmuz+Ne6ML678 +IIbsSt4beDI3poHSna9aEhbKmVv8b20OxaAehsmR0FyYgl9jDIpaq9iVpszLita/ZEuOyoqysOkh +Mp6qqIWYNIE5ITuoOlIyPfZrN4YGWhWY3PARZv40ILcD9EEQfTmEeZZyY7aWAuVrua0ZTbvGRNs2 +yyqcjg== +-----END CERTIFICATE----- + +Hongkong Post Root CA 1 +======================= +-----BEGIN CERTIFICATE----- +MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoT +DUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMB4XDTAzMDUx +NTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25n +IFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1 +ApzQjVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEnPzlTCeqr +auh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjhZY4bXSNmO7ilMlHIhqqh +qZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9nnV0ttgCXjqQesBCNnLsak3c78QA3xMY +V18meMjWCnl3v/evt3a5pQuEF10Q6m/hq5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNV +HRMBAf8ECDAGAQH/AgEDMA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7i +h9legYsCmEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI37pio +l7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clBoiMBdDhViw+5Lmei +IAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJsEhTkYY2sEJCehFC78JZvRZ+K88ps +T/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpOfMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilT +c4afU9hDDl3WY4JxHYB0yvbiAmvZWg== +-----END CERTIFICATE----- + +SecureSign RootCA11 +=================== +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDErMCkGA1UEChMi +SmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoGA1UEAxMTU2VjdXJlU2lnbiBS +b290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSsw +KQYDVQQKEyJKYXBhbiBDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1 +cmVTaWduIFJvb3RDQTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvL +TJszi1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8h9uuywGO +wvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOVMdrAG/LuYpmGYz+/3ZMq +g6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rP +O7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitA +bpSACW22s293bzUIUPsCh8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZX +t94wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAKCh +OBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xmKbabfSVSSUOrTC4r +bnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQX5Ucv+2rIrVls4W6ng+4reV6G4pQ 
+Oh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWrQbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01 +y8hSyn+B/tlr0/cR7SXf+Of5pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061 +lgeLKBObjBmNQSdJQO7e5iNEOdyhIta6A/I= +-----END CERTIFICATE----- + +ACEDICOM Root +============= +-----BEGIN CERTIFICATE----- +MIIFtTCCA52gAwIBAgIIYY3HhjsBggUwDQYJKoZIhvcNAQEFBQAwRDEWMBQGA1UEAwwNQUNFRElD +T00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMB4XDTA4 +MDQxODE2MjQyMloXDTI4MDQxMzE2MjQyMlowRDEWMBQGA1UEAwwNQUNFRElDT00gUm9vdDEMMAoG +A1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA/5KV4WgGdrQsyFhIyv2AVClVYyT/kGWbEHV7w2rbYgIB8hiGtXxaOLHk +WLn709gtn70yN78sFW2+tfQh0hOR2QetAQXW8713zl9CgQr5auODAKgrLlUTY4HKRxx7XBZXehuD +YAQ6PmXDzQHe3qTWDLqO3tkE7hdWIpuPY/1NFgu3e3eM+SW10W2ZEi5PGrjm6gSSrj0RuVFCPYew +MYWveVqc/udOXpJPQ/yrOq2lEiZmueIM15jO1FillUAKt0SdE3QrwqXrIhWYENiLxQSfHY9g5QYb +m8+5eaA9oiM/Qj9r+hwDezCNzmzAv+YbX79nuIQZ1RXve8uQNjFiybwCq0Zfm/4aaJQ0PZCOrfbk +HQl/Sog4P75n/TSW9R28MHTLOO7VbKvU/PQAtwBbhTIWdjPp2KOZnQUAqhbm84F9b32qhm2tFXTT +xKJxqvQUfecyuB+81fFOvW8XAjnXDpVCOscAPukmYxHqC9FK/xidstd7LzrZlvvoHpKuE1XI2Sf2 +3EgbsCTBheN3nZqk8wwRHQ3ItBTutYJXCb8gWH8vIiPYcMt5bMlL8qkqyPyHK9caUPgn6C9D4zq9 +2Fdx/c6mUlv53U3t5fZvie27k5x2IXXwkkwp9y+cAS7+UEaeZAwUswdbxcJzbPEHXEUkFDWug/Fq +TYl6+rPYLWbwNof1K1MCAwEAAaOBqjCBpzAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKaz +4SsrSbbXc6GqlPUB53NlTKxQMA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUprPhKytJttdzoaqU +9QHnc2VMrFAwRAYDVR0gBD0wOzA5BgRVHSAAMDEwLwYIKwYBBQUHAgEWI2h0dHA6Ly9hY2VkaWNv +bS5lZGljb21ncm91cC5jb20vZG9jMA0GCSqGSIb3DQEBBQUAA4ICAQDOLAtSUWImfQwng4/F9tqg +aHtPkl7qpHMyEVNEskTLnewPeUKzEKbHDZ3Ltvo/Onzqv4hTGzz3gvoFNTPhNahXwOf9jU8/kzJP +eGYDdwdY6ZXIfj7QeQCM8htRM5u8lOk6e25SLTKeI6RF+7YuE7CLGLHdztUdp0J/Vb77W7tH1Pwk +zQSulgUV1qzOMPPKC8W64iLgpq0i5ALudBF/TP94HTXa5gI06xgSYXcGCRZj6hitoocf8seACQl1 +ThCojz2GuHURwCRiipZ7SkXp7FnFvmuD5uHorLUwHv4FB4D54SMNUI8FmP8sX+g7tq3PgbUhh8oI +KiMnMCArz+2UW6yyetLHKKGKC5tNSixthT8Jcjxn4tncB7rrZXtaAWPWkFtPF2Y9fwsZo5NjEFIq +nxQWWOLcpfShFosOkYuByptZ+thrkQdlVV9SH686+5DdaaVbnG0OLLb6zqylfDJKZ0DcMDQj3dcE +I2bw/FWAp/tmGYI1Z2JwOV5vx+qQQEQIHriy1tvuWacNGHk0vFQYXlPKNFHtRQrmjseCNj6nOGOp +MCwXEGCSn1WHElkQwg9naRHMTh5+Spqtr0CodaxWkHS4oJyleW/c6RrIaQXpuvoDs3zk4E7Czp3o +tkYNbn5XOmeUwssfnHdKZ05phkOTOPu220+DkdRgfks+KzgHVZhepA== +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority +======================================================= +-----BEGIN CERTIFICATE----- +MIICPDCCAaUCEDyRMcsf9tAbDpq40ES/Er4wDQYJKoZIhvcNAQEFBQAwXzELMAkGA1UEBhMCVVMx +FzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAzIFB1YmxpYyBQcmltYXJ5 +IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2MDEyOTAwMDAwMFoXDTI4MDgwMjIzNTk1OVow +XzELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAz +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDJXFme8huKARS0EN8EQNvjV69qRUCPhAwL0TPZ2RHP7gJYHyX3KqhEBarsAx94 +f56TuZoAqiN91qyFomNFx3InzPRMxnVx0jnvT0Lwdd8KkMaOIG+YD/isI19wKTakyYbnsZogy1Ol +hec9vn2a/iRFM9x2Fe0PonFkTGUugWhFpwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBABByUqkFFBky +CEHwxWsKzH4PIRnN5GfcX6kb5sroc50i2JhucwNhkcV8sEVAbkSdjbCxlnRhLQ2pRdKkkirWmnWX +bj9T/UWZYB2oK0z5XqcJ2HUw19JlYD1n1khVdWk/kfVIC0dpImmClr7JyDiGSnoscxlIaU5rfGW/ +D/xwzoiQ +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA 2009 +============================== +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYDVQQGEwJIVTER +MA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jv 
+c2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTAeFw0wOTA2MTYxMTMwMThaFw0yOTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UE +BwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUt +U3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvPkd6mJviZpWNwrZuuyjNA +fW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tccbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG +0IMZfcChEhyVbUr02MelTTMuhTlAdX4UfIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKA +pxn1ntxVUwOXewdI/5n7N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm +1HxdrtbCxkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1+rUC +AwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTLD8bf +QkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAbBgNVHREE +FDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqGSIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0o +lZMEyL/azXm4Q5DwpL7v8u8hmLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfX +I/OMn74dseGkddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c2Pm2G2JwCz02 +yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5tHMN1Rq41Bab2XD0h7lbwyYIi +LXpUq3DDfSJlgnCW +-----END CERTIFICATE----- + +E-Guven Kok Elektronik Sertifika Hizmet Saglayicisi +=================================================== +-----BEGIN CERTIFICATE----- +MIIDtjCCAp6gAwIBAgIQRJmNPMADJ72cdpW56tustTANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQG +EwJUUjEoMCYGA1UEChMfRWxla3Ryb25payBCaWxnaSBHdXZlbmxpZ2kgQS5TLjE8MDoGA1UEAxMz +ZS1HdXZlbiBLb2sgRWxla3Ryb25payBTZXJ0aWZpa2EgSGl6bWV0IFNhZ2xheWljaXNpMB4XDTA3 +MDEwNDExMzI0OFoXDTE3MDEwNDExMzI0OFowdTELMAkGA1UEBhMCVFIxKDAmBgNVBAoTH0VsZWt0 +cm9uaWsgQmlsZ2kgR3V2ZW5saWdpIEEuUy4xPDA6BgNVBAMTM2UtR3V2ZW4gS29rIEVsZWt0cm9u +aWsgU2VydGlmaWthIEhpem1ldCBTYWdsYXlpY2lzaTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAMMSIJ6wXgBljU5Gu4Bc6SwGl9XzcslwuedLZYDBS75+PNdUMZTe1RK6UxYC6lhj71vY +8+0qGqpxSKPcEC1fX+tcS5yWCEIlKBHMilpiAVDV6wlTL/jDj/6z/P2douNffb7tC+Bg62nsM+3Y +jfsSSYMAyYuXjDtzKjKzEve5TfL0TW3H5tYmNwjy2f1rXKPlSFxYvEK+A1qBuhw1DADT9SN+cTAI +JjjcJRFHLfO6IxClv7wC90Nex/6wN1CZew+TzuZDLMN+DfIcQ2Zgy2ExR4ejT669VmxMvLz4Bcpk +9Ok0oSy1c+HCPujIyTQlCFzz7abHlJ+tiEMl1+E5YP6sOVkCAwEAAaNCMEAwDgYDVR0PAQH/BAQD +AgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFJ/uRLOU1fqRTy7ZVZoEVtstxNulMA0GCSqG +SIb3DQEBBQUAA4IBAQB/X7lTW2M9dTLn+sR0GstG30ZpHFLPqk/CaOv/gKlR6D1id4k9CnU58W5d +F4dvaAXBlGzZXd/aslnLpRCKysw5zZ/rTt5S/wzw9JKp8mxTq5vSR6AfdPebmvEvFZ96ZDAYBzwq +D2fK/A+JYZ1lpTzlvBNbCNvj/+27BrtqBrF6T2XGgv0enIu1De5Iu7i9qgi0+6N8y5/NkHZchpZ4 +Vwpm+Vganf2XKWDeEaaQHBkc7gGWIjQ0LpH5t8Qn0Xvmv/uARFoW5evg1Ao4vOSR49XrXMGs3xtq +fJ7lddK2l4fbzIcrQzqECK+rPNv3PGYxhrCdU3nt+CPeQuMtgvEP5fqX +-----END CERTIFICATE----- + +GlobalSign Root CA - R3 +======================= +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4GA1UECxMXR2xv +YmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMzETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWt +iHL8RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsTgHeMCOFJ +0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmmKPZpO/bLyCiR5Z2KYVc3 +rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zdQQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjl +OCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZXriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2 
+xmmFghcCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE +FI/wS3+oLkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZURUm7 +lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMpjjM5RcOO5LlXbKr8 +EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK6fBdRoyV3XpYKBovHd7NADdBj+1E +bddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQXmcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18 +YIvDQVETI53O9zJrlAGomecsMx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7r +kpeDMdmztcpHWD9f +-----END CERTIFICATE----- + +Autoridad de Certificacion Firmaprofesional CIF A62634068 +========================================================= +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UEBhMCRVMxQjBA +BgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2 +MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEyMzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIw +QAYDVQQDDDlBdXRvcmlkYWQgZGUgQ2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBB +NjI2MzQwNjgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDD +Utd9thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQMcas9UX4P +B99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefGL9ItWY16Ck6WaVICqjaY +7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15iNA9wBj4gGFrO93IbJWyTdBSTo3OxDqqH +ECNZXyAFGUftaI6SEspd/NYrspI8IM/hX68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyI +plD9amML9ZMWGxmPsu2bm8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctX +MbScyJCyZ/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirjaEbsX +LZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/TKI8xWVvTyQKmtFLK +bpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF6NkBiDkal4ZkQdU7hwxu+g/GvUgU +vzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVhOSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1Ud +EwEB/wQIMAYBAf8CAQEwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNH +DhpkLzCBpgYDVR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp +cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBvACAAZABlACAA +bABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBlAGwAbwBuAGEAIAAwADgAMAAx +ADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx +51tkljYyGOylMnfX40S2wBEqgLk9am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qk +R71kMrv2JYSiJ0L1ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaP +T481PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS3a/DTg4f +Jl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5kSeTy36LssUzAKh3ntLFl +osS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF3dvd6qJ2gHN99ZwExEWN57kci57q13XR +crHedUTnQn3iV2t93Jm8PYMo6oCTjcVMZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoR +saS8I8nkvof/uZS2+F0gStRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTD +KCOM/iczQ0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQBjLMi +6Et8Vcad+qMUu2WFbm5PEn4KPJ2V +-----END CERTIFICATE----- + +Izenpe.com +========== +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4MQswCQYDVQQG +EwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wHhcNMDcxMjEz +MTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMu +QS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ +03rKDx6sp4boFmVqscIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAK +ClaOxdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6HLmYRY2xU ++zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFXuaOKmMPsOzTFlUFpfnXC +PCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQDyCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxT 
+OTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbK +F7jJeodWLBoBHmy+E60QrLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK +0GqfvEyNBjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8Lhij+ +0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIBQFqNeb+Lz0vPqhbB +leStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+HMh3/1uaD7euBUbl8agW7EekFwID +AQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2luZm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+ +SVpFTlBFIFMuQS4gLSBDSUYgQTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBG +NjIgUzgxQzBBBgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0O +BBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUAA4ICAQB4pgwWSp9MiDrAyw6l +Fn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWblaQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbga +kEyrkgPH7UIBzg/YsfqikuFgba56awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8q +hT/AQKM6WfxZSzwoJNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Cs +g1lwLDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCTVyvehQP5 +aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGkLhObNA5me0mrZJfQRsN5 +nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJbUjWumDqtujWTI6cfSN01RpiyEGjkpTHC +ClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZo +Q0iy2+tzJOeRf1SktoA+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1Z +WrOZyGlsQyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- + +Chambers of Commerce Root - 2008 +================================ +-----BEGIN CERTIFICATE----- +MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYDVQQGEwJFVTFD +MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv +bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu +QS4xKTAnBgNVBAMTIENoYW1iZXJzIG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEy +Mjk1MFoXDTM4MDczMTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNl +ZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQF +EwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJl +cnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW928sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKA +XuFixrYp4YFs8r/lfTJqVKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorj +h40G072QDuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR5gN/ +ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfLZEFHcpOrUMPrCXZk +NNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05aSd+pZgvMPMZ4fKecHePOjlO+Bd5g +D2vlGts/4+EhySnB8esHnFIbAURRPHsl18TlUlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331 +lubKgdaX8ZSD6e2wsWsSaR6s+12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ +0wlf2eOKNcx5Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj +ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAxhduub+84Mxh2 +EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNVHQ4EFgQU+SSsD7K1+HnA+mCI +G8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1+HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJ +BgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNh +bWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENh +bWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDiC +CQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUH +AgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAJASryI1 +wqM58C7e6bXpeHxIvj99RZJe6dqxGfwWPJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH 
+3qLPaYRgM+gQDROpI9CF5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbU +RWpGqOt1glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaHFoI6 +M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2pSB7+R5KBWIBpih1 +YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MDxvbxrN8y8NmBGuScvfaAFPDRLLmF +9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QGtjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcK +zBIKinmwPQN/aUv0NCB9szTqjktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvG +nrDQWzilm1DefhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg +OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZd0jQ +-----END CERTIFICATE----- + +Global Chambersign Root - 2008 +============================== +-----BEGIN CERTIFICATE----- +MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYDVQQGEwJFVTFD +MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv +bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu +QS4xJzAlBgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMx +NDBaFw0zODA3MzExMjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUg +Y3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJ +QTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD +aGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDf +VtPkOpt2RbQT2//BthmLN0EYlVJH6xedKYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXf +XjaOcNFccUMd2drvXNL7G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0 +ZJJ0YPP2zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4ddPB +/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyGHoiMvvKRhI9lNNgA +TH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2Id3UwD2ln58fQ1DJu7xsepeY7s2M +H/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3VyJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfe +Ox2YItaswTXbo6Al/3K1dh3ebeksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSF +HTynyQbehP9r6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh +wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsogzCtLkykPAgMB +AAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQWBBS5CcqcHtvTbDprru1U8VuT +BjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDprru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UE +BhMCRVUxQzBBBgNVBAcTOk1hZHJpZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJm +aXJtYS5jb20vYWRkcmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJm +aXJtYSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiCCQDJzdPp +1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUHAgEWHGh0 +dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAICIf3DekijZBZRG +/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZUohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6 +ReAJ3spED8IXDneRRXozX1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/s +dZ7LoR/xfxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVza2Mg +9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yydYhz2rXzdpjEetrHH +foUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMdSqlapskD7+3056huirRXhOukP9Du +qqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9OAP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETr +P3iZ8ntxPjzxmKfFGBI/5rsoM0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVq +c5iJWzouE4gev8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z +09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B +-----END CERTIFICATE----- + +Go Daddy Root Certificate Authority - G2 +======================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT 
+B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoTEUdvRGFkZHkuY29tLCBJbmMu +MTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8G +A1UEAxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKDE6bFIEMBO4Tx5oVJnyfq +9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD ++qK+ihVqf94Lw7YZFAXK6sOoBJQ7RnwyDfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutd +fMh8+7ArU6SSYmlRJQVhGkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMl +NAJWJwGRtDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFDqahQcQZyi27/a9 +BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmXWWcDYfF+OwYxdS2hII5PZYe096ac +vNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r +5N9ss4UXnT3ZJE95kTXWXwTrgIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYV +N8Gb5DKj7Tjo2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI4uJEvlz36hz1 +-----END CERTIFICATE----- + +Starfield Root Certificate Authority - G2 +========================================= +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVsZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0 +eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAw +DgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQg +VGVjaG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZpY2F0ZSBB +dXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3twQP89o/8ArFv +W59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMgnLRJdzIpVv257IzdIvpy3Cdhl+72WoTs +bhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNk +N3mSwOxGXn/hbVNMYq/NHwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7Nf +ZTD4p7dNdloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0HZbU +JtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0GCSqGSIb3DQEBCwUAA4IBAQARWfol +TwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjUsHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx +4mcujJUDJi5DnUox9g61DLu34jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUw +F5okxBDgBPfg8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1mMpYjn0q7pBZ +c2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +Starfield Services Root Certificate Authority - G2 +================================================== +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVsZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRl +IEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNV +BAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxT +dGFyZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2VydmljZXMg +Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20pOsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2 
+h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm28xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4Pa +hHQUw2eeBGg6345AWh1KTs9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLP +LJGmpufehRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk6mFB +rMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+qAdcwKziIorhtSpzyEZGDMA0GCSqG +SIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMIbw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPP +E95Dz+I0swSdHynVv/heyNXBve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTy +xQGjhdByPq1zqwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn0q23KXB56jza +YyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCNsSi6 +-----END CERTIFICATE----- + +AffirmTrust Commercial +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMB4XDTEw +MDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6Eqdb +DuKPHx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yrba0F8PrV +C8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPALMeIrJmqbTFeurCA+ukV6 +BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1yHp52UKqK39c/s4mT6NmgTWvRLpUHhww +MmWd5jyTXlBOeuM61G7MGvv50jeuJCqrVwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNV +HQ4EFgQUnZPGU4teyq8/nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYGXUPG +hi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNjvbz4YYCanrHOQnDi +qX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivtZ8SOyUOyXGsViQK8YvxO8rUzqrJv +0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9gN53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0kh +sUlHRUe072o0EclNmsxZt9YCnlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +AffirmTrust Networking +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMB4XDTEw +MDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SE +Hi3yYJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbuakCNrmreI +dIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRLQESxG9fhwoXA3hA/Pe24 +/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gb +h+0t+nvujArjqWaJGctB+d1ENmHP4ndGyH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNV +HQ4EFgQUBx/S55zawm6iQLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfOtDIu +UFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzuQY0x2+c06lkh1QF6 +12S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZLgo/bNjR9eUJtGxUAArgFU2HdW23 +WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4uolu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9 +/ZFvgrG+CJPbFEfxojfHRZ48x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +AffirmTrust Premium +=================== +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMB4XDTEwMDEy 
+OTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRy +dXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxBLfqV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtn +BKAQJG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ+jjeRFcV +5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrSs8PhaJyJ+HoAVt70VZVs ++7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmd +GPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d770O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5R +p9EixAqnOEhss/n/fauGV+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NI +S+LI+H+SqHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S5u04 +6uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4IaC1nEWTJ3s7xgaVY5 +/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TXOwF0lkLgAOIua+rF7nKsu7/+6qqo ++Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYEFJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByv +MiPIs0laUZx2KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B8OWycvpEgjNC +6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQMKSOyARiqcTtNd56l+0OOF6S +L5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK ++4w1IX2COPKpVJEZNZOUbWo6xbLQu4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmV +BtWVyuEklut89pMFu+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFg +IxpHYoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8GKa1qF60 +g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaORtGdFNrHF+QFlozEJLUb +zxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6eKeC2uAloGRwYQw== +-----END CERTIFICATE----- + +AffirmTrust Premium ECC +======================= +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMCVVMxFDASBgNV +BAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQcmVtaXVtIEVDQzAeFw0xMDAx +MjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJBgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1U +cnVzdDEgMB4GA1UEAwwXQWZmaXJtVHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAQNMF4bFZ0D0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQ +N8O9ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0GA1UdDgQW +BBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAK +BggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/VsaobgxCd05DhT1wV/GzTjxi+zygk8N53X +57hG8f2h4nECMEJZh0PUUd+60wkyWs6Iflc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKM +eQ== +-----END CERTIFICATE----- + +Certum Trusted Network CA +========================= +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBMMSIwIAYDVQQK +ExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBUcnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIy +MTIwNzM3WhcNMjkxMjMxMTIwNzM3WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBU +ZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MSIwIAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rHUV+rpDKmYYe2bg+G0jAC +l/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LMTXPb865Px1bVWqeWifrzq2jUI4ZZJ88J +J7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVUBBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4 +fOQtf/WsX+sWn7Et0brMkUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0 +cvW0QM8xAcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNVHRMB 
+Af8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNVHQ8BAf8EBAMCAQYw +DQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15ysHhE49wcrwn9I0j6vSrEuVUEtRCj +jSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfLI9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1 +mS1FhIrlQgnXdAIv94nYmem8J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5aj +Zt3hrvJBW8qYVoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- + +Certinomis - Autorité Racine +============================= +-----BEGIN CERTIFICATE----- +MIIFnDCCA4SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJGUjETMBEGA1UEChMK +Q2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxJjAkBgNVBAMMHUNlcnRpbm9taXMg +LSBBdXRvcml0w6kgUmFjaW5lMB4XDTA4MDkxNzA4Mjg1OVoXDTI4MDkxNzA4Mjg1OVowYzELMAkG +A1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMSYw +JAYDVQQDDB1DZXJ0aW5vbWlzIC0gQXV0b3JpdMOpIFJhY2luZTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAJ2Fn4bT46/HsmtuM+Cet0I0VZ35gb5j2CN2DpdUzZlMGvE5x4jYF1AMnmHa +wE5V3udauHpOd4cN5bjr+p5eex7Ezyh0x5P1FMYiKAT5kcOrJ3NqDi5N8y4oH3DfVS9O7cdxbwly +Lu3VMpfQ8Vh30WC8Tl7bmoT2R2FFK/ZQpn9qcSdIhDWerP5pqZ56XjUl+rSnSTV3lqc2W+HN3yNw +2F1MpQiD8aYkOBOo7C+ooWfHpi2GR+6K/OybDnT0K0kCe5B1jPyZOQE51kqJ5Z52qz6WKDgmi92N +jMD2AR5vpTESOH2VwnHu7XSu5DaiQ3XV8QCb4uTXzEIDS3h65X27uK4uIJPT5GHfceF2Z5c/tt9q +c1pkIuVC28+BA5PY9OMQ4HL2AHCs8MF6DwV/zzRpRbWT5BnbUhYjBYkOjUjkJW+zeL9i9Qf6lSTC +lrLooyPCXQP8w9PlfMl1I9f09bze5N/NgL+RiH2nE7Q5uiy6vdFrzPOlKO1Enn1So2+WLhl+HPNb +xxaOu2B9d2ZHVIIAEWBsMsGoOBvrbpgT1u449fCfDu/+MYHB0iSVL1N6aaLwD4ZFjliCK0wi1F6g +530mJ0jfJUaNSih8hp75mxpZuWW/Bd22Ql095gBIgl4g9xGC3srYn+Y3RyYe63j3YcNBZFgCQfna +4NH4+ej9Uji29YnfAgMBAAGjWzBZMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBQNjLZh2kS40RR9w759XkjwzspqsDAXBgNVHSAEEDAOMAwGCiqBegFWAgIAAQEwDQYJ +KoZIhvcNAQEFBQADggIBACQ+YAZ+He86PtvqrxyaLAEL9MW12Ukx9F1BjYkMTv9sov3/4gbIOZ/x +WqndIlgVqIrTseYyCYIDbNc/CMf4uboAbbnW/FIyXaR/pDGUu7ZMOH8oMDX/nyNTt7buFHAAQCva +R6s0fl6nVjBhK4tDrP22iCj1a7Y+YEq6QpA0Z43q619FVDsXrIvkxmUP7tCMXWY5zjKn2BCXwH40 +nJ+U8/aGH88bc62UeYdocMMzpXDn2NU4lG9jeeu/Cg4I58UvD0KgKxRA/yHgBcUn4YQRE7rWhh1B +CxMjidPJC+iKunqjo3M3NYB9Ergzd0A4wPpeMNLytqOx1qKVl4GbUu1pTP+A5FPbVFsDbVRfsbjv +JL1vnxHDx2TCDyhihWZeGnuyt++uNckZM6i4J9szVb9o4XVIRFb7zdNIu0eJOqxp9YDG5ERQL1TE +qkPFMTFYvZbF6nVsmnWxTfj3l/+WFvKXTej28xH5On2KOG4Ey+HTRRWqpdEdnV1j6CTmNhTih60b +WfVEm/vXd3wfAXBioSAaosUaKPQhA+4u2cGA6rnZgtZbdsLLO7XSAPCjDuGtbkD326C00EauFddE +wk01+dIL8hf2rGbVJLJP0RyZwG71fet0BLj5TXcJ17TPBzAJ8bgAVtkXFhYKK4bfjwEZGuW7gmP/ +vgt2Fl43N+bYdJeimUV5 +-----END CERTIFICATE----- + +Root CA Generalitat Valenciana +============================== +-----BEGIN CERTIFICATE----- +MIIGizCCBXOgAwIBAgIEO0XlaDANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJFUzEfMB0GA1UE +ChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290 +IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwHhcNMDEwNzA2MTYyMjQ3WhcNMjEwNzAxMTUyMjQ3 +WjBoMQswCQYDVQQGEwJFUzEfMB0GA1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UE +CxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGKqtXETcvIorKA3Qdyu0togu8M1JAJke+WmmmO3I2 +F0zo37i7L3bhQEZ0ZQKQUgi0/6iMweDHiVYQOTPvaLRfX9ptI6GJXiKjSgbwJ/BXufjpTjJ3Cj9B +ZPPrZe52/lSqfR0grvPXdMIKX/UIKFIIzFVd0g/bmoGlu6GzwZTNVOAydTGRGmKy3nXiz0+J2ZGQ +D0EbtFpKd71ng+CT516nDOeB0/RSrFOyA8dEJvt55cs0YFAQexvba9dHq198aMpunUEDEO5rmXte +JajCq+TA81yc477OMUxkHl6AovWDfgzWyoxVjr7gvkkHD6MkQXpYHYTqWBLI4bft75PelAgxAgMB +AAGjggM7MIIDNzAyBggrBgEFBQcBAQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly9vY3NwLnBraS5n 
+dmEuZXMwEgYDVR0TAQH/BAgwBgEB/wIBAjCCAjQGA1UdIASCAiswggInMIICIwYKKwYBBAG/VQIB +ADCCAhMwggHoBggrBgEFBQcCAjCCAdoeggHWAEEAdQB0AG8AcgBpAGQAYQBkACAAZABlACAAQwBl +AHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAFIAYQDtAHoAIABkAGUAIABsAGEAIABHAGUAbgBlAHIA +YQBsAGkAdABhAHQAIABWAGEAbABlAG4AYwBpAGEAbgBhAC4ADQAKAEwAYQAgAEQAZQBjAGwAYQBy +AGEAYwBpAPMAbgAgAGQAZQAgAFAAcgDhAGMAdABpAGMAYQBzACAAZABlACAAQwBlAHIAdABpAGYA +aQBjAGEAYwBpAPMAbgAgAHEAdQBlACAAcgBpAGcAZQAgAGUAbAAgAGYAdQBuAGMAaQBvAG4AYQBt +AGkAZQBuAHQAbwAgAGQAZQAgAGwAYQAgAHAAcgBlAHMAZQBuAHQAZQAgAEEAdQB0AG8AcgBpAGQA +YQBkACAAZABlACAAQwBlAHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAHMAZQAgAGUAbgBjAHUAZQBu +AHQAcgBhACAAZQBuACAAbABhACAAZABpAHIAZQBjAGMAaQDzAG4AIAB3AGUAYgAgAGgAdAB0AHAA +OgAvAC8AdwB3AHcALgBwAGsAaQAuAGcAdgBhAC4AZQBzAC8AYwBwAHMwJQYIKwYBBQUHAgEWGWh0 +dHA6Ly93d3cucGtpLmd2YS5lcy9jcHMwHQYDVR0OBBYEFHs100DSHHgZZu90ECjcPk+yeAT8MIGV +BgNVHSMEgY0wgYqAFHs100DSHHgZZu90ECjcPk+yeAT8oWykajBoMQswCQYDVQQGEwJFUzEfMB0G +A1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5S +b290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmGCBDtF5WgwDQYJKoZIhvcNAQEFBQADggEBACRh +TvW1yEICKrNcda3FbcrnlD+laJWIwVTAEGmiEi8YPyVQqHxK6sYJ2fR1xkDar1CdPaUWu20xxsdz +Ckj+IHLtb8zog2EWRpABlUt9jppSCS/2bxzkoXHPjCpaF3ODR00PNvsETUlR4hTJZGH71BTg9J63 +NI8KJr2XXPR5OkowGcytT6CYirQxlyric21+eLj4iIlPsSKRZEv1UN4D2+XFducTZnV+ZfsBn5OH +iJ35Rld8TWCvmHMTI6QgkYH60GFmuH3Rr9ZvHmw96RH9qfmCIoaZM3Fa6hlXPZHNqcCjbgcTpsnt ++GijnsNacgmHKNHEc8RzGF9QdRYxn7fofMM= +-----END CERTIFICATE----- + +A-Trust-nQual-03 +================ +-----BEGIN CERTIFICATE----- +MIIDzzCCAregAwIBAgIDAWweMA0GCSqGSIb3DQEBBQUAMIGNMQswCQYDVQQGEwJBVDFIMEYGA1UE +Cgw/QS1UcnVzdCBHZXMuIGYuIFNpY2hlcmhlaXRzc3lzdGVtZSBpbSBlbGVrdHIuIERhdGVudmVy +a2VociBHbWJIMRkwFwYDVQQLDBBBLVRydXN0LW5RdWFsLTAzMRkwFwYDVQQDDBBBLVRydXN0LW5R +dWFsLTAzMB4XDTA1MDgxNzIyMDAwMFoXDTE1MDgxNzIyMDAwMFowgY0xCzAJBgNVBAYTAkFUMUgw +RgYDVQQKDD9BLVRydXN0IEdlcy4gZi4gU2ljaGVyaGVpdHNzeXN0ZW1lIGltIGVsZWt0ci4gRGF0 +ZW52ZXJrZWhyIEdtYkgxGTAXBgNVBAsMEEEtVHJ1c3QtblF1YWwtMDMxGTAXBgNVBAMMEEEtVHJ1 +c3QtblF1YWwtMDMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtPWFuA/OQO8BBC4SA +zewqo51ru27CQoT3URThoKgtUaNR8t4j8DRE/5TrzAUjlUC5B3ilJfYKvUWG6Nm9wASOhURh73+n +yfrBJcyFLGM/BWBzSQXgYHiVEEvc+RFZznF/QJuKqiTfC0Li21a8StKlDJu3Qz7dg9MmEALP6iPE +SU7l0+m0iKsMrmKS1GWH2WrX9IWf5DMiJaXlyDO6w8dB3F/GaswADm0yqLaHNgBid5seHzTLkDx4 +iHQF63n1k3Flyp3HaxgtPVxO59X4PzF9j4fsCiIvI+n+u33J4PTs63zEsMMtYrWacdaxaujs2e3V +cuy+VwHOBVWf3tFgiBCzAgMBAAGjNjA0MA8GA1UdEwEB/wQFMAMBAf8wEQYDVR0OBAoECERqlWdV +eRFPMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAVdRU0VlIXLOThaq/Yy/kgM40 +ozRiPvbY7meIMQQDbwvUB/tOdQ/TLtPAF8fGKOwGDREkDg6lXb+MshOWcdzUzg4NCmgybLlBMRmr +sQd7TZjTXLDR8KdCoLXEjq/+8T/0709GAHbrAvv5ndJAlseIOrifEXnzgGWovR/TeIGgUUw3tKZd +JXDRZslo+S4RFGjxVJgIrCaSD96JntT6s3kr0qN51OyLrIdTaEJMUVF0HhsnLuP1Hyl0Te2v9+GS +mYHovjrHF1D2t8b8m7CKa9aIA5GPBnc6hQLdmNVDeD/GMBWsm2vLV7eJUYs66MmEDNuxUCAKGkq6 +ahq97BvIxYSazQ== +-----END CERTIFICATE----- + +TWCA Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJ +VEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMzWhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQG +EwJUVzESMBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NB +IFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFEAcK0HMMx +QhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HHK3XLfJ+utdGdIzdjp9xC 
+oi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeXRfwZVzsrb+RH9JlF/h3x+JejiB03HFyP +4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/zrX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1r +y+UPizgN7gr8/g+YnzAx3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkqhkiG +9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeCMErJk/9q56YAf4lC +mtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdlsXebQ79NqZp4VKIV66IIArB6nCWlW +QtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62Dlhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVY +T0bf+215WfKEIlKuD8z7fDvnaspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocny +Yh0igzyXxfkZYiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- + +Security Communication RootCA2 +============================== +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMeU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoXDTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMC +SlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3Vy +aXR5IENvbW11bmljYXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANAVOVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGrzbl+dp++ ++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVMVAX3NuRFg3sUZdbcDE3R +3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQhNBqyjoGADdH5H5XTz+L62e4iKrFvlNV +spHEfbmwhRkGeC7bYRr6hfVKkaHnFtWOojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1K +EOtOghY6rCcMU/Gt1SSwawNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8 +QIH4D5csOPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEB +CwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpFcoJxDjrSzG+ntKEj +u/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXcokgfGT+Ok+vx+hfuzU7jBBJV1uXk +3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6q +tnRGEmyR7jTV7JqR50S+kDFy1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29 +mvVXIwAHIRc/SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- + +EC-ACC +====== +-----BEGIN CERTIFICATE----- +MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB8zELMAkGA1UE +BhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2VydGlmaWNhY2lvIChOSUYgUS0w +ODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYD +VQQLEyxWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UE +CxMsSmVyYXJxdWlhIEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMT +BkVDLUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQGEwJFUzE7 +MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8gKE5JRiBRLTA4MDExNzYt +SSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBDZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZl +Z2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQubmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJh +cnF1aWEgRW50aXRhdHMgZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUND +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R85iK +w5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm4CgPukLjbo73FCeT +ae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaVHMf5NLWUhdWZXqBIoH7nF2W4onW4 +HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNdQlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0a +E9jD2z3Il3rucO2n5nzbcc8tlGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw +0JDnJwIDAQABo4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4opvpXY0wfwYD +VR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBodHRwczovL3d3dy5jYXRjZXJ0 
+Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5l +dC92ZXJhcnJlbCAwDQYJKoZIhvcNAQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJ +lF7W2u++AVtd0x7Y/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNa +Al6kSBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhyRp/7SNVe +l+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOSAgu+TGbrIP65y7WZf+a2 +E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xlnJ2lYJU6Un/10asIbvPuW/mIPX64b24D +5EI= +-----END CERTIFICATE----- + +Hellenic Academic and Research Institutions RootCA 2011 +======================================================= +-----BEGIN CERTIFICATE----- +MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1IxRDBCBgNVBAoT +O0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9y +aXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z +IFJvb3RDQSAyMDExMB4XDTExMTIwNjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYT +AkdSMUQwQgYDVQQKEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z +IENlcnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNo +IEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPzdYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI +1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJfel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa +71HFK9+WXesyHgLacEnsbgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u +8yBRQlqD75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSPFEDH +3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNVHRMBAf8EBTADAQH/ +MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp5dgTBCPuQSUwRwYDVR0eBEAwPqA8 +MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQub3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQu +b3JnMA0GCSqGSIb3DQEBBQUAA4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVt +XdMiKahsog2p6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 +TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7dIsXRSZMFpGD +/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8AcysNnq/onN694/BtZqhFLKPM58N +7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXIl7WdmplNsDz4SgCbZN2fOUvRJ9e4 +-----END CERTIFICATE----- + +Actalis Authentication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCSVQxDjAM +BgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UE +AwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDky +MjExMjIwMlowazELMAkGA1UEBhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlz +IFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNvUTufClrJ +wkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX4ay8IMKx4INRimlNAJZa +by/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9KK3giq0itFZljoZUj5NDKd45RnijMCO6 +zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1f +YVEiVRvjRuPjPdA1YprbrxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2 +oxgkg4YQ51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2Fbe8l +EfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxeKF+w6D9Fz8+vm2/7 +hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4Fv6MGn8i1zeQf1xcGDXqVdFUNaBr8 +EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbnfpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5 +jF66CyCU3nuDuP/jVo23Eek7jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLY +iDrIn3hm7YnzezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt 
+ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQALe3KHwGCmSUyI +WOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70jsNjLiNmsGe+b7bAEzlgqqI0 +JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDzWochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKx +K3JCaKygvU5a2hi/a5iB0P2avl4VSM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+ +Xlff1ANATIGk0k9jpwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC +4yyXX04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+OkfcvHlXHo +2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7RK4X9p2jIugErsWx0Hbhz +lefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btUZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXem +OR/qnuOf0GZvBeyqdn6/axag67XH/JJULysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9 +vwGYT7JZVEc+NHt4bVaTLnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- + +Trustis FPS Root CA +=================== +-----BEGIN CERTIFICATE----- +MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQG +EwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQLExNUcnVzdGlzIEZQUyBSb290 +IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTExMzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNV +BAoTD1RydXN0aXMgTGltaXRlZDEcMBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQ +RUN+AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihHiTHcDnlk +H5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjjvSkCqPoc4Vu5g6hBSLwa +cY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zt +o3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlBOrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEA +AaNTMFEwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAd +BgNVHQ4EFgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01GX2c +GE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmWzaD+vkAMXBJV+JOC +yinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP41BIy+Q7DsdwyhEQsb8tGD+pmQQ9P +8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZEf1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHV +l/9D7S3B2l0pKoU/rGXuhg8FjZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYl +iB6XzCGcKQENZetX2fNXlrtIzYE= +-----END CERTIFICATE----- + +StartCom Certification Authority +================================ +-----BEGIN CERTIFICATE----- +MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu +ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 +NjM3WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk +LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg +U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y +o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ +Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d +eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt +2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z +6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ +osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ +untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc +UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT +37uMdBNSSwIDAQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFulF2mHMMo0aEPQ 
+Qa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCCATgwLgYIKwYBBQUHAgEWImh0 +dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cu +c3RhcnRzc2wuY29tL2ludGVybWVkaWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENv +bW1lcmNpYWwgKFN0YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0 +aGUgc2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93d3cuc3RhcnRzc2wuY29t +L3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBG +cmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5 +fPGFf59Jb2vKXfuM/gTFwWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWm +N3PH/UvSTa0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst0OcN +Org+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNcpRJvkrKTlMeIFw6T +tn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKlCcWw0bdT82AUuoVpaiF8H3VhFyAX +e2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVFP0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA +2MFrLH9ZXF2RsXAiV+uKa0hK1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBs +HvUwyKMQ5bLmKhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE +JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ8dCAWZvLMdib +D4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnmfyWl8kgAwKQB2j8= +-----END CERTIFICATE----- + +StartCom Certification Authority G2 +=================================== +-----BEGIN CERTIFICATE----- +MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +RzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UE +ChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgRzIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8O +o1XJJZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsDvfOpL9HG +4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnooD/Uefyf3lLE3PbfHkffi +Aez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/Q0kGi4xDuFby2X8hQxfqp0iVAXV16iul +Q5XqFYSdCI0mblWbq9zSOdIxHWDirMxWRST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbs +O+wmETRIjfaAKxojAuuKHDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8H +vKTlXcxNnw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM0D4L +nMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/iUUjXuG+v+E5+M5iS +FGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9Ha90OrInwMEePnWjFqmveiJdnxMa +z6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHgTuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJ +KoZIhvcNAQELBQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K +2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfXUfEpY9Z1zRbk +J4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl6/2o1PXWT6RbdejF0mCy2wl+ +JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG +/+gyRr61M3Z3qAFdlsHB1b6uJcDJHgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTc +nIhT76IxW1hPkWLIwpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/Xld +blhYXzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5lIxKVCCIc +l85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoohdVddLHRDiBYmxOlsGOm +7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulrso8uBtjRkcfGEvRM/TAXw8HaOFvjqerm +obp573PYtlNXLfbQ4ddI +-----END CERTIFICATE----- + +Buypass Class 2 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU 
+QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMiBSb290IENBMB4X +DTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1owTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1 +g1Lr6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPVL4O2fuPn +9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC911K2GScuVr1QGbNgGE41b +/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHxMlAQTn/0hpPshNOOvEu/XAFOBz3cFIqU +CqTqc/sLUegTBxj6DvEr0VQVfTzh97QZQmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeff +awrbD02TTqigzXsu8lkBarcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgI +zRFo1clrUs3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLiFRhn +Bkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRSP/TizPJhk9H9Z2vX +Uq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN9SG9dKpN6nIDSdvHXx1iY8f93ZHs +M+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxPAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFMmAd+BikoL1RpzzuvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAU18h9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3tOluwlN5E40EI +osHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo+fsicdl9sz1Gv7SEr5AcD48S +aq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYd +DnkM/crqJIByw5c/8nerQyIKx+u2DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWD +LfJ6v9r9jv6ly0UsH8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0 +oyLQI+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK75t98biGC +wWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h3PFaTWwyI0PurKju7koS +CTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPzY11aWOIv4x3kqdbQCtCev9eBCfHJxyYN +rJgWVqA= +-----END CERTIFICATE----- + +Buypass Class 3 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMyBSb290IENBMB4X +DTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFowTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRH +sJ8YZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3EN3coTRiR +5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9tznDDgFHmV0ST9tD+leh +7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX0DJq1l1sDPGzbjniazEuOQAnFN44wOwZ +ZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH +2xc519woe2v1n/MuwU8XKhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV +/afmiSTYzIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvSO1UQ +RwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D34xFMFbG02SrZvPA +Xpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgPK9Dx2hzLabjKSWJtyNBjYt1gD1iq +j6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFEe4zf/lb+74suwvTg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAACAjQTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXSIGrs/CIBKM+G +uIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2HJLw5QY33KbmkJs4j1xrG0aG +Q0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsaO5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8 +ZORK15FTAaggiG6cX0S5y2CBNOxv033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2 +KSb12tjE8nVhz36udmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz 
+6MkEkbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg413OEMXbug +UZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvDu79leNKGef9JOxqDDPDe +eOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq4/g7u9xN12TyUb7mqqta6THuBrxzvxNi +Cp/HuZc= +-----END CERTIFICATE----- + +T-TeleSec GlobalRoot Class 3 +============================ +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM +IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU +cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgx +MDAxMTAyOTU2WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz +dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD +ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN8ELg63iIVl6bmlQdTQyK +9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/RLyTPWGrTs0NvvAgJ1gORH8EGoel15YU +NpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZF +iP0Zf3WHHx+xGwpzJFu5ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W +0eDrXltMEnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1A/d2O2GCahKqGFPr +AyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOyWL6ukK2YJ5f+AbGwUgC4TeQbIXQb +fsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzT +ucpH9sry9uetuUg/vBa3wW306gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7h +P0HHRwA11fXT91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4pTpPDpFQUWw== +-----END CERTIFICATE----- + +EE Certification Centre Root CA +=============================== +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQG +EwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1czEoMCYGA1UEAwwfRUUgQ2Vy +dGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYGCSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIw +MTAxMDMwMTAxMDMwWhgPMjAzMDEyMTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlB +UyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRy +ZSBSb290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUyeuuOF0+W2Ap7kaJjbMeM +TC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvObntl8jixwKIy72KyaOBhU8E2lf/slLo2 +rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIwWFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw +93X2PaRka9ZP585ArQ/dMtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtN +P2MbRMNE1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/zQas8fElyalL1BSZ +MEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYBBQUHAwMGCCsGAQUFBwMEBggrBgEF +BQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEFBQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+Rj +xY6hUFaTlrg4wCQiZrxTFGGVv9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqM +lIpPnTX/dqQGE5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u +uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIWiAYLtqZLICjU +3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/vGVCJYMzpJJUPwssd8m92kMfM +dcGWxZ0= +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 2007 +================================================= +-----BEGIN CERTIFICATE----- +MIIEPTCCAyWgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvzE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEP 
+MA0GA1UEBwwGQW5rYXJhMV4wXAYDVQQKDFVUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUg +QmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgQXJhbMSxayAyMDA3MB4X +DTA3MTIyNTE4MzcxOVoXDTE3MTIyMjE4MzcxOVowgb8xPzA9BgNVBAMMNlTDnFJLVFJVU1QgRWxl +a3Ryb25payBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTELMAkGA1UEBhMCVFIxDzAN +BgNVBAcMBkFua2FyYTFeMFwGA1UECgxVVMOcUktUUlVTVCBCaWxnaSDEsGxldGnFn2ltIHZlIEJp +bGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkgQS7Fni4gKGMpIEFyYWzEsWsgMjAwNzCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKu3PgqMyKVYFeaK7yc9SrToJdPNM8Ig3BnuiD9N +YvDdE3ePYakqtdTyuTFYKTsvP2qcb3N2Je40IIDu6rfwxArNK4aUyeNgsURSsloptJGXg9i3phQv +KUmi8wUG+7RP2qFsmmaf8EMJyupyj+sA1zU511YXRxcw9L6/P8JorzZAwan0qafoEGsIiveGHtya +KhUG9qPw9ODHFNRRf8+0222vR5YXm3dx2KdxnSQM9pQ/hTEST7ruToK4uT6PIzdezKKqdfcYbwnT +rqdUKDT74eA7YH2gvnmJhsifLfkKS8RQouf9eRbHegsYz85M733WB2+Y8a+xwXrXgTW4qhe04MsC +AwEAAaNCMEAwHQYDVR0OBBYEFCnFkKslrxHkYb+j/4hhkeYO/pyBMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAQDdr4Ouwo0RSVgrESLFF6QSU2TJ/s +Px+EnWVUXKgWAkD6bho3hO9ynYYKVZ1WKKxmLNA6VpM0ByWtCLCPyA8JWcqdmBzlVPi5RX9ql2+I +aE1KBiY3iAIOtsbWcpnOa3faYjGkVh+uX4132l32iPwa2Z61gfAyuOOI0JzzaqC5mxRZNTZPz/OO +Xl0XrRWV2N2y1RVuAE6zS89mlOTgzbUF2mNXi+WzqtvALhyQRNsaXRik7r4EW5nVcV9VZWRi1aKb +BFmGyGJ353yCRWo9F7/snXUMrqNvWtMvmDb08PUZqxFdyKbjKlhqQgnDvZImZjINXQhVdP+MmNAK +poRq0Tl9 +-----END CERTIFICATE----- + +D-TRUST Root Class 3 CA 2 2009 +============================== +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTAe +Fw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NThaME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxE +LVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOAD +ER03UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42tSHKXzlA +BF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9RySPocq60vFYJfxLLHLGv +KZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsMlFqVlNpQmvH/pStmMaTJOKDfHR+4CS7z +p+hnUquVH+BGPtikw8paxTGA6Eian5Rp/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUC +AwEAAaOCARowggEWMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ +4PGEMA4GA1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVjdG9y +eS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUyMENBJTIwMiUyMDIw +MDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRlcmV2b2NhdGlvbmxpc3QwQ6BBoD+G +PWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3JsL2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAw +OS5jcmwwDQYJKoZIhvcNAQELBQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm +2H6NMLVwMeniacfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4KzCUqNQT4YJEV +dT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8PIWmawomDeCTmGCufsYkl4ph +X5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3YJohw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- + +D-TRUST Root Class 3 CA 2 EV 2009 +================================= +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAw +OTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUwNDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAw +OTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfS +egpnljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM03TP1YtHh 
+zRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6ZqQTMFexgaDbtCHu39b+T +7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lRp75mpoo6Kr3HGrHhFPC+Oh25z1uxav60 +sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure35 +11H3a6UCAwEAAaOCASQwggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyv +cop9NteaHNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFwOi8v +ZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xhc3MlMjAzJTIwQ0El +MjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1ERT9jZXJ0aWZpY2F0ZXJldm9jYXRp +b25saXN0MEagRKBChkBodHRwOi8vd3d3LmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xh +c3NfM19jYV8yX2V2XzIwMDkuY3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+ +PPoeUSbrh/Yp3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05 +nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNFCSuGdXzfX2lX +ANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7naxpeG0ILD5EJt/rDiZE4OJudA +NCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqXKVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVv +w9y4AyHqnxbxLFS1 +-----END CERTIFICATE----- + +PSCProcert +========== +-----BEGIN CERTIFICATE----- +MIIJhjCCB26gAwIBAgIBCzANBgkqhkiG9w0BAQsFADCCAR4xPjA8BgNVBAMTNUF1dG9yaWRhZCBk +ZSBDZXJ0aWZpY2FjaW9uIFJhaXogZGVsIEVzdGFkbyBWZW5lem9sYW5vMQswCQYDVQQGEwJWRTEQ +MA4GA1UEBxMHQ2FyYWNhczEZMBcGA1UECBMQRGlzdHJpdG8gQ2FwaXRhbDE2MDQGA1UEChMtU2lz +dGVtYSBOYWNpb25hbCBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMUMwQQYDVQQLEzpTdXBl +cmludGVuZGVuY2lhIGRlIFNlcnZpY2lvcyBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMSUw +IwYJKoZIhvcNAQkBFhZhY3JhaXpAc3VzY2VydGUuZ29iLnZlMB4XDTEwMTIyODE2NTEwMFoXDTIw +MTIyNTIzNTk1OVowgdExJjAkBgkqhkiG9w0BCQEWF2NvbnRhY3RvQHByb2NlcnQubmV0LnZlMQ8w +DQYDVQQHEwZDaGFjYW8xEDAOBgNVBAgTB01pcmFuZGExKjAoBgNVBAsTIVByb3ZlZWRvciBkZSBD +ZXJ0aWZpY2Fkb3MgUFJPQ0VSVDE2MDQGA1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0aWZp +Y2FjaW9uIEVsZWN0cm9uaWNhMQswCQYDVQQGEwJWRTETMBEGA1UEAxMKUFNDUHJvY2VydDCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANW39KOUM6FGqVVhSQ2oh3NekS1wwQYalNo97BVC +wfWMrmoX8Yqt/ICV6oNEolt6Vc5Pp6XVurgfoCfAUFM+jbnADrgV3NZs+J74BCXfgI8Qhd19L3uA +3VcAZCP4bsm+lU/hdezgfl6VzbHvvnpC2Mks0+saGiKLt38GieU89RLAu9MLmV+QfI4tL3czkkoh +RqipCKzx9hEC2ZUWno0vluYC3XXCFCpa1sl9JcLB/KpnheLsvtF8PPqv1W7/U0HU9TI4seJfxPmO +EO8GqQKJ/+MMbpfg353bIdD0PghpbNjU5Db4g7ayNo+c7zo3Fn2/omnXO1ty0K+qP1xmk6wKImG2 +0qCZyFSTXai20b1dCl53lKItwIKOvMoDKjSuc/HUtQy9vmebVOvh+qBa7Dh+PsHMosdEMXXqP+UH +0quhJZb25uSgXTcYOWEAM11G1ADEtMo88aKjPvM6/2kwLkDd9p+cJsmWN63nOaK/6mnbVSKVUyqU +td+tFjiBdWbjxywbk5yqjKPK2Ww8F22c3HxT4CAnQzb5EuE8XL1mv6JpIzi4mWCZDlZTOpx+FIyw +Bm/xhnaQr/2v/pDGj59/i5IjnOcVdo/Vi5QTcmn7K2FjiO/mpF7moxdqWEfLcU8UC17IAggmosvp +r2uKGcfLFFb14dq12fy/czja+eevbqQ34gcnAgMBAAGjggMXMIIDEzASBgNVHRMBAf8ECDAGAQH/ +AgEBMDcGA1UdEgQwMC6CD3N1c2NlcnRlLmdvYi52ZaAbBgVghl4CAqASDBBSSUYtRy0yMDAwNDAz +Ni0wMB0GA1UdDgQWBBRBDxk4qpl/Qguk1yeYVKIXTC1RVDCCAVAGA1UdIwSCAUcwggFDgBStuyId +xuDSAaj9dlBSk+2YwU2u06GCASakggEiMIIBHjE+MDwGA1UEAxM1QXV0b3JpZGFkIGRlIENlcnRp +ZmljYWNpb24gUmFpeiBkZWwgRXN0YWRvIFZlbmV6b2xhbm8xCzAJBgNVBAYTAlZFMRAwDgYDVQQH +EwdDYXJhY2FzMRkwFwYDVQQIExBEaXN0cml0byBDYXBpdGFsMTYwNAYDVQQKEy1TaXN0ZW1hIE5h +Y2lvbmFsIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25pY2ExQzBBBgNVBAsTOlN1cGVyaW50ZW5k +ZW5jaWEgZGUgU2VydmljaW9zIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25pY2ExJTAjBgkqhkiG +9w0BCQEWFmFjcmFpekBzdXNjZXJ0ZS5nb2IudmWCAQowDgYDVR0PAQH/BAQDAgEGME0GA1UdEQRG +MESCDnByb2NlcnQubmV0LnZloBUGBWCGXgIBoAwMClBTQy0wMDAwMDKgGwYFYIZeAgKgEgwQUklG +LUotMzE2MzUzNzMtNzB2BgNVHR8EbzBtMEagRKBChkBodHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52 +ZS9sY3IvQ0VSVElGSUNBRE8tUkFJWi1TSEEzODRDUkxERVIuY3JsMCOgIaAfhh1sZGFwOi8vYWNy 
+YWl6LnN1c2NlcnRlLmdvYi52ZTA3BggrBgEFBQcBAQQrMCkwJwYIKwYBBQUHMAGGG2h0dHA6Ly9v +Y3NwLnN1c2NlcnRlLmdvYi52ZTBBBgNVHSAEOjA4MDYGBmCGXgMBAjAsMCoGCCsGAQUFBwIBFh5o +dHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52ZS9kcGMwDQYJKoZIhvcNAQELBQADggIBACtZ6yKZu4Sq +T96QxtGGcSOeSwORR3C7wJJg7ODU523G0+1ng3dS1fLld6c2suNUvtm7CpsR72H0xpkzmfWvADmN +g7+mvTV+LFwxNG9s2/NkAZiqlCxB3RWGymspThbASfzXg0gTB1GEMVKIu4YXx2sviiCtxQuPcD4q +uxtxj7mkoP3YldmvWb8lK5jpY5MvYB7Eqvh39YtsL+1+LrVPQA3uvFd359m21D+VJzog1eWuq2w1 +n8GhHVnchIHuTQfiSLaeS5UtQbHh6N5+LwUeaO6/u5BlOsju6rEYNxxik6SgMexxbJHmpHmJWhSn +FFAFTKQAVzAswbVhltw+HoSvOULP5dAssSS830DD7X9jSr3hTxJkhpXzsOfIt+FTvZLm8wyWuevo +5pLtp4EJFAv8lXrPj9Y0TzYS3F7RNHXGRoAvlQSMx4bEqCaJqD8Zm4G7UaRKhqsLEQ+xrmNTbSjq +3TNWOByyrYDT13K9mmyZY+gAu0F2BbdbmRiKw7gSXFbPVgx96OLP7bx0R/vu0xdOIk9W/1DzLuY5 +poLWccret9W6aAjtmcz9opLLabid+Qqkpj5PkygqYWwHJgD/ll9ohri4zspV4KuxPX+Y1zMOWj3Y +eMLEYC/HYvBhkdI4sPaeVdtAgAUSM84dkpvRabP/v/GSCmE1P93+hvS84Bpxs2Km +-----END CERTIFICATE----- + +China Internet Network Information Center EV Certificates Root +============================================================== +-----BEGIN CERTIFICATE----- +MIID9zCCAt+gAwIBAgIESJ8AATANBgkqhkiG9w0BAQUFADCBijELMAkGA1UEBhMCQ04xMjAwBgNV +BAoMKUNoaW5hIEludGVybmV0IE5ldHdvcmsgSW5mb3JtYXRpb24gQ2VudGVyMUcwRQYDVQQDDD5D +aGluYSBJbnRlcm5ldCBOZXR3b3JrIEluZm9ybWF0aW9uIENlbnRlciBFViBDZXJ0aWZpY2F0ZXMg +Um9vdDAeFw0xMDA4MzEwNzExMjVaFw0zMDA4MzEwNzExMjVaMIGKMQswCQYDVQQGEwJDTjEyMDAG +A1UECgwpQ2hpbmEgSW50ZXJuZXQgTmV0d29yayBJbmZvcm1hdGlvbiBDZW50ZXIxRzBFBgNVBAMM +PkNoaW5hIEludGVybmV0IE5ldHdvcmsgSW5mb3JtYXRpb24gQ2VudGVyIEVWIENlcnRpZmljYXRl +cyBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAm35z7r07eKpkQ0H1UN+U8i6y +jUqORlTSIRLIOTJCBumD1Z9S7eVnAztUwYyZmczpwA//DdmEEbK40ctb3B75aDFk4Zv6dOtouSCV +98YPjUesWgbdYavi7NifFy2cyjw1l1VxzUOFsUcW9SxTgHbP0wBkvUCZ3czY28Sf1hNfQYOL+Q2H +klY0bBoQCxfVWhyXWIQ8hBouXJE0bhlffxdpxWXvayHG1VA6v2G5BY3vbzQ6sm8UY78WO5upKv23 +KzhmBsUs4qpnHkWnjQRmQvaPK++IIGmPMowUc9orhpFjIpryp9vOiYurXccUwVswah+xt54ugQEC +7c+WXmPbqOY4twIDAQABo2MwYTAfBgNVHSMEGDAWgBR8cks5x8DbYqVPm6oYNJKiyoOCWTAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUfHJLOcfA22KlT5uqGDSSosqD +glkwDQYJKoZIhvcNAQEFBQADggEBACrDx0M3j92tpLIM7twUbY8opJhJywyA6vPtI2Z1fcXTIWd5 +0XPFtQO3WKwMVC/GVhMPMdoG52U7HW8228gd+f2ABsqjPWYWqJ1MFn3AlUa1UeTiH9fqBk1jjZaM +7+czV0I664zBechNdn3e9rG3geCg+aF4RhcaVpjwTj2rHO3sOdwHSPdj/gauwqRcalsyiMXHM4Ws +ZkJHwlgkmeHlPuV1LI5D1l08eB6olYIpUNHRFrrvwb562bTYzB5MRuF3sTGrvSrIzo9uoV1/A3U0 +5K2JRVRevq4opbs/eHnrc7MKDf2+yfdWrPa37S+bISnHOLaVxATywy39FCqQmbkHzJ8= +-----END CERTIFICATE----- + +Swisscom Root CA 2 +================== +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQHp4o6Ejy5e/DfEoeWhhntjANBgkqhkiG9w0BAQsFADBkMQswCQYDVQQG +EwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2VydGlmaWNhdGUgU2Vy +dmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3QgQ0EgMjAeFw0xMTA2MjQwODM4MTRaFw0zMTA2 +MjUwNzM4MTRaMGQxCzAJBgNVBAYTAmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGln +aXRhbCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAyMIIC +IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAlUJOhJ1R5tMJ6HJaI2nbeHCOFvErjw0DzpPM +LgAIe6szjPTpQOYXTKueuEcUMncy3SgM3hhLX3af+Dk7/E6J2HzFZ++r0rk0X2s682Q2zsKwzxNo +ysjL67XiPS4h3+os1OD5cJZM/2pYmLcX5BtS5X4HAB1f2uY+lQS3aYg5oUFgJWFLlTloYhyxCwWJ +wDaCFCE/rtuh/bxvHGCGtlOUSbkrRsVPACu/obvLP+DHVxxX6NZp+MEkUp2IVd3Chy50I9AU/SpH +Wrumnf2U5NGKpV+GY3aFy6//SSj8gO1MedK75MDvAe5QQQg1I3ArqRa0jG6F6bYRzzHdUyYb3y1a +SgJA/MTAtukxGggo5WDDH8SQjhBiYEQN7Aq+VRhxLKX0srwVYv8c474d2h5Xszx+zYIdkeNL6yxS +NLCK/RJOlrDrcH+eOfdmQrGrrFLadkBXeyq96G4DsguAhYidDMfCd7Camlf0uPoTXGiTOmekl9Ab 
+mbeGMktg2M7v0Ax/lZ9vh0+Hio5fCHyqW/xavqGRn1V9TrALacywlKinh/LTSlDcX3KwFnUey7QY +Ypqwpzmqm59m2I2mbJYV4+by+PGDYmy7Velhk6M99bFXi08jsJvllGov34zflVEpYKELKeRcVVi3 +qPyZ7iVNTA6z00yPhOgpD/0QVAKFyPnlw4vP5w8CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYw +HQYDVR0hBBYwFDASBgdghXQBUwIBBgdghXQBUwIBMBIGA1UdEwEB/wQIMAYBAf8CAQcwHQYDVR0O +BBYEFE0mICKJS9PVpAqhb97iEoHF8TwuMB8GA1UdIwQYMBaAFE0mICKJS9PVpAqhb97iEoHF8Twu +MA0GCSqGSIb3DQEBCwUAA4ICAQAyCrKkG8t9voJXiblqf/P0wS4RfbgZPnm3qKhyN2abGu2sEzsO +v2LwnN+ee6FTSA5BesogpxcbtnjsQJHzQq0Qw1zv/2BZf82Fo4s9SBwlAjxnffUy6S8w5X2lejjQ +82YqZh6NM4OKb3xuqFp1mrjX2lhIREeoTPpMSQpKwhI3qEAMw8jh0FcNlzKVxzqfl9NX+Ave5XLz +o9v/tdhZsnPdTSpxsrpJ9csc1fV5yJmz/MFMdOO0vSk3FQQoHt5FRnDsr7p4DooqzgB53MBfGWcs +a0vvaGgLQ+OswWIJ76bdZWGgr4RVSJFSHMYlkSrQwSIjYVmvRRGFHQEkNI/Ps/8XciATwoCqISxx +OQ7Qj1zB09GOInJGTB2Wrk9xseEFKZZZ9LuedT3PDTcNYtsmjGOpI99nBjx8Oto0QuFmtEYE3saW +mA9LSHokMnWRn6z3aOkquVVlzl1h0ydw2Df+n7mvoC5Wt6NlUe07qxS/TFED6F+KBZvuim6c779o ++sjaC+NCydAXFJy3SuCvkychVSa1ZC+N8f+mQAWFBVzKBxlcCxMoTFh/wqXvRdpg065lYZ1Tg3TC +rvJcwhbtkj6EPnNgiLx29CzP0H1907he0ZESEOnN3col49XtmS++dYFLJPlFRpTJKSFTnCZFqhMX +5OfNeOI5wSsSnqaeG8XmDtkx2Q== +-----END CERTIFICATE----- + +Swisscom Root EV CA 2 +===================== +-----BEGIN CERTIFICATE----- +MIIF4DCCA8igAwIBAgIRAPL6ZOJ0Y9ON/RAdBB92ylgwDQYJKoZIhvcNAQELBQAwZzELMAkGA1UE +BhMCY2gxETAPBgNVBAoTCFN3aXNzY29tMSUwIwYDVQQLExxEaWdpdGFsIENlcnRpZmljYXRlIFNl +cnZpY2VzMR4wHAYDVQQDExVTd2lzc2NvbSBSb290IEVWIENBIDIwHhcNMTEwNjI0MDk0NTA4WhcN +MzEwNjI1MDg0NTA4WjBnMQswCQYDVQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsT +HERpZ2l0YWwgQ2VydGlmaWNhdGUgU2VydmljZXMxHjAcBgNVBAMTFVN3aXNzY29tIFJvb3QgRVYg +Q0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMT3HS9X6lds93BdY7BxUglgRCgz +o3pOCvrY6myLURYaVa5UJsTMRQdBTxB5f3HSek4/OE6zAMaVylvNwSqD1ycfMQ4jFrclyxy0uYAy +Xhqdk/HoPGAsp15XGVhRXrwsVgu42O+LgrQ8uMIkqBPHoCE2G3pXKSinLr9xJZDzRINpUKTk4Rti +GZQJo/PDvO/0vezbE53PnUgJUmfANykRHvvSEaeFGHR55E+FFOtSN+KxRdjMDUN/rhPSays/p8Li +qG12W0OfvrSdsyaGOx9/5fLoZigWJdBLlzin5M8J0TbDC77aO0RYjb7xnglrPvMyxyuHxuxenPaH +Za0zKcQvidm5y8kDnftslFGXEBuGCxobP/YCfnvUxVFkKJ3106yDgYjTdLRZncHrYTNaRdHLOdAG +alNgHa/2+2m8atwBz735j9m9W8E6X47aD0upm50qKGsaCnw8qyIL5XctcfaCNYGu+HuB5ur+rPQa +m3Rc6I8k9l2dRsQs0h4rIWqDJ2dVSqTjyDKXZpBy2uPUZC5f46Fq9mDU5zXNysRojddxyNMkM3Ox +bPlq4SjbX8Y96L5V5jcb7STZDxmPX2MYWFCBUWVv8p9+agTnNCRxunZLWB4ZvRVgRaoMEkABnRDi +xzgHcgplwLa7JSnaFp6LNYth7eVxV4O1PHGf40+/fh6Bn0GXAgMBAAGjgYYwgYMwDgYDVR0PAQH/ +BAQDAgGGMB0GA1UdIQQWMBQwEgYHYIV0AVMCAgYHYIV0AVMCAjASBgNVHRMBAf8ECDAGAQH/AgED +MB0GA1UdDgQWBBRF2aWBbj2ITY1x0kbBbkUe88SAnTAfBgNVHSMEGDAWgBRF2aWBbj2ITY1x0kbB +bkUe88SAnTANBgkqhkiG9w0BAQsFAAOCAgEAlDpzBp9SSzBc1P6xXCX5145v9Ydkn+0UjrgEjihL +j6p7jjm02Vj2e6E1CqGdivdj5eu9OYLU43otb98TPLr+flaYC/NUn81ETm484T4VvwYmneTwkLbU +wp4wLh/vx3rEUMfqe9pQy3omywC0Wqu1kx+AiYQElY2NfwmTv9SoqORjbdlk5LgpWgi/UOGED1V7 +XwgiG/W9mR4U9s70WBCCswo9GcG/W6uqmdjyMb3lOGbcWAXH7WMaLgqXfIeTK7KK4/HsGOV1timH +59yLGn602MnTihdsfSlEvoqq9X46Lmgxk7lq2prg2+kupYTNHAq4Sgj5nPFhJpiTt3tm7JFe3VE/ +23MPrQRYCd0EApUKPtN236YQHoA96M2kZNEzx5LH4k5E4wnJTsJdhw4Snr8PyQUQ3nqjsTzyP6Wq +J3mtMX0f/fwZacXduT98zca0wjAefm6S139hdlqP65VNvBFuIXxZN5nQBrz5Bm0yFqXZaajh3DyA +HmBR3NdUIR7KYndP+tiPsys6DXhyyWhBWkdKwqPrGtcKqzwyVcgKEZzfdNbwQBUdyLmPtTbFr/gi +uMod89a2GQ+fYWVq6nTIfI/DT11lgh/ZDYnadXL77/FHZxOzyNEZiCcmmpl5fx7kLD977vHeTYuW +l8PVP3wbI+2ksx0WckNLIOFZfsLorSa/ovc= +-----END CERTIFICATE----- + +CA Disig Root R1 +================ +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAMMDmu5QkG4oMA0GCSqGSIb3DQEBBQUAMFIxCzAJBgNVBAYTAlNLMRMw +EQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMuMRkwFwYDVQQDExBDQSBEaXNp 
+ZyBSb290IFIxMB4XDTEyMDcxOTA5MDY1NloXDTQyMDcxOTA5MDY1NlowUjELMAkGA1UEBhMCU0sx +EzARBgNVBAcTCkJyYXRpc2xhdmExEzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERp +c2lnIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCqw3j33Jijp1pedxiy +3QRkD2P9m5YJgNXoqqXinCaUOuiZc4yd39ffg/N4T0Dhf9Kn0uXKE5Pn7cZ3Xza1lK/oOI7bm+V8 +u8yN63Vz4STN5qctGS7Y1oprFOsIYgrY3LMATcMjfF9DCCMyEtztDK3AfQ+lekLZWnDZv6fXARz2 +m6uOt0qGeKAeVjGu74IKgEH3G8muqzIm1Cxr7X1r5OJeIgpFy4QxTaz+29FHuvlglzmxZcfe+5nk +CiKxLU3lSCZpq+Kq8/v8kiky6bM+TR8noc2OuRf7JT7JbvN32g0S9l3HuzYQ1VTW8+DiR0jm3hTa +YVKvJrT1cU/J19IG32PK/yHoWQbgCNWEFVP3Q+V8xaCJmGtzxmjOZd69fwX3se72V6FglcXM6pM6 +vpmumwKjrckWtc7dXpl4fho5frLABaTAgqWjR56M6ly2vGfb5ipN0gTco65F97yLnByn1tUD3AjL +LhbKXEAz6GfDLuemROoRRRw1ZS0eRWEkG4IupZ0zXWX4Qfkuy5Q/H6MMMSRE7cderVC6xkGbrPAX +ZcD4XW9boAo0PO7X6oifmPmvTiT6l7Jkdtqr9O3jw2Dv1fkCyC2fg69naQanMVXVz0tv/wQFx1is +XxYb5dKj6zHbHzMVTdDypVP1y+E9Tmgt2BLdqvLmTZtJ5cUoobqwWsagtQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUiQq0OJMa5qvum5EY+fU8PjXQ +04IwDQYJKoZIhvcNAQEFBQADggIBADKL9p1Kyb4U5YysOMo6CdQbzoaz3evUuii+Eq5FLAR0rBNR +xVgYZk2C2tXck8An4b58n1KeElb21Zyp9HWc+jcSjxyT7Ff+Bw+r1RL3D65hXlaASfX8MPWbTx9B +LxyE04nH4toCdu0Jz2zBuByDHBb6lM19oMgY0sidbvW9adRtPTXoHqJPYNcHKfyyo6SdbhWSVhlM +CrDpfNIZTUJG7L399ldb3Zh+pE3McgODWF3vkzpBemOqfDqo9ayk0d2iLbYq/J8BjuIQscTK5Gfb +VSUZP/3oNn6z4eGBrxEWi1CXYBmCAMBrTXO40RMHPuq2MU/wQppt4hF05ZSsjYSVPCGvxdpHyN85 +YmLLW1AL14FABZyb7bq2ix4Eb5YgOe2kfSnbSM6C3NQCjR0EMVrHS/BsYVLXtFHCgWzN4funodKS +ds+xDzdYpPJScWc/DIh4gInByLUfkmO+p3qKViwaqKactV2zY9ATIKHrkWzQjX2v3wvkF7mGnjix +lAxYjOBVqjtjbZqJYLhkKpLGN/R+Q0O3c+gB53+XD9fyexn9GtePyfqFa3qdnom2piiZk4hA9z7N +UaPK6u95RyG1/jLix8NRb76AdPCkwzryT+lf3xkK8jsTQ6wxpLPn6/wY1gGp8yqPNg7rtLG8t0zJ +a7+h89n07eLw4+1knj0vllJPgFOL +-----END CERTIFICATE----- + +CA Disig Root R2 +================ +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNVBAYTAlNLMRMw +EQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMuMRkwFwYDVQQDExBDQSBEaXNp +ZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQyMDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sx +EzARBgNVBAcTCkJyYXRpc2xhdmExEzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERp +c2lnIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbC +w3OeNcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNHPWSb6Wia +xswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3Ix2ymrdMxp7zo5eFm1tL7 +A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbeQTg06ov80egEFGEtQX6sx3dOy1FU+16S +GBsEWmjGycT6txOgmLcRK7fWV8x8nhfRyyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqV +g8NTEQxzHQuyRpDRQjrOQG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa +5Beny912H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJQfYE +koopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUDi/ZnWejBBhG93c+A +Ak9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORsnLMOPReisjQS1n6yqEm70XooQL6i +Fh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5u +Qu0wDQYJKoZIhvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM +tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqfGopTpti72TVV +sRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkblvdhuDvEK7Z4bLQjb/D907Je +dR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka+elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W8 +1k/BfDxujRNt+3vrMNDcTa/F1balTFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjx +mHHEt38OFdAlab0inSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01 
+utI3gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18DrG5gPcFw0 +sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3OszMOl6W8KjptlwlCFtaOg +UxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8xL4ysEr3vQCj8KWefshNPZiTEUxnpHikV +7+ZtsH8tZ/3zbBt1RqPlShfppNcL +-----END CERTIFICATE----- + +ACCVRAIZ1 +========= +-----BEGIN CERTIFICATE----- +MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UEAwwJQUNDVlJB +SVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQswCQYDVQQGEwJFUzAeFw0xMTA1 +MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQBgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwH +UEtJQUNDVjENMAsGA1UECgwEQUNDVjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQCbqau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gM +jmoYHtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWoG2ioPej0 +RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpAlHPrzg5XPAOBOp0KoVdD +aaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhrIA8wKFSVf+DuzgpmndFALW4ir50awQUZ +0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDG +WuzndN9wrqODJerWx5eHk6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs7 +8yM2x/474KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMOm3WR +5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpacXpkatcnYGMN285J +9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPluUsXQA+xtrn13k/c4LOsOxFwYIRK +Q26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYIKwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRw +Oi8vd3d3LmFjY3YuZXMvZmlsZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEu +Y3J0MB8GCCsGAQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2 +VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeTVfZW6oHlNsyM +Hj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIGCCsGAQUFBwICMIIBFB6CARAA +QQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUAcgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBh +AO0AegAgAGQAZQAgAGwAYQAgAEEAQwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUA +YwBuAG8AbABvAGcA7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBj +AHQAcgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAAQwBQAFMA +IABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUAczAwBggrBgEFBQcCARYk +aHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2MuaHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0 +dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRtaW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2 +MV9kZXIuY3JsMA4GA1UdDwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZI +hvcNAQEFBQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdpD70E +R9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gUJyCpZET/LtZ1qmxN +YEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+mAM/EKXMRNt6GGT6d7hmKG9Ww7Y49 +nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepDvV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJ +TS+xJlsndQAJxGJ3KQhfnlmstn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3 +sCPdK6jT2iWH7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h +I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szAh1xA2syVP1Xg +Nce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xFd3+YJ5oyXSrjhO7FmGYvliAd +3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2HpPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3p +EfbRD0tVNEYqi4Y7 +-----END CERTIFICATE----- + +TWCA Global Root CA +=================== +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcxEjAQBgNVBAoT +CVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMTVFdDQSBHbG9iYWwgUm9vdCBD +QTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQK +EwlUQUlXQU4tQ0ExEDAOBgNVBAsTB1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3Qg 
+Q0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2C +nJfF10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz0ALfUPZV +r2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfChMBwqoJimFb3u/Rk28OKR +Q4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbHzIh1HrtsBv+baz4X7GGqcXzGHaL3SekV +tTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1W +KKD+u4ZqyPpcC1jcxkt2yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99 +sy2sbZCilaLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYPoA/p +yJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQABDzfuBSO6N+pjWxn +kjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcEqYSjMq+u7msXi7Kx/mzhkIyIqJdI +zshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMC +AQYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6g +cFGn90xHNcgL1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn +LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WFH6vPNOw/KP4M +8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNoRI2T9GRwoD2dKAXDOXC4Ynsg +/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlg +lPx4mI88k1HtQJAH32RjJMtOcQWh15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryP +A9gK8kxkRr05YuWW6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3m +i4TWnsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5jwa19hAM8 +EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWzaGHQRiapIVJpLesux+t3 +zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmyKwbQBM0= +-----END CERTIFICATE----- + +TeliaSonera Root CA v1 +====================== +-----BEGIN CERTIFICATE----- +MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAwNzEUMBIGA1UE +CgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJvb3QgQ0EgdjEwHhcNMDcxMDE4 +MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYDVQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwW +VGVsaWFTb25lcmEgUm9vdCBDQSB2MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+ +6yfwIaPzaSZVfp3FVRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA +3GV17CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+XZ75Ljo1k +B1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+/jXh7VB7qTCNGdMJjmhn +Xb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxH +oLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkmdtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3 +F0fUTPHSiXk+TT2YqGHeOh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJ +oWjiUIMusDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4pgd7 +gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fsslESl1MpWtTwEhDc +TwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQarMCpgKIv7NHfirZ1fpoeDVNAgMB +AAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qW +DNXr+nuqF+gTEjANBgkqhkiG9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNm +zqjMDfz1mgbldxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx +0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1TjTQpgcmLNkQfW +pb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBedY2gea+zDTYa4EzAvXUYNR0PV +G6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpc +c41teyWRyu5FrgZLAMzTsVlQ2jqIOylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOT +JsjrDNYmiLbAJM+7vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2 +qReWt88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcnHL/EVlP6 +Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVxSK236thZiNSQvxaz2ems +WWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= +-----END CERTIFICATE----- + +E-Tugra Certification Authority 
+=============================== +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNVBAYTAlRSMQ8w +DQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamls +ZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN +ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMw +NTEyMDk0OFoXDTIzMDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmEx +QDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxl +cmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQD +DB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEA4vU/kwVRHoViVF56C/UYB4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vd +hQd2h8y/L5VMzH2nPbxHD5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5K +CKpbknSFQ9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEoq1+g +ElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3Dk14opz8n8Y4e0ypQ +BaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcHfC425lAcP9tDJMW/hkd5s3kc91r0 +E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsutdEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gz +rt48Ue7LE3wBf4QOXVGUnhMMti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAq +jqFGOjGY5RH8zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn +rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUXU8u3Zg5mTPj5 +dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6Jyr+zE7S6E5UMA8GA1UdEwEB +/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEG +MA0GCSqGSIb3DQEBCwUAA4ICAQAFNzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAK +kEh47U6YA5n+KGCRHTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jO +XKqYGwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c77NCR807 +VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3+GbHeJAAFS6LrVE1Uweo +a2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WKvJUawSg5TB9D0pH0clmKuVb8P7Sd2nCc +dlqMQ1DujjByTd//SffGqWfZbawCEeI6FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEV +KV0jq9BgoRJP3vQXzTLlyb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gT +Dx4JnW2PAJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpDy4Q0 +8ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8dNL/+I5c30jn6PQ0G +C7TbO6Orb1wdtn7os4I07QZcJA== +-----END CERTIFICATE----- + +T-TeleSec GlobalRoot Class 2 +============================ +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM +IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU +cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgx +MDAxMTA0MDE0WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz +dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD +ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUdAqSzm1nzHoqvNK38DcLZ +SBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiCFoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/F +vudocP05l03Sx5iRUKrERLMjfTlH6VJi1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx970 +2cu+fjOlbpSD8DT6IavqjnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGV +WOHAD3bZwI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/WSA2AHmgoCJrjNXy +YdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhyNsZt+U2e+iKo4YFWz827n+qrkRk4 +r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPACuvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNf 
+vNoBYimipidx5joifsFvHZVwIEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR +3p1m0IvVVGb6g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlPBSeOE6Fuwg== +-----END CERTIFICATE----- + +Atos TrustedRoot 2011 +===================== +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UEAwwVQXRvcyBU +cnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQGEwJERTAeFw0xMTA3MDcxNDU4 +MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMMFUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsG +A1UECgwEQXRvczELMAkGA1UEBhMCREUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCV +hTuXbyo7LjvPpvMpNb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr +54rMVD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+SZFhyBH+ +DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ4J7sVaE3IqKHBAUsR320 +HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0Lcp2AMBYHlT8oDv3FdU9T1nSatCQujgKR +z3bFmx5VdJx4IbHwLfELn8LVlhgf8FQieowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7R +l+lwrrw7GWzbITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZ +bNshMBgGA1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEB +CwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8jvZfza1zv7v1Apt+h +k6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kPDpFrdRbhIfzYJsdHt6bPWHJxfrrh +TZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pcmaHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a9 +61qn8FYiqTxlVMYVqL2Gns2Dlmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G +3mB/ufNPRJLvKrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed +-----END CERTIFICATE----- diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudformation/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudformation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d602dc39e8c593248117167e6234b7dafac874c2 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudformation/__init__.py @@ -0,0 +1,56 @@ +# Copyright (c) 2010-2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010-2011, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.cloudformation.connection import CloudFormationConnection +from boto.regioninfo import RegionInfo, get_regions, load_regions + +RegionData = load_regions().get('cloudformation') + + +def regions(): + """ + Get all available regions for the CloudFormation service. 
+
+    :rtype: list
+    :return: A list of :class:`boto.RegionInfo` instances
+    """
+    return get_regions(
+        'cloudformation',
+        connection_cls=CloudFormationConnection
+    )
+
+
+def connect_to_region(region_name, **kw_params):
+    """
+    Given a valid region name, return a
+    :class:`boto.cloudformation.CloudFormationConnection`.
+
+    :param str region_name: The name of the region to connect to.
+
+    :rtype: :class:`boto.cloudformation.CloudFormationConnection` or ``None``
+    :return: A connection to the given region, or None if an invalid region
+        name is given
+    """
+    for region in regions():
+        if region.name == region_name:
+            return region.connect(**kw_params)
+    return None
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudformation/connection.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudformation/connection.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee850b30cae25858cb0ede019d1afef56b76fc44
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudformation/connection.py
@@ -0,0 +1,922 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import boto
+from boto.cloudformation.stack import Stack, StackSummary, StackEvent
+from boto.cloudformation.stack import StackResource, StackResourceSummary
+from boto.cloudformation.template import Template
+from boto.connection import AWSQueryConnection
+from boto.regioninfo import RegionInfo
+from boto.compat import json
+
+
+class CloudFormationConnection(AWSQueryConnection):
+    """
+    AWS CloudFormation
+    AWS CloudFormation enables you to create and manage AWS
+    infrastructure deployments predictably and repeatedly. AWS
+    CloudFormation helps you leverage AWS products such as Amazon EC2,
+    EBS, Amazon SNS, ELB, and Auto Scaling to build highly reliable,
+    highly scalable, cost-effective applications without worrying
+    about creating and configuring the underlying AWS infrastructure.
+
+    With AWS CloudFormation, you declare all of your resources and
+    dependencies in a template file. The template defines a collection
+    of resources as a single unit called a stack. AWS CloudFormation
+    creates and deletes all member resources of the stack together and
+    manages all dependencies between the resources for you.
+
+    For more information about this product, go to the `CloudFormation
+    Product Page`_.
+
+    AWS CloudFormation makes use of other AWS products. If you need
+    additional technical information about a specific AWS product, you
+    can find the product's technical documentation at
+    `http://aws.amazon.com/documentation/`_.
+    """
+    APIVersion = boto.config.get('Boto', 'cfn_version', '2010-05-15')
+    DefaultRegionName = boto.config.get('Boto', 'cfn_region_name', 'us-east-1')
+    DefaultRegionEndpoint = boto.config.get('Boto', 'cfn_region_endpoint',
+                                            'cloudformation.us-east-1.amazonaws.com')
+
+    valid_states = (
+        'CREATE_IN_PROGRESS', 'CREATE_FAILED', 'CREATE_COMPLETE',
+        'ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED', 'ROLLBACK_COMPLETE',
+        'DELETE_IN_PROGRESS', 'DELETE_FAILED', 'DELETE_COMPLETE',
+        'UPDATE_IN_PROGRESS', 'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS',
+        'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_IN_PROGRESS',
+        'UPDATE_ROLLBACK_FAILED',
+        'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS',
+        'UPDATE_ROLLBACK_COMPLETE')
+
+    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+                 is_secure=True, port=None, proxy=None, proxy_port=None,
+                 proxy_user=None, proxy_pass=None, debug=0,
+                 https_connection_factory=None, region=None, path='/',
+                 converter=None, security_token=None, validate_certs=True,
+                 profile_name=None):
+        if not region:
+            region = RegionInfo(self, self.DefaultRegionName,
+                                self.DefaultRegionEndpoint, CloudFormationConnection)
+        self.region = region
+        super(CloudFormationConnection, self).__init__(aws_access_key_id,
+                                    aws_secret_access_key,
+                                    is_secure, port, proxy, proxy_port,
+                                    proxy_user, proxy_pass,
+                                    self.region.endpoint, debug,
+                                    https_connection_factory, path,
+                                    security_token,
+                                    validate_certs=validate_certs,
+                                    profile_name=profile_name)
+
+    def _required_auth_capability(self):
+        return ['hmac-v4']
+
+    def encode_bool(self, v):
+        v = bool(v)
+        return {True: "true", False: "false"}[v]
+
+    def _build_create_or_update_params(self, stack_name, template_body,
+            template_url, parameters, disable_rollback, timeout_in_minutes,
+            notification_arns, capabilities, on_failure, stack_policy_body,
+            stack_policy_url, tags, use_previous_template=None,
+            stack_policy_during_update_body=None,
+            stack_policy_during_update_url=None):
+        """
+        Helper that creates JSON parameters needed by a Stack Create or
+        Stack Update call.
+
+        :type stack_name: string
+        :param stack_name:
+            The name associated with the stack. The name must be unique within your
+            AWS account.
+
+            Must contain only alphanumeric characters (case sensitive) and start
+            with an alpha character. Maximum length of the name is 255
+            characters.
+
+        :type template_body: string
+        :param template_body: Structure containing the template body. (For more
+            information, go to `Template Anatomy`_ in the AWS CloudFormation
+            User Guide.)
+            Conditional: You must pass either `UsePreviousTemplate` or one of
+            `TemplateBody` or `TemplateUrl`. If both `TemplateBody` and
+            `TemplateUrl` are passed, only `TemplateBody` is used.
+
+        :type template_url: string
+        :param template_url: Location of file containing the template body. The
+            URL must point to a template (max size: 307,200 bytes) located in
+            an S3 bucket in the same region as the stack. For more information,
+            go to `Template Anatomy`_ in the AWS CloudFormation User Guide.
+            Conditional: You must pass either `UsePreviousTemplate` or one of
+            `TemplateBody` or `TemplateUrl`. If both `TemplateBody` and
+            `TemplateUrl` are passed, only `TemplateBody` is used.
+
+        :type parameters: list
+        :param parameters: A list of key/value tuples that specify input
+            parameters for the stack. A 3-tuple (key, value, bool) may be used to
+            specify the `UsePreviousValue` option.
+
+        :type disable_rollback: boolean
+        :param disable_rollback: Set to `True` to disable rollback of the stack
+            if stack creation failed. You can specify either `DisableRollback`
+            or `OnFailure`, but not both.
+            Default: `False`
+
+        :type timeout_in_minutes: integer
+        :param timeout_in_minutes: The amount of time that can pass before the
+            stack status becomes CREATE_FAILED; if `DisableRollback` is not set
+            or is set to `False`, the stack will be rolled back.
+
+        :type notification_arns: list
+        :param notification_arns: The Simple Notification Service (SNS) topic
+            ARNs to publish stack related events. You can find your SNS topic
+            ARNs using the `SNS console`_ or your Command Line Interface (CLI).
+
+        :type capabilities: list
+        :param capabilities: The list of capabilities that you want to allow in
+            the stack. If your template contains certain resources, you must
+            specify the CAPABILITY_IAM value for this parameter; otherwise,
+            this action returns an InsufficientCapabilities error. The
+            following resources require you to specify the capabilities
+            parameter: `AWS::CloudFormation::Stack`_, `AWS::IAM::AccessKey`_,
+            `AWS::IAM::Group`_, `AWS::IAM::InstanceProfile`_,
+            `AWS::IAM::Policy`_, `AWS::IAM::Role`_, `AWS::IAM::User`_, and
+            `AWS::IAM::UserToGroupAddition`_.
+
+        :type on_failure: string
+        :param on_failure: Determines what action will be taken if stack
+            creation fails. This must be one of: DO_NOTHING, ROLLBACK, or
+            DELETE. You can specify either `OnFailure` or `DisableRollback`,
+            but not both.
+            Default: `ROLLBACK`
+
+        :type stack_policy_body: string
+        :param stack_policy_body: Structure containing the stack policy body.
+            (For more information, go to `Prevent Updates to Stack Resources`_
+            in the AWS CloudFormation User Guide.)
+            If you pass `StackPolicyBody` and `StackPolicyURL`, only
+            `StackPolicyBody` is used.
+
+        :type stack_policy_url: string
+        :param stack_policy_url: Location of a file containing the stack
+            policy. The URL must point to a policy (max size: 16KB) located in
+            an S3 bucket in the same region as the stack. If you pass
+            `StackPolicyBody` and `StackPolicyURL`, only `StackPolicyBody` is
+            used.
+
+        :type tags: dict
+        :param tags: A set of user-defined `Tags` to associate with this stack,
+            represented by key/value pairs. Tags defined for the stack are
+            propagated to EC2 resources that are created as part of the stack.
+            A maximum number of 10 tags can be specified.
+
+        :type use_previous_template: boolean
+        :param use_previous_template: Set to `True` to use the previous
+            template instead of uploading a new one via `TemplateBody` or
+            `TemplateURL`.
+            Conditional: You must pass either `UsePreviousTemplate` or one of
+            `TemplateBody` or `TemplateUrl`.
+
+        :type stack_policy_during_update_body: string
+        :param stack_policy_during_update_body: Structure containing the
+            temporary overriding stack policy body. If you pass
+            `StackPolicyDuringUpdateBody` and `StackPolicyDuringUpdateURL`,
+            only `StackPolicyDuringUpdateBody` is used.
+            If you want to update protected resources, specify a temporary
+            overriding stack policy during this update. If you do not specify a
+            stack policy, the current policy that is associated with the stack
+            will be used.
+
+        :type stack_policy_during_update_url: string
+        :param stack_policy_during_update_url: Location of a file containing
+            the temporary overriding stack policy.
The URL must point to a + policy (max size: 16KB) located in an S3 bucket in the same region + as the stack. If you pass `StackPolicyDuringUpdateBody` and + `StackPolicyDuringUpdateURL`, only `StackPolicyDuringUpdateBody` is + used. + If you want to update protected resources, specify a temporary + overriding stack policy during this update. If you do not specify a + stack policy, the current policy that is associated with the stack + will be used. + + :rtype: dict + :return: JSON parameters represented as a Python dict. + """ + params = {'ContentType': "JSON", 'StackName': stack_name, + 'DisableRollback': self.encode_bool(disable_rollback)} + if template_body: + params['TemplateBody'] = template_body + if template_url: + params['TemplateURL'] = template_url + if use_previous_template is not None: + params['UsePreviousTemplate'] = self.encode_bool(use_previous_template) + if template_body and template_url: + boto.log.warning("If both TemplateBody and TemplateURL are" + " specified, only TemplateBody will be honored by the API") + if parameters and len(parameters) > 0: + for i, parameter_tuple in enumerate(parameters): + key, value = parameter_tuple[:2] + use_previous = (parameter_tuple[2] + if len(parameter_tuple) > 2 else False) + params['Parameters.member.%d.ParameterKey' % (i + 1)] = key + if use_previous: + params['Parameters.member.%d.UsePreviousValue' + % (i + 1)] = self.encode_bool(use_previous) + else: + params['Parameters.member.%d.ParameterValue' % (i + 1)] = value + + if capabilities: + for i, value in enumerate(capabilities): + params['Capabilities.member.%d' % (i + 1)] = value + if tags: + for i, (key, value) in enumerate(tags.items()): + params['Tags.member.%d.Key' % (i + 1)] = key + params['Tags.member.%d.Value' % (i + 1)] = value + if notification_arns and len(notification_arns) > 0: + self.build_list_params(params, notification_arns, + "NotificationARNs.member") + if timeout_in_minutes: + params['TimeoutInMinutes'] = int(timeout_in_minutes) + if disable_rollback is not None: + params['DisableRollback'] = str( + disable_rollback).lower() + if on_failure is not None: + params['OnFailure'] = on_failure + if stack_policy_body is not None: + params['StackPolicyBody'] = stack_policy_body + if stack_policy_url is not None: + params['StackPolicyURL'] = stack_policy_url + if stack_policy_during_update_body is not None: + params['StackPolicyDuringUpdateBody'] = stack_policy_during_update_body + if stack_policy_during_update_url is not None: + params['StackPolicyDuringUpdateURL'] = stack_policy_during_update_url + return params + + def _do_request(self, call, params, path, method): + """ + Do a request via ``self.make_request`` and parse the JSON response. + + :type call: string + :param call: Call name, e.g. 
``CreateStack`` + + :type params: dict + :param params: Dictionary of call parameters + + :type path: string + :param path: Server path + + :type method: string + :param method: HTTP method to use + + :rtype: dict + :return: Parsed JSON response data + """ + response = self.make_request(call, params, path, method) + body = response.read().decode('utf-8') + if response.status == 200: + body = json.loads(body) + return body + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body=body) + + def create_stack(self, stack_name, template_body=None, template_url=None, + parameters=None, notification_arns=None, disable_rollback=None, + timeout_in_minutes=None, capabilities=None, tags=None, + on_failure=None, stack_policy_body=None, stack_policy_url=None): + """ + Creates a stack as specified in the template. After the call + completes successfully, the stack creation starts. You can + check the status of the stack via the DescribeStacks API. + Currently, the limit for stacks is 20 stacks per account per + region. + + :type stack_name: string + :param stack_name: + The name associated with the stack. The name must be unique within your + AWS account. + + Must contain only alphanumeric characters (case sensitive) and start + with an alpha character. Maximum length of the name is 255 + characters. + + :type template_body: string + :param template_body: Structure containing the template body. (For more + information, go to `Template Anatomy`_ in the AWS CloudFormation + User Guide.) + Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are + passed, only `TemplateBody` is used. + + :type template_url: string + :param template_url: Location of file containing the template body. The + URL must point to a template (max size: 307,200 bytes) located in + an S3 bucket in the same region as the stack. For more information, + go to the `Template Anatomy`_ in the AWS CloudFormation User Guide. + Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are + passed, only `TemplateBody` is used. + + :type parameters: list + :param parameters: A list of key/value tuples that specify input + parameters for the stack. + + :type disable_rollback: boolean + :param disable_rollback: Set to `True` to disable rollback of the stack + if stack creation failed. You can specify either `DisableRollback` + or `OnFailure`, but not both. + Default: `False` + + :type timeout_in_minutes: integer + :param timeout_in_minutes: The amount of time that can pass before the + stack status becomes CREATE_FAILED; if `DisableRollback` is not set + or is set to `False`, the stack will be rolled back. + + :type notification_arns: list + :param notification_arns: The Simple Notification Service (SNS) topic + ARNs to publish stack related events. You can find your SNS topic + ARNs using the `SNS console`_ or your Command Line Interface (CLI). + + :type capabilities: list + :param capabilities: The list of capabilities that you want to allow in + the stack. If your template contains certain resources, you must + specify the CAPABILITY_IAM value for this parameter; otherwise, + this action returns an InsufficientCapabilities error. 
The
+            following resources require you to specify the capabilities
+            parameter: `AWS::CloudFormation::Stack`_, `AWS::IAM::AccessKey`_,
+            `AWS::IAM::Group`_, `AWS::IAM::InstanceProfile`_,
+            `AWS::IAM::Policy`_, `AWS::IAM::Role`_, `AWS::IAM::User`_, and
+            `AWS::IAM::UserToGroupAddition`_.
+
+        :type on_failure: string
+        :param on_failure: Determines what action will be taken if stack
+            creation fails. This must be one of: DO_NOTHING, ROLLBACK, or
+            DELETE. You can specify either `OnFailure` or `DisableRollback`,
+            but not both.
+            Default: `ROLLBACK`
+
+        :type stack_policy_body: string
+        :param stack_policy_body: Structure containing the stack policy body.
+            (For more information, go to `Prevent Updates to Stack Resources`_
+            in the AWS CloudFormation User Guide.)
+            If you pass `StackPolicyBody` and `StackPolicyURL`, only
+            `StackPolicyBody` is used.
+
+        :type stack_policy_url: string
+        :param stack_policy_url: Location of a file containing the stack
+            policy. The URL must point to a policy (max size: 16KB) located in
+            an S3 bucket in the same region as the stack. If you pass
+            `StackPolicyBody` and `StackPolicyURL`, only `StackPolicyBody` is
+            used.
+
+        :type tags: dict
+        :param tags: A set of user-defined `Tags` to associate with this stack,
+            represented by key/value pairs. Tags defined for the stack are
+            propagated to EC2 resources that are created as part of the stack.
+            A maximum number of 10 tags can be specified.
+        """
+        params = self._build_create_or_update_params(stack_name, template_body,
+            template_url, parameters, disable_rollback, timeout_in_minutes,
+            notification_arns, capabilities, on_failure, stack_policy_body,
+            stack_policy_url, tags)
+        body = self._do_request('CreateStack', params, '/', 'POST')
+        return body['CreateStackResponse']['CreateStackResult']['StackId']
+
+    def update_stack(self, stack_name, template_body=None, template_url=None,
+                     parameters=None, notification_arns=None, disable_rollback=False,
+                     timeout_in_minutes=None, capabilities=None, tags=None,
+                     use_previous_template=None,
+                     stack_policy_during_update_body=None,
+                     stack_policy_during_update_url=None,
+                     stack_policy_body=None, stack_policy_url=None):
+        """
+        Updates a stack as specified in the template. After the call
+        completes successfully, the stack update starts. You can check
+        the status of the stack via the DescribeStacks action.
+
+        **Note:** You cannot update `AWS::S3::Bucket`_ resources, for
+        example, to add or modify tags.
+
+        To get a copy of the template for an existing stack, you can
+        use the GetTemplate action.
+
+        Tags that were associated with this stack during creation time
+        will still be associated with the stack after an `UpdateStack`
+        operation.
+
+        For more information about creating an update template,
+        updating a stack, and monitoring the progress of the update,
+        see `Updating a Stack`_.
+
+        :type stack_name: string
+        :param stack_name:
+            The name or stack ID of the stack to update.
+
+            Must contain only alphanumeric characters (case sensitive) and start
+            with an alpha character. Maximum length of the name is 255
+            characters.
+
+        :type template_body: string
+        :param template_body: Structure containing the template body. (For more
+            information, go to `Template Anatomy`_ in the AWS CloudFormation
+            User Guide.)
+            Conditional: You must pass either `UsePreviousTemplate` or one of
+            `TemplateBody` or `TemplateUrl`. If both `TemplateBody` and
+            `TemplateUrl` are passed, only `TemplateBody` is used.
+
+        :type template_url: string
+        :param template_url: Location of file containing the template body. The
+            URL must point to a template (max size: 307,200 bytes) located in
+            an S3 bucket in the same region as the stack. For more information,
+            go to `Template Anatomy`_ in the AWS CloudFormation User Guide.
+            Conditional: You must pass either `UsePreviousTemplate` or one of
+            `TemplateBody` or `TemplateUrl`. If both `TemplateBody` and
+            `TemplateUrl` are passed, only `TemplateBody` is used.
+
+        :type use_previous_template: boolean
+        :param use_previous_template: Set to `True` to use the previous
+            template instead of uploading a new one via `TemplateBody` or
+            `TemplateURL`.
+            Conditional: You must pass either `UsePreviousTemplate` or one of
+            `TemplateBody` or `TemplateUrl`.
+
+        :type parameters: list
+        :param parameters: A list of key/value tuples that specify input
+            parameters for the stack. A 3-tuple (key, value, bool) may be used
+            to specify the `UsePreviousValue` option.
+
+        :type notification_arns: list
+        :param notification_arns: The Simple Notification Service (SNS) topic
+            ARNs to publish stack related events. You can find your SNS topic
+            ARNs using the `SNS console`_ or your Command Line Interface (CLI).
+
+        :type disable_rollback: bool
+        :param disable_rollback: Indicates whether or not to roll back on
+            failure.
+
+        :type timeout_in_minutes: integer
+        :param timeout_in_minutes: The amount of time that can pass before the
+            stack status becomes CREATE_FAILED; if `DisableRollback` is not set
+            or is set to `False`, the stack will be rolled back.
+
+        :type capabilities: list
+        :param capabilities: The list of capabilities you want to allow in
+            the stack. Currently, the only valid capability is
+            'CAPABILITY_IAM'.
+
+        :type tags: dict
+        :param tags: A set of user-defined `Tags` to associate with this stack,
+            represented by key/value pairs. Tags defined for the stack are
+            propagated to EC2 resources that are created as part of the stack.
+            A maximum number of 10 tags can be specified.
+
+        :type stack_policy_during_update_body: string
+        :param stack_policy_during_update_body: Structure containing the
+            temporary overriding stack policy body. If you pass
+            `StackPolicyDuringUpdateBody` and `StackPolicyDuringUpdateURL`,
+            only `StackPolicyDuringUpdateBody` is used.
+            If you want to update protected resources, specify a temporary
+            overriding stack policy during this update. If you do not specify a
+            stack policy, the current policy that is associated with the stack
+            will be used.
+
+        :type stack_policy_during_update_url: string
+        :param stack_policy_during_update_url: Location of a file containing
+            the temporary overriding stack policy. The URL must point to a
+            policy (max size: 16KB) located in an S3 bucket in the same region
+            as the stack. If you pass `StackPolicyDuringUpdateBody` and
+            `StackPolicyDuringUpdateURL`, only `StackPolicyDuringUpdateBody` is
+            used.
+            If you want to update protected resources, specify a temporary
+            overriding stack policy during this update.
If you do not specify a + stack policy, the current policy that is associated with the stack + will be used. + + :rtype: string + :return: The unique Stack ID. + """ + params = self._build_create_or_update_params(stack_name, template_body, + template_url, parameters, disable_rollback, timeout_in_minutes, + notification_arns, capabilities, None, stack_policy_body, + stack_policy_url, tags, use_previous_template, + stack_policy_during_update_body, stack_policy_during_update_url) + body = self._do_request('UpdateStack', params, '/', 'POST') + return body['UpdateStackResponse']['UpdateStackResult']['StackId'] + + def delete_stack(self, stack_name_or_id): + """ + Deletes a specified stack. Once the call completes + successfully, stack deletion starts. Deleted stacks do not + show up in the DescribeStacks API if the deletion has been + completed successfully. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack. + + """ + params = {'ContentType': "JSON", 'StackName': stack_name_or_id} + return self._do_request('DeleteStack', params, '/', 'GET') + + def describe_stack_events(self, stack_name_or_id=None, next_token=None): + """ + Returns all stack related events for a specified stack. For + more information about a stack's event history, go to + `Stacks`_ in the AWS CloudFormation User Guide. + Events are returned, even if the stack never existed or has + been successfully deleted. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack. + Default: There is no default value. + + :type next_token: string + :param next_token: String that identifies the start of the next list of + events, if there is one. + Default: There is no default value. + + """ + params = {} + if stack_name_or_id: + params['StackName'] = stack_name_or_id + if next_token: + params['NextToken'] = next_token + return self.get_list('DescribeStackEvents', params, [('member', + StackEvent)]) + + def describe_stack_resource(self, stack_name_or_id, logical_resource_id): + """ + Returns a description of the specified resource in the + specified stack. + + For deleted stacks, DescribeStackResource returns resource + information for up to 90 days after the stack has been + deleted. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack. + Default: There is no default value. + + :type logical_resource_id: string + :param logical_resource_id: The logical name of the resource as + specified in the template. + Default: There is no default value. + + """ + params = {'ContentType': "JSON", 'StackName': stack_name_or_id, + 'LogicalResourceId': logical_resource_id} + return self._do_request('DescribeStackResource', params, '/', 'GET') + + def describe_stack_resources(self, stack_name_or_id=None, + logical_resource_id=None, + physical_resource_id=None): + """ + Returns AWS resource descriptions for running and deleted + stacks. If `StackName` is specified, all the associated + resources that are part of the stack are returned. If + `PhysicalResourceId` is specified, the associated resources of + the stack that the resource belongs to are returned. + Only the first 100 resources will be returned. If your stack + has more resources than this, you should use + `ListStackResources` instead. + For deleted stacks, `DescribeStackResources` returns resource + information for up to 90 days after the stack has been + deleted. 
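Taken together, create_stack, describe_stacks, and the stack status fields support a simple create-and-poll workflow. A minimal sketch (editor's illustration, not part of the vendored patch; the region, stack name, and inline template are assumptions):

    import time

    import boto.cloudformation

    # Credentials come from the usual boto configuration chain; the region,
    # stack name, and template below are assumptions for illustration.
    conn = boto.cloudformation.connect_to_region('us-east-1')
    template = '{"Resources": {"Bucket": {"Type": "AWS::S3::Bucket"}}}'

    stack_id = conn.create_stack('demo-stack', template_body=template)

    # Poll DescribeStacks until the stack leaves its *_IN_PROGRESS state.
    while True:
        stack = conn.describe_stacks(stack_id)[0]
        if not stack.stack_status.endswith('_IN_PROGRESS'):
            break
        time.sleep(15)
    print(stack.stack_status)  # e.g. CREATE_COMPLETE or ROLLBACK_COMPLETE

describe_stacks accepts either the stack name or the StackId returned by create_stack, so the same polling loop works both during and after creation.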
+ + You must specify either `StackName` or `PhysicalResourceId`, + but not both. In addition, you can specify `LogicalResourceId` + to filter the returned result. For more information about + resources, the `LogicalResourceId` and `PhysicalResourceId`, + go to the `AWS CloudFormation User Guide`_. + A `ValidationError` is returned if you specify both + `StackName` and `PhysicalResourceId` in the same request. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack. + Required: Conditional. If you do not specify `StackName`, you must + specify `PhysicalResourceId`. + + Default: There is no default value. + + :type logical_resource_id: string + :param logical_resource_id: The logical name of the resource as + specified in the template. + Default: There is no default value. + + :type physical_resource_id: string + :param physical_resource_id: The name or unique identifier that + corresponds to a physical instance ID of a resource supported by + AWS CloudFormation. + For example, for an Amazon Elastic Compute Cloud (EC2) instance, + `PhysicalResourceId` corresponds to the `InstanceId`. You can pass + the EC2 `InstanceId` to `DescribeStackResources` to find which + stack the instance belongs to and what other resources are part of + the stack. + + Required: Conditional. If you do not specify `PhysicalResourceId`, you + must specify `StackName`. + + Default: There is no default value. + + """ + params = {} + if stack_name_or_id: + params['StackName'] = stack_name_or_id + if logical_resource_id: + params['LogicalResourceId'] = logical_resource_id + if physical_resource_id: + params['PhysicalResourceId'] = physical_resource_id + return self.get_list('DescribeStackResources', params, + [('member', StackResource)]) + + def describe_stacks(self, stack_name_or_id=None, next_token=None): + """ + Returns the description for the specified stack; if no stack + name was specified, then it returns the description for all + the stacks created. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack. + Default: There is no default value. + + :type next_token: string + :param next_token: String that identifies the start of the next list of + stacks, if there is one. + + """ + params = {} + if stack_name_or_id: + params['StackName'] = stack_name_or_id + if next_token is not None: + params['NextToken'] = next_token + return self.get_list('DescribeStacks', params, [('member', Stack)]) + + def get_template(self, stack_name_or_id): + """ + Returns the template body for a specified stack. You can get + the template for running or deleted stacks. + + For deleted stacks, GetTemplate returns the template for up to + 90 days after the stack has been deleted. + If the template does not exist, a `ValidationError` is + returned. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack, which are not always interchangeable: + + + Running stacks: You can specify either the stack's name or its unique + stack ID. + + Deleted stacks: You must specify the unique stack ID. + + + Default: There is no default value. + + """ + params = {'ContentType': "JSON", 'StackName': stack_name_or_id} + return self._do_request('GetTemplate', params, '/', 'GET') + + def list_stack_resources(self, stack_name_or_id, next_token=None): + """ + Returns descriptions of all resources of the specified stack. 
+
+        For deleted stacks, ListStackResources returns resource
+        information for up to 90 days after the stack has been
+        deleted.
+
+        :type stack_name_or_id: string
+        :param stack_name_or_id: The name or the unique identifier associated
+            with the stack, which are not always interchangeable:
+
+        + Running stacks: You can specify either the stack's name or its unique
+              stack ID.
+        + Deleted stacks: You must specify the unique stack ID.
+
+
+        Default: There is no default value.
+
+        :type next_token: string
+        :param next_token: String that identifies the start of the next list of
+            stack resource summaries, if there is one.
+            Default: There is no default value.
+
+        """
+        params = {'StackName': stack_name_or_id}
+        if next_token:
+            params['NextToken'] = next_token
+        return self.get_list('ListStackResources', params,
+                             [('member', StackResourceSummary)])
+
+    def list_stacks(self, stack_status_filters=None, next_token=None):
+        """
+        Returns the summary information for stacks whose status
+        matches the specified StackStatusFilter. Summary information
+        for stacks that have been deleted is kept for 90 days after
+        the stack is deleted. If no StackStatusFilter is specified,
+        summary information for all stacks is returned (including
+        existing stacks and stacks that have been deleted).
+
+        :type next_token: string
+        :param next_token: String that identifies the start of the next list of
+            stacks, if there is one.
+            Default: There is no default value.
+
+        :type stack_status_filters: list
+        :param stack_status_filters: Stack status to use as a filter. Specify
+            one or more stack status codes to list only stacks with the
+            specified status codes. For a complete list of stack status codes,
+            see the `StackStatus` parameter of the Stack data type.
+
+        """
+        params = {}
+        if next_token:
+            params['NextToken'] = next_token
+        if stack_status_filters and len(stack_status_filters) > 0:
+            self.build_list_params(params, stack_status_filters,
+                                   "StackStatusFilter.member")
+
+        return self.get_list('ListStacks', params,
+                             [('member', StackSummary)])
+
+    def validate_template(self, template_body=None, template_url=None):
+        """
+        Validates a specified template.
+
+        :type template_body: string
+        :param template_body: String containing the template body. (For more
+            information, go to `Template Anatomy`_ in the AWS CloudFormation
+            User Guide.)
+            Conditional: You must pass `TemplateURL` or `TemplateBody`. If both
+            are passed, only `TemplateBody` is used.
+
+        :type template_url: string
+        :param template_url: Location of file containing the template body. The
+            URL must point to a template (max size: 307,200 bytes) located in
+            an S3 bucket in the same region as the stack. For more information,
+            go to `Template Anatomy`_ in the AWS CloudFormation User Guide.
+            Conditional: You must pass `TemplateURL` or `TemplateBody`. If both
+            are passed, only `TemplateBody` is used.
+
+        """
+        params = {}
+        if template_body:
+            params['TemplateBody'] = template_body
+        if template_url:
+            params['TemplateURL'] = template_url
+        if template_body and template_url:
+            boto.log.warning("If both TemplateBody and TemplateURL are"
+                             " specified, only TemplateBody will be honored by the API")
+        return self.get_object('ValidateTemplate', params, Template,
+                               verb="POST")
+
+    def cancel_update_stack(self, stack_name_or_id=None):
+        """
+        Cancels an update on the specified stack. If the call
+        completes successfully, the stack will roll back the update
+        and revert to the previous stack configuration.
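validate_template and list_stacks, defined just above, make a convenient pre-flight check before creating or updating a stack. A hedged sketch, reusing the conn and template assumptions from the earlier sketch:

    # Validation raises on malformed templates and returns a parsed
    # boto.cloudformation.template.Template on success.
    t = conn.validate_template(template_body=template)
    for p in t.template_parameters or []:
        print(p.parameter_key, p.default_value, p.no_echo)

    # Summaries of matching stacks; pass status filters to narrow the list.
    for s in conn.list_stacks(stack_status_filters=['CREATE_COMPLETE']):
        print(s.stack_name, s.stack_status)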
+ Only stacks that are in the UPDATE_IN_PROGRESS state can be + canceled. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated with + the stack. + + """ + params = {} + if stack_name_or_id: + params['StackName'] = stack_name_or_id + return self.get_status('CancelUpdateStack', params) + + def estimate_template_cost(self, template_body=None, template_url=None, + parameters=None): + """ + Returns the estimated monthly cost of a template. The return + value is an AWS Simple Monthly Calculator URL with a query + string that describes the resources required to run the + template. + + :type template_body: string + :param template_body: Structure containing the template body. (For more + information, go to `Template Anatomy`_ in the AWS CloudFormation + User Guide.) + Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are + passed, only `TemplateBody` is used. + + :type template_url: string + :param template_url: Location of file containing the template body. The + URL must point to a template located in an S3 bucket in the same + region as the stack. For more information, go to `Template + Anatomy`_ in the AWS CloudFormation User Guide. + Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are + passed, only `TemplateBody` is used. + + :type parameters: list + :param parameters: A list of key/value tuples that specify input + parameters for the template. + + :rtype: string + :returns: URL to pre-filled cost calculator + """ + params = {'ContentType': "JSON"} + if template_body is not None: + params['TemplateBody'] = template_body + if template_url is not None: + params['TemplateURL'] = template_url + if parameters and len(parameters) > 0: + for i, (key, value) in enumerate(parameters): + params['Parameters.member.%d.ParameterKey' % (i + 1)] = key + params['Parameters.member.%d.ParameterValue' % (i + 1)] = value + + response = self._do_request('EstimateTemplateCost', params, '/', 'POST') + return response['EstimateTemplateCostResponse']\ + ['EstimateTemplateCostResult']\ + ['Url'] + + def get_stack_policy(self, stack_name_or_id): + """ + Returns the stack policy for a specified stack. If a stack + doesn't have a policy, a null value is returned. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or stack ID that is associated with + the stack whose policy you want to get. + + :rtype: string + :return: The policy JSON document + """ + params = {'ContentType': "JSON", 'StackName': stack_name_or_id, } + response = self._do_request('GetStackPolicy', params, '/', 'POST') + return response['GetStackPolicyResponse']\ + ['GetStackPolicyResult']\ + ['StackPolicyBody'] + + def set_stack_policy(self, stack_name_or_id, stack_policy_body=None, + stack_policy_url=None): + """ + Sets a stack policy for a specified stack. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or stack ID that you want to + associate a policy with. + + :type stack_policy_body: string + :param stack_policy_body: Structure containing the stack policy body. + (For more information, go to ` Prevent Updates to Stack Resources`_ + in the AWS CloudFormation User Guide.) + You must pass `StackPolicyBody` or `StackPolicyURL`. If both are + passed, only `StackPolicyBody` is used. + + :type stack_policy_url: string + :param stack_policy_url: Location of a file containing the stack + policy. The URL must point to a policy (max size: 16KB) located in + an S3 bucket in the same region as the stack. 
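For the cost and policy helpers in this stretch (estimate_template_cost above, get_stack_policy and set_stack_policy here), a brief sketch under the same conn and template assumptions; the parameter tuple is hypothetical:

    # Returns an AWS Simple Monthly Calculator URL describing the template.
    calc_url = conn.estimate_template_cost(
        template_body=template,
        parameters=[('InstanceType', 't2.micro')])  # hypothetical parameter
    print(calc_url)

    # Returns the stack policy JSON document, or None if no policy is set.
    print(conn.get_stack_policy('demo-stack'))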
You must pass
+            `StackPolicyBody` or `StackPolicyURL`. If both are passed, only
+            `StackPolicyBody` is used.
+
+        """
+        params = {'ContentType': "JSON", 'StackName': stack_name_or_id, }
+        if stack_policy_body is not None:
+            params['StackPolicyBody'] = stack_policy_body
+        if stack_policy_url is not None:
+            params['StackPolicyURL'] = stack_policy_url
+
+        response = self._do_request('SetStackPolicy', params, '/', 'POST')
+        return response['SetStackPolicyResponse']
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudformation/stack.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudformation/stack.py
new file mode 100755
index 0000000000000000000000000000000000000000..5dac0dd7cfce70519ff2ceb8d7fbdce7661ce954
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudformation/stack.py
@@ -0,0 +1,415 @@
+from datetime import datetime
+
+from boto.resultset import ResultSet
+
+
+class Stack(object):
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.creation_time = None
+        self.description = None
+        self.disable_rollback = None
+        self.notification_arns = []
+        self.outputs = []
+        self.parameters = []
+        self.capabilities = []
+        self.tags = []
+        self.stack_id = None
+        self.stack_status = None
+        self.stack_name = None
+        # Matches the StackStatusReason element handled in endElement below.
+        self.stack_status_reason = None
+        self.timeout_in_minutes = None
+
+    def startElement(self, name, attrs, connection):
+        if name == "Parameters":
+            self.parameters = ResultSet([('member', Parameter)])
+            return self.parameters
+        elif name == "Outputs":
+            self.outputs = ResultSet([('member', Output)])
+            return self.outputs
+        elif name == "Capabilities":
+            self.capabilities = ResultSet([('member', Capability)])
+            return self.capabilities
+        elif name == "Tags":
+            self.tags = Tag()
+            return self.tags
+        elif name == 'NotificationARNs':
+            self.notification_arns = ResultSet([('member', NotificationARN)])
+            return self.notification_arns
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'CreationTime':
+            try:
+                self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
+            except ValueError:
+                self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
+        elif name == "Description":
+            self.description = value
+        elif name == "DisableRollback":
+            if str(value).lower() == 'true':
+                self.disable_rollback = True
+            else:
+                self.disable_rollback = False
+        elif name == 'StackId':
+            self.stack_id = value
+        elif name == 'StackName':
+            self.stack_name = value
+        elif name == 'StackStatus':
+            self.stack_status = value
+        elif name == "StackStatusReason":
+            self.stack_status_reason = value
+        elif name == "TimeoutInMinutes":
+            self.timeout_in_minutes = int(value)
+        elif name == "member":
+            pass
+        else:
+            setattr(self, name, value)
+
+    def delete(self):
+        return self.connection.delete_stack(stack_name_or_id=self.stack_id)
+
+    def describe_events(self, next_token=None):
+        return self.connection.describe_stack_events(
+            stack_name_or_id=self.stack_id,
+            next_token=next_token
+        )
+
+    def describe_resource(self, logical_resource_id):
+        return self.connection.describe_stack_resource(
+            stack_name_or_id=self.stack_id,
+            logical_resource_id=logical_resource_id
+        )
+
+    def describe_resources(self, logical_resource_id=None,
+                           physical_resource_id=None):
+        return self.connection.describe_stack_resources(
+            stack_name_or_id=self.stack_id,
+            logical_resource_id=logical_resource_id,
+            physical_resource_id=physical_resource_id
+        )
+
+    def list_resources(self, next_token=None):
+        return self.connection.list_stack_resources(
stack_name_or_id=self.stack_id, + next_token=next_token + ) + + def update(self): + rs = self.connection.describe_stacks(self.stack_id) + if len(rs) == 1 and rs[0].stack_id == self.stack_id: + self.__dict__.update(rs[0].__dict__) + else: + raise ValueError("%s is not a valid Stack ID or Name" % + self.stack_id) + + def get_template(self): + return self.connection.get_template(stack_name_or_id=self.stack_id) + + def get_policy(self): + """ + Returns the stack policy for this stack. If it has no policy + then, a null value is returned. + """ + return self.connection.get_stack_policy(self.stack_id) + + def set_policy(self, stack_policy_body=None, stack_policy_url=None): + """ + Sets a stack policy for this stack. + + :type stack_policy_body: string + :param stack_policy_body: Structure containing the stack policy body. + (For more information, go to ` Prevent Updates to Stack Resources`_ + in the AWS CloudFormation User Guide.) + You must pass `StackPolicyBody` or `StackPolicyURL`. If both are + passed, only `StackPolicyBody` is used. + + :type stack_policy_url: string + :param stack_policy_url: Location of a file containing the stack + policy. The URL must point to a policy (max size: 16KB) located in + an S3 bucket in the same region as the stack. You must pass + `StackPolicyBody` or `StackPolicyURL`. If both are passed, only + `StackPolicyBody` is used. + """ + return self.connection.set_stack_policy(self.stack_id, + stack_policy_body=stack_policy_body, + stack_policy_url=stack_policy_url) + + +class StackSummary(object): + def __init__(self, connection=None): + self.connection = connection + self.stack_id = None + self.stack_status = None + self.stack_name = None + self.creation_time = None + self.deletion_time = None + self.template_description = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'StackId': + self.stack_id = value + elif name == 'StackStatus': + self.stack_status = value + elif name == 'StackName': + self.stack_name = value + elif name == 'CreationTime': + try: + self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') + except ValueError: + self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ') + elif name == "DeletionTime": + try: + self.deletion_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') + except ValueError: + self.deletion_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ') + elif name == 'TemplateDescription': + self.template_description = value + elif name == "member": + pass + else: + setattr(self, name, value) + + +class Parameter(object): + def __init__(self, connection=None): + self.connection = None + self.key = None + self.value = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == "ParameterKey": + self.key = value + elif name == "ParameterValue": + self.value = value + else: + setattr(self, name, value) + + def __repr__(self): + return "Parameter:\"%s\"=\"%s\"" % (self.key, self.value) + + +class Output(object): + def __init__(self, connection=None): + self.connection = connection + self.description = None + self.key = None + self.value = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == "Description": + self.description = value + elif name == "OutputKey": + self.key = value + elif name == "OutputValue": + self.value = value + else: + setattr(self, name, value) + + 
def __repr__(self): + return "Output:\"%s\"=\"%s\"" % (self.key, self.value) + + +class Capability(object): + def __init__(self, connection=None): + self.connection = None + self.value = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + self.value = value + + def __repr__(self): + return "Capability:\"%s\"" % (self.value) + + +class Tag(dict): + + def __init__(self, connection=None): + dict.__init__(self) + self.connection = connection + self._current_key = None + self._current_value = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == "Key": + self._current_key = value + elif name == "Value": + self._current_value = value + else: + setattr(self, name, value) + + if self._current_key and self._current_value: + self[self._current_key] = self._current_value + self._current_key = None + self._current_value = None + + +class NotificationARN(object): + def __init__(self, connection=None): + self.connection = None + self.value = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + self.value = value + + def __repr__(self): + return "NotificationARN:\"%s\"" % (self.value) + + +class StackResource(object): + def __init__(self, connection=None): + self.connection = connection + self.description = None + self.logical_resource_id = None + self.physical_resource_id = None + self.resource_status = None + self.resource_status_reason = None + self.resource_type = None + self.stack_id = None + self.stack_name = None + self.timestamp = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == "Description": + self.description = value + elif name == "LogicalResourceId": + self.logical_resource_id = value + elif name == "PhysicalResourceId": + self.physical_resource_id = value + elif name == "ResourceStatus": + self.resource_status = value + elif name == "ResourceStatusReason": + self.resource_status_reason = value + elif name == "ResourceType": + self.resource_type = value + elif name == "StackId": + self.stack_id = value + elif name == "StackName": + self.stack_name = value + elif name == "Timestamp": + try: + self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') + except ValueError: + self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ') + else: + setattr(self, name, value) + + def __repr__(self): + return "StackResource:%s (%s)" % (self.logical_resource_id, + self.resource_type) + + +class StackResourceSummary(object): + def __init__(self, connection=None): + self.connection = connection + self.last_updated_time = None + self.logical_resource_id = None + self.physical_resource_id = None + self.resource_status = None + self.resource_status_reason = None + self.resource_type = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == "LastUpdatedTime": + try: + self.last_updated_time = datetime.strptime( + value, + '%Y-%m-%dT%H:%M:%SZ' + ) + except ValueError: + self.last_updated_time = datetime.strptime( + value, + '%Y-%m-%dT%H:%M:%S.%fZ' + ) + elif name == "LogicalResourceId": + self.logical_resource_id = value + elif name == "PhysicalResourceId": + self.physical_resource_id = value + elif name == "ResourceStatus": + self.resource_status = value + elif name == "ResourceStatusReason": + 
self.resource_status_reason = value + elif name == "ResourceType": + self.resource_type = value + else: + setattr(self, name, value) + + def __repr__(self): + return "StackResourceSummary:%s (%s)" % (self.logical_resource_id, + self.resource_type) + + +class StackEvent(object): + valid_states = ("CREATE_IN_PROGRESS", "CREATE_FAILED", "CREATE_COMPLETE", + "DELETE_IN_PROGRESS", "DELETE_FAILED", "DELETE_COMPLETE") + def __init__(self, connection=None): + self.connection = connection + self.event_id = None + self.logical_resource_id = None + self.physical_resource_id = None + self.resource_properties = None + self.resource_status = None + self.resource_status_reason = None + self.resource_type = None + self.stack_id = None + self.stack_name = None + self.timestamp = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == "EventId": + self.event_id = value + elif name == "LogicalResourceId": + self.logical_resource_id = value + elif name == "PhysicalResourceId": + self.physical_resource_id = value + elif name == "ResourceProperties": + self.resource_properties = value + elif name == "ResourceStatus": + self.resource_status = value + elif name == "ResourceStatusReason": + self.resource_status_reason = value + elif name == "ResourceType": + self.resource_type = value + elif name == "StackId": + self.stack_id = value + elif name == "StackName": + self.stack_name = value + elif name == "Timestamp": + try: + self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') + except ValueError: + self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ') + else: + setattr(self, name, value) + + def __repr__(self): + return "StackEvent %s %s %s" % (self.resource_type, + self.logical_resource_id, self.resource_status) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudformation/template.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudformation/template.py new file mode 100644 index 0000000000000000000000000000000000000000..bab2148630c105a3f31ec45746476a164a889901 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudformation/template.py @@ -0,0 +1,51 @@ +from boto.resultset import ResultSet +from boto.cloudformation.stack import Capability + +class Template(object): + def __init__(self, connection=None): + self.connection = connection + self.description = None + self.template_parameters = None + self.capabilities_reason = None + self.capabilities = None + + def startElement(self, name, attrs, connection): + if name == "Parameters": + self.template_parameters = ResultSet([('member', TemplateParameter)]) + return self.template_parameters + elif name == "Capabilities": + self.capabilities = ResultSet([('member', Capability)]) + return self.capabilities + else: + return None + + def endElement(self, name, value, connection): + if name == "Description": + self.description = value + elif name == "CapabilitiesReason": + self.capabilities_reason = value + else: + setattr(self, name, value) + +class TemplateParameter(object): + def __init__(self, parent): + self.parent = parent + self.default_value = None + self.description = None + self.no_echo = None + self.parameter_key = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == "DefaultValue": + self.default_value = value + elif name == "Description": + self.description = value + elif name == "NoEcho": + self.no_echo = bool(value) + elif name == "ParameterKey": + self.parameter_key = 
value + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1afefebbf3f7daeef67303c0d86e09244df94d63 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/__init__.py @@ -0,0 +1,326 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import xml.sax +import time +import boto +from boto.connection import AWSAuthConnection +from boto import handler +from boto.cloudfront.distribution import Distribution, DistributionSummary, DistributionConfig +from boto.cloudfront.distribution import StreamingDistribution, StreamingDistributionSummary, StreamingDistributionConfig +from boto.cloudfront.identity import OriginAccessIdentity +from boto.cloudfront.identity import OriginAccessIdentitySummary +from boto.cloudfront.identity import OriginAccessIdentityConfig +from boto.cloudfront.invalidation import InvalidationBatch, InvalidationSummary, InvalidationListResultSet +from boto.resultset import ResultSet +from boto.cloudfront.exception import CloudFrontServerError + + +class CloudFrontConnection(AWSAuthConnection): + + DefaultHost = 'cloudfront.amazonaws.com' + Version = '2010-11-01' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + port=None, proxy=None, proxy_port=None, + host=DefaultHost, debug=0, security_token=None, + validate_certs=True, profile_name=None, https_connection_factory=None): + super(CloudFrontConnection, self).__init__(host, + aws_access_key_id, aws_secret_access_key, + True, port, proxy, proxy_port, debug=debug, + security_token=security_token, + validate_certs=validate_certs, + https_connection_factory=https_connection_factory, + profile_name=profile_name) + + def get_etag(self, response): + response_headers = response.msg + for key in response_headers.keys(): + if key.lower() == 'etag': + return response_headers[key] + return None + + def _required_auth_capability(self): + return ['cloudfront'] + + # Generics + + def _get_all_objects(self, resource, tags, result_set_class=None, + result_set_kwargs=None): + if not tags: + tags = [('DistributionSummary', DistributionSummary)] + response = self.make_request('GET', '/%s/%s' % (self.Version, + resource)) + body = response.read() + boto.log.debug(body) + if response.status >= 300: + raise CloudFrontServerError(response.status, response.reason, body) + 
rs_class = result_set_class or ResultSet + rs_kwargs = result_set_kwargs or dict() + rs = rs_class(tags, **rs_kwargs) + h = handler.XmlHandler(rs, self) + xml.sax.parseString(body, h) + return rs + + def _get_info(self, id, resource, dist_class): + uri = '/%s/%s/%s' % (self.Version, resource, id) + response = self.make_request('GET', uri) + body = response.read() + boto.log.debug(body) + if response.status >= 300: + raise CloudFrontServerError(response.status, response.reason, body) + d = dist_class(connection=self) + response_headers = response.msg + for key in response_headers.keys(): + if key.lower() == 'etag': + d.etag = response_headers[key] + h = handler.XmlHandler(d, self) + xml.sax.parseString(body, h) + return d + + def _get_config(self, id, resource, config_class): + uri = '/%s/%s/%s/config' % (self.Version, resource, id) + response = self.make_request('GET', uri) + body = response.read() + boto.log.debug(body) + if response.status >= 300: + raise CloudFrontServerError(response.status, response.reason, body) + d = config_class(connection=self) + d.etag = self.get_etag(response) + h = handler.XmlHandler(d, self) + xml.sax.parseString(body, h) + return d + + def _set_config(self, distribution_id, etag, config): + if isinstance(config, StreamingDistributionConfig): + resource = 'streaming-distribution' + else: + resource = 'distribution' + uri = '/%s/%s/%s/config' % (self.Version, resource, distribution_id) + headers = {'If-Match': etag, 'Content-Type': 'text/xml'} + response = self.make_request('PUT', uri, headers, config.to_xml()) + body = response.read() + boto.log.debug(body) + if response.status != 200: + raise CloudFrontServerError(response.status, response.reason, body) + return self.get_etag(response) + + def _create_object(self, config, resource, dist_class): + response = self.make_request('POST', '/%s/%s' % (self.Version, + resource), + {'Content-Type': 'text/xml'}, + data=config.to_xml()) + body = response.read() + boto.log.debug(body) + if response.status == 201: + d = dist_class(connection=self) + h = handler.XmlHandler(d, self) + xml.sax.parseString(body, h) + d.etag = self.get_etag(response) + return d + else: + raise CloudFrontServerError(response.status, response.reason, body) + + def _delete_object(self, id, etag, resource): + uri = '/%s/%s/%s' % (self.Version, resource, id) + response = self.make_request('DELETE', uri, {'If-Match': etag}) + body = response.read() + boto.log.debug(body) + if response.status != 204: + raise CloudFrontServerError(response.status, response.reason, body) + + # Distributions + + def get_all_distributions(self): + tags = [('DistributionSummary', DistributionSummary)] + return self._get_all_objects('distribution', tags) + + def get_distribution_info(self, distribution_id): + return self._get_info(distribution_id, 'distribution', Distribution) + + def get_distribution_config(self, distribution_id): + return self._get_config(distribution_id, 'distribution', + DistributionConfig) + + def set_distribution_config(self, distribution_id, etag, config): + return self._set_config(distribution_id, etag, config) + + def create_distribution(self, origin, enabled, caller_reference='', + cnames=None, comment='', trusted_signers=None): + config = DistributionConfig(origin=origin, enabled=enabled, + caller_reference=caller_reference, + cnames=cnames, comment=comment, + trusted_signers=trusted_signers) + return self._create_object(config, 'distribution', Distribution) + + def delete_distribution(self, distribution_id, etag): + return 
self._delete_object(distribution_id, etag, 'distribution') + + # Streaming Distributions + + def get_all_streaming_distributions(self): + tags = [('StreamingDistributionSummary', StreamingDistributionSummary)] + return self._get_all_objects('streaming-distribution', tags) + + def get_streaming_distribution_info(self, distribution_id): + return self._get_info(distribution_id, 'streaming-distribution', + StreamingDistribution) + + def get_streaming_distribution_config(self, distribution_id): + return self._get_config(distribution_id, 'streaming-distribution', + StreamingDistributionConfig) + + def set_streaming_distribution_config(self, distribution_id, etag, config): + return self._set_config(distribution_id, etag, config) + + def create_streaming_distribution(self, origin, enabled, + caller_reference='', + cnames=None, comment='', + trusted_signers=None): + config = StreamingDistributionConfig(origin=origin, enabled=enabled, + caller_reference=caller_reference, + cnames=cnames, comment=comment, + trusted_signers=trusted_signers) + return self._create_object(config, 'streaming-distribution', + StreamingDistribution) + + def delete_streaming_distribution(self, distribution_id, etag): + return self._delete_object(distribution_id, etag, + 'streaming-distribution') + + # Origin Access Identity + + def get_all_origin_access_identity(self): + tags = [('CloudFrontOriginAccessIdentitySummary', + OriginAccessIdentitySummary)] + return self._get_all_objects('origin-access-identity/cloudfront', tags) + + def get_origin_access_identity_info(self, access_id): + return self._get_info(access_id, 'origin-access-identity/cloudfront', + OriginAccessIdentity) + + def get_origin_access_identity_config(self, access_id): + return self._get_config(access_id, + 'origin-access-identity/cloudfront', + OriginAccessIdentityConfig) + + def set_origin_access_identity_config(self, access_id, + etag, config): + return self._set_config(access_id, etag, config) + + def create_origin_access_identity(self, caller_reference='', comment=''): + config = OriginAccessIdentityConfig(caller_reference=caller_reference, + comment=comment) + return self._create_object(config, 'origin-access-identity/cloudfront', + OriginAccessIdentity) + + def delete_origin_access_identity(self, access_id, etag): + return self._delete_object(access_id, etag, + 'origin-access-identity/cloudfront') + + # Object Invalidation + + def create_invalidation_request(self, distribution_id, paths, + caller_reference=None): + """Creates a new invalidation request + :see: http://goo.gl/8vECq + """ + # We allow you to pass in either an array or + # an InvalidationBatch object + if not isinstance(paths, InvalidationBatch): + paths = InvalidationBatch(paths) + paths.connection = self + uri = '/%s/distribution/%s/invalidation' % (self.Version, + distribution_id) + response = self.make_request('POST', uri, + {'Content-Type': 'text/xml'}, + data=paths.to_xml()) + body = response.read() + if response.status == 201: + h = handler.XmlHandler(paths, self) + xml.sax.parseString(body, h) + return paths + else: + raise CloudFrontServerError(response.status, response.reason, body) + + def invalidation_request_status(self, distribution_id, + request_id, caller_reference=None): + uri = '/%s/distribution/%s/invalidation/%s' % (self.Version, + distribution_id, + request_id) + response = self.make_request('GET', uri, {'Content-Type': 'text/xml'}) + body = response.read() + if response.status == 200: + paths = InvalidationBatch([]) + h = handler.XmlHandler(paths, self) + 
xml.sax.parseString(body, h) + return paths + else: + raise CloudFrontServerError(response.status, response.reason, body) + + def get_invalidation_requests(self, distribution_id, marker=None, + max_items=None): + """ + Get all invalidation requests for a given CloudFront distribution. + This returns an instance of an InvalidationListResultSet that + automatically handles all of the result paging, etc. from CF - you just + need to keep iterating until there are no more results. + + :type distribution_id: string + :param distribution_id: The id of the CloudFront distribution + + :type marker: string + :param marker: Use this only when paginating results and only in + follow-up request after you've received a response where + the results are truncated. Set this to the value of the + Marker element in the response you just received. + + :type max_items: int + :param max_items: Use this only when paginating results and only in a + follow-up request to indicate the maximum number of + invalidation requests you want in the response. You + will need to pass the next_marker property from the + previous InvalidationListResultSet response in the + follow-up request in order to get the next 'page' of + results. + + :rtype: :class:`boto.cloudfront.invalidation.InvalidationListResultSet` + :returns: An InvalidationListResultSet iterator that lists invalidation + requests for a given CloudFront distribution. Automatically + handles paging the results. + """ + uri = 'distribution/%s/invalidation' % distribution_id + params = dict() + if marker: + params['Marker'] = marker + if max_items: + params['MaxItems'] = max_items + if params: + uri += '?%s=%s' % params.popitem() + for k, v in params.items(): + uri += '&%s=%s' % (k, v) + tags=[('InvalidationSummary', InvalidationSummary)] + rs_class = InvalidationListResultSet + rs_kwargs = dict(connection=self, distribution_id=distribution_id, + max_items=max_items, marker=marker) + return self._get_all_objects(uri, tags, result_set_class=rs_class, + result_set_kwargs=rs_kwargs) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/distribution.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/distribution.py new file mode 100644 index 0000000000000000000000000000000000000000..7131c86c834e21e4c99338829382c4943c8f45a6 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/distribution.py @@ -0,0 +1,757 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
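Before the distribution model classes below, a short usage sketch for the CloudFrontConnection methods defined above (editor's illustration; the distribution id is a placeholder and credentials are assumed to come from boto's standard configuration chain):

    from boto.cloudfront import CloudFrontConnection

    # Credentials come from the usual boto configuration chain.
    cf = CloudFrontConnection()
    for summary in cf.get_all_distributions():
        print(summary.id, summary.domain_name, summary.status)

    # Invalidate a path on a distribution (the id is a placeholder); the
    # returned InvalidationBatch carries the id parsed from the response.
    batch = cf.create_invalidation_request('EDFDVBD6EXAMPLE', ['/index.html'])
    print(batch.id)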
+
+import uuid
+import base64
+import time
+from boto.compat import six, json
+from boto.cloudfront.identity import OriginAccessIdentity
+from boto.cloudfront.object import Object, StreamingObject
+from boto.cloudfront.signers import ActiveTrustedSigners, TrustedSigners
+from boto.cloudfront.logging import LoggingInfo
+from boto.cloudfront.origin import S3Origin, CustomOrigin
+from boto.s3.acl import ACL
+
+class DistributionConfig(object):
+
+    def __init__(self, connection=None, origin=None, enabled=False,
+                 caller_reference='', cnames=None, comment='',
+                 trusted_signers=None, default_root_object=None,
+                 logging=None):
+        """
+        :param origin: Origin information to associate with the
+                       distribution. If your distribution will use
+                       an Amazon S3 origin, then this should be an
+                       S3Origin object. If your distribution will use
+                       a custom origin (non Amazon S3), then this
+                       should be a CustomOrigin object.
+        :type origin: :class:`boto.cloudfront.origin.S3Origin` or
+                      :class:`boto.cloudfront.origin.CustomOrigin`
+
+        :param enabled: Whether the distribution is enabled to accept
+                        end user requests for content.
+        :type enabled: bool
+
+        :param caller_reference: A unique number that ensures the
+                                 request can't be replayed. If no
+                                 caller_reference is provided, boto
+                                 will generate a type 4 UUID for use
+                                 as the caller reference.
+        :type caller_reference: str
+
+        :param cnames: A CNAME alias you want to associate with this
+                       distribution. You can have up to 10 CNAME aliases
+                       per distribution.
+        :type cnames: array of str
+
+        :param comment: Any comments you want to include about the
+                        distribution.
+        :type comment: str
+
+        :param trusted_signers: Specifies any AWS accounts you want to
+                                permit to create signed URLs for private
+                                content. If you want the distribution to
+                                use signed URLs, this should contain a
+                                TrustedSigners object; if you want the
+                                distribution to use basic URLs, leave
+                                this None.
+        :type trusted_signers: :class:`boto.cloudfront.signers.TrustedSigners`
+
+        :param default_root_object: Designates a default root object.
+                                    Only include a DefaultRootObject value
+                                    if you are going to assign a default
+                                    root object for the distribution.
+        :type default_root_object: str
+
+        :param logging: Controls whether access logs are written for the
+                        distribution. If you want to turn on access logs,
+                        this should contain a LoggingInfo object; otherwise
+                        it should contain None.
+        :type logging: :class:`boto.cloudfront.logging.LoggingInfo`
+
+        """
+        self.connection = connection
+        self.origin = origin
+        self.enabled = enabled
+        if caller_reference:
+            self.caller_reference = caller_reference
+        else:
+            self.caller_reference = str(uuid.uuid4())
+        self.cnames = []
+        if cnames:
+            self.cnames = cnames
+        self.comment = comment
+        self.trusted_signers = trusted_signers
+        self.logging = logging
+        self.default_root_object = default_root_object
+
+    def __repr__(self):
+        return "DistributionConfig:%s" % self.origin
+
+    def to_xml(self):
+        s = '<?xml version="1.0" encoding="UTF-8"?>\n'
+        s += '<DistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n'
+        if self.origin:
+            s += self.origin.to_xml()
+        s += '  <CallerReference>%s</CallerReference>\n' % self.caller_reference
+        for cname in self.cnames:
+            s += '  <CNAME>%s</CNAME>\n' % cname
+        if self.comment:
+            s += '  <Comment>%s</Comment>\n' % self.comment
+        s += '  <Enabled>'
+        if self.enabled:
+            s += 'true'
+        else:
+            s += 'false'
+        s += '</Enabled>\n'
+        if self.trusted_signers:
+            s += '<TrustedSigners>\n'
+            for signer in self.trusted_signers:
+                if signer == 'Self':
+                    s += '  <Self/>\n'
+                else:
+                    s += '  <AwsAccountNumber>%s</AwsAccountNumber>\n' % signer
+            s += '</TrustedSigners>\n'
+        if self.logging:
+            s += '<Logging>\n'
+            s += '  <Bucket>%s</Bucket>\n' % self.logging.bucket
+            s += '  <Prefix>%s</Prefix>\n' % self.logging.prefix
+            s += '</Logging>\n'
+        if self.default_root_object:
+            dro = self.default_root_object
+            s += '<DefaultRootObject>%s</DefaultRootObject>\n' % dro
+        s += '</DistributionConfig>\n'
+        return s
+
+    def startElement(self, name, attrs, connection):
+        if name == 'TrustedSigners':
+            self.trusted_signers = TrustedSigners()
+            return self.trusted_signers
+        elif name == 'Logging':
+            self.logging = LoggingInfo()
+            return self.logging
+        elif name == 'S3Origin':
+            self.origin = S3Origin()
+            return self.origin
+        elif name == 'CustomOrigin':
+            self.origin = CustomOrigin()
+            return self.origin
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'CNAME':
+            self.cnames.append(value)
+        elif name == 'Comment':
+            self.comment = value
+        elif name == 'Enabled':
+            if value.lower() == 'true':
+                self.enabled = True
+            else:
+                self.enabled = False
+        elif name == 'CallerReference':
+            self.caller_reference = value
+        elif name == 'DefaultRootObject':
+            self.default_root_object = value
+        else:
+            setattr(self, name, value)
+
+class StreamingDistributionConfig(DistributionConfig):
+
+    def __init__(self, connection=None, origin='', enabled=False,
+                 caller_reference='', cnames=None, comment='',
+                 trusted_signers=None, logging=None):
+        super(StreamingDistributionConfig, self).__init__(connection=connection,
+                                                          origin=origin, enabled=enabled,
+                                                          caller_reference=caller_reference,
+                                                          cnames=cnames, comment=comment,
+                                                          trusted_signers=trusted_signers,
+                                                          logging=logging)
+    def to_xml(self):
+        s = '<?xml version="1.0" encoding="UTF-8"?>\n'
+        s += '<StreamingDistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n'
+        if self.origin:
+            s += self.origin.to_xml()
+        s += '  <CallerReference>%s</CallerReference>\n' % self.caller_reference
+        for cname in self.cnames:
+            s += '  <CNAME>%s</CNAME>\n' % cname
+        if self.comment:
+            s += '  <Comment>%s</Comment>\n' % self.comment
+        s += '  <Enabled>'
+        if self.enabled:
+            s += 'true'
+        else:
+            s += 'false'
+        s += '</Enabled>\n'
+        if self.trusted_signers:
+            s += '<TrustedSigners>\n'
+            for signer in self.trusted_signers:
+                if signer == 'Self':
+                    s += '  <Self/>\n'
+                else:
+                    s += '  <AwsAccountNumber>%s</AwsAccountNumber>\n' % signer
+            s += '</TrustedSigners>\n'
+        if self.logging:
+            s += '<Logging>\n'
+            s += '  <Bucket>%s</Bucket>\n' % self.logging.bucket
+            s += '  <Prefix>%s</Prefix>\n' % self.logging.prefix
+            s += '</Logging>\n'
+        s += '</StreamingDistributionConfig>\n'
+        return s
+
+class DistributionSummary(object):
+
+    def __init__(self, connection=None, domain_name='', id='',
+                 last_modified_time=None, status='', origin=None,
+                 cname='', comment='', enabled=False):
+        self.connection = connection
+        self.domain_name = domain_name
+        self.id = id
+        self.last_modified_time = last_modified_time
+        self.status = status
+        self.origin = origin
+        self.enabled = enabled
+        self.cnames = []
+        if cname:
self.cnames.append(cname) + self.comment = comment + self.trusted_signers = None + self.etag = None + self.streaming = False + + def __repr__(self): + return "DistributionSummary:%s" % self.domain_name + + def startElement(self, name, attrs, connection): + if name == 'TrustedSigners': + self.trusted_signers = TrustedSigners() + return self.trusted_signers + elif name == 'S3Origin': + self.origin = S3Origin() + return self.origin + elif name == 'CustomOrigin': + self.origin = CustomOrigin() + return self.origin + return None + + def endElement(self, name, value, connection): + if name == 'Id': + self.id = value + elif name == 'Status': + self.status = value + elif name == 'LastModifiedTime': + self.last_modified_time = value + elif name == 'DomainName': + self.domain_name = value + elif name == 'Origin': + self.origin = value + elif name == 'CNAME': + self.cnames.append(value) + elif name == 'Comment': + self.comment = value + elif name == 'Enabled': + if value.lower() == 'true': + self.enabled = True + else: + self.enabled = False + elif name == 'StreamingDistributionSummary': + self.streaming = True + else: + setattr(self, name, value) + + def get_distribution(self): + return self.connection.get_distribution_info(self.id) + +class StreamingDistributionSummary(DistributionSummary): + + def get_distribution(self): + return self.connection.get_streaming_distribution_info(self.id) + +class Distribution(object): + + def __init__(self, connection=None, config=None, domain_name='', + id='', last_modified_time=None, status=''): + self.connection = connection + self.config = config + self.domain_name = domain_name + self.id = id + self.last_modified_time = last_modified_time + self.status = status + self.in_progress_invalidation_batches = 0 + self.active_signers = None + self.etag = None + self._bucket = None + self._object_class = Object + + def __repr__(self): + return "Distribution:%s" % self.domain_name + + def startElement(self, name, attrs, connection): + if name == 'DistributionConfig': + self.config = DistributionConfig() + return self.config + elif name == 'ActiveTrustedSigners': + self.active_signers = ActiveTrustedSigners() + return self.active_signers + else: + return None + + def endElement(self, name, value, connection): + if name == 'Id': + self.id = value + elif name == 'LastModifiedTime': + self.last_modified_time = value + elif name == 'Status': + self.status = value + elif name == 'InProgressInvalidationBatches': + self.in_progress_invalidation_batches = int(value) + elif name == 'DomainName': + self.domain_name = value + else: + setattr(self, name, value) + + def update(self, enabled=None, cnames=None, comment=None): + """ + Update the configuration of the Distribution. The only values + of the DistributionConfig that can be directly updated are: + + * CNAMES + * Comment + * Whether the Distribution is enabled or not + + Any changes to the ``trusted_signers`` or ``origin`` properties of + this distribution's current config object will also be included in + the update. Therefore, to set the origin access identity for this + distribution, set ``Distribution.config.origin.origin_access_identity`` + before calling this update method. + + :type enabled: bool + :param enabled: Whether the Distribution is active or not. + + :type cnames: list of str + :param cnames: The DNS CNAME's associated with this + Distribution. Maximum of 10 values. + + :type comment: str or unicode + :param comment: The comment associated with the Distribution. 
+ + """ + new_config = DistributionConfig(self.connection, self.config.origin, + self.config.enabled, self.config.caller_reference, + self.config.cnames, self.config.comment, + self.config.trusted_signers, + self.config.default_root_object) + if enabled is not None: + new_config.enabled = enabled + if cnames is not None: + new_config.cnames = cnames + if comment is not None: + new_config.comment = comment + self.etag = self.connection.set_distribution_config(self.id, self.etag, new_config) + self.config = new_config + self._object_class = Object + + def enable(self): + """ + Activate the Distribution. A convenience wrapper around + the update method. + """ + self.update(enabled=True) + + def disable(self): + """ + Deactivate the Distribution. A convenience wrapper around + the update method. + """ + self.update(enabled=False) + + def delete(self): + """ + Delete this CloudFront Distribution. The content + associated with the Distribution is not deleted from + the underlying Origin bucket in S3. + """ + self.connection.delete_distribution(self.id, self.etag) + + def _get_bucket(self): + if isinstance(self.config.origin, S3Origin): + if not self._bucket: + bucket_dns_name = self.config.origin.dns_name + bucket_name = bucket_dns_name.replace('.s3.amazonaws.com', '') + from boto.s3.connection import S3Connection + s3 = S3Connection(self.connection.aws_access_key_id, + self.connection.aws_secret_access_key, + proxy=self.connection.proxy, + proxy_port=self.connection.proxy_port, + proxy_user=self.connection.proxy_user, + proxy_pass=self.connection.proxy_pass) + self._bucket = s3.get_bucket(bucket_name) + self._bucket.distribution = self + self._bucket.set_key_class(self._object_class) + return self._bucket + else: + raise NotImplementedError('Unable to get_objects on CustomOrigin') + + def get_objects(self): + """ + Return a list of all content objects in this distribution. + + :rtype: list of :class:`boto.cloudfront.object.Object` + :return: The content objects + """ + bucket = self._get_bucket() + objs = [] + for key in bucket: + objs.append(key) + return objs + + def set_permissions(self, object, replace=False): + """ + Sets the S3 ACL grants for the given object to the appropriate + value based on the type of Distribution. If the Distribution + is serving private content the ACL will be set to include the + Origin Access Identity associated with the Distribution. If + the Distribution is serving public content the content will + be set up with "public-read". + + :type object: :class:`boto.cloudfront.object.Object` + :param enabled: The Object whose ACL is being set + + :type replace: bool + :param replace: If False, the Origin Access Identity will be + appended to the existing ACL for the object. + If True, the ACL for the object will be + completely replaced with one that grants + READ permission to the Origin Access Identity. + + """ + if isinstance(self.config.origin, S3Origin): + if self.config.origin.origin_access_identity: + id = self.config.origin.origin_access_identity.split('/')[-1] + oai = self.connection.get_origin_access_identity_info(id) + policy = object.get_acl() + if replace: + policy.acl = ACL() + policy.acl.add_user_grant('READ', oai.s3_user_id) + object.set_acl(policy) + else: + object.set_canned_acl('public-read') + + def set_permissions_all(self, replace=False): + """ + Sets the S3 ACL grants for all objects in the Distribution + to the appropriate value based on the type of Distribution. 
+
+        :type replace: bool
+        :param replace: If False, the Origin Access Identity will be
+                        appended to the existing ACL for the object.
+                        If True, the ACL for the object will be
+                        completely replaced with one that grants
+                        READ permission to the Origin Access Identity.
+
+        """
+        bucket = self._get_bucket()
+        for key in bucket:
+            self.set_permissions(key, replace)
+
+    def add_object(self, name, content, headers=None, replace=True):
+        """
+        Adds a new content object to the Distribution. The content
+        for the object will be copied to a new Key in the S3 Bucket
+        and the permissions will be set appropriately for the type
+        of Distribution.
+
+        :type name: str or unicode
+        :param name: The name or key of the new object.
+
+        :type content: file-like object
+        :param content: A file-like object that contains the content
+                        for the new object.
+
+        :type headers: dict
+        :param headers: A dictionary containing additional headers
+                        you would like associated with the new
+                        object in S3.
+
+        :rtype: :class:`boto.cloudfront.object.Object`
+        :return: The newly created object.
+        """
+        if self.config.origin.origin_access_identity:
+            policy = 'private'
+        else:
+            policy = 'public-read'
+        bucket = self._get_bucket()
+        object = bucket.new_key(name)
+        object.set_contents_from_file(content, headers=headers, policy=policy)
+        if self.config.origin.origin_access_identity:
+            self.set_permissions(object, replace)
+        return object
+
+    def create_signed_url(self, url, keypair_id,
+                          expire_time=None, valid_after_time=None,
+                          ip_address=None, policy_url=None,
+                          private_key_file=None, private_key_string=None):
+        """
+        Creates a signed CloudFront URL that is only valid within the specified
+        parameters.
+
+        :type url: str
+        :param url: The URL of the protected object.
+
+        :type keypair_id: str
+        :param keypair_id: The keypair ID of the Amazon KeyPair used to sign
+            the URL. This ID MUST correspond to the private key
+            specified with private_key_file or private_key_string.
+
+        :type expire_time: int
+        :param expire_time: The expiry time of the URL. If provided, the URL
+            will expire after the time has passed. If not provided the URL will
+            never expire. Format is a unix epoch.
+            Use time.time() + duration_in_sec.
+
+        :type valid_after_time: int
+        :param valid_after_time: If provided, the URL will not be valid until
+            after valid_after_time. Format is a unix epoch.
+            Use time.time() + secs_until_valid.
+
+        :type ip_address: str
+        :param ip_address: If provided, only allows access from the specified
+            IP address. Use '192.168.0.10' for a single IP or
+            use '192.168.0.0/24' CIDR notation for a subnet.
+
+        :type policy_url: str
+        :param policy_url: If provided, allows the signature to contain
+            wildcard globs in the URL. For example, you could
+            provide: 'http://example.com/media/\*' and the policy
+            and signature would allow access to all contents of
+            the media subdirectory. If not specified, only
+            allow access to the exact url provided in 'url'.
+
+        :type private_key_file: str or file object.
+        :param private_key_file: If provided, contains the filename of the
+            private key file used for signing or an open
+            file object containing the private key
+            contents. Only one of private_key_file or
+            private_key_string can be provided.
+
+        :type private_key_string: str
+        :param private_key_string: If provided, contains the private key string
+            used for signing. Only one of private_key_file or
+            private_key_string can be provided.
+
+        :rtype: str
+        :return: The signed URL.
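+
+        Example (a sketch; the domain, key pair ID, and key path are
+        placeholders)::
+
+            import time
+            url = dist.create_signed_url(
+                'http://d1234example.cloudfront.net/movie.mp4',
+                keypair_id='APKA0000EXAMPLE',
+                expire_time=int(time.time()) + 3600,
+                private_key_file='/path/to/private-key.pem')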
+ """ + # Get the required parameters + params = self._create_signing_params( + url=url, keypair_id=keypair_id, expire_time=expire_time, + valid_after_time=valid_after_time, ip_address=ip_address, + policy_url=policy_url, private_key_file=private_key_file, + private_key_string=private_key_string) + + #combine these into a full url + if "?" in url: + sep = "&" + else: + sep = "?" + signed_url_params = [] + for key in ["Expires", "Policy", "Signature", "Key-Pair-Id"]: + if key in params: + param = "%s=%s" % (key, params[key]) + signed_url_params.append(param) + signed_url = url + sep + "&".join(signed_url_params) + return signed_url + + def _create_signing_params(self, url, keypair_id, + expire_time=None, valid_after_time=None, + ip_address=None, policy_url=None, + private_key_file=None, private_key_string=None): + """ + Creates the required URL parameters for a signed URL. + """ + params = {} + # Check if we can use a canned policy + if expire_time and not valid_after_time and not ip_address and not policy_url: + # we manually construct this policy string to ensure formatting + # matches signature + policy = self._canned_policy(url, expire_time) + params["Expires"] = str(expire_time) + else: + # If no policy_url is specified, default to the full url. + if policy_url is None: + policy_url = url + # Can't use canned policy + policy = self._custom_policy(policy_url, expires=expire_time, + valid_after=valid_after_time, + ip_address=ip_address) + + encoded_policy = self._url_base64_encode(policy) + params["Policy"] = encoded_policy + #sign the policy + signature = self._sign_string(policy, private_key_file, private_key_string) + #now base64 encode the signature (URL safe as well) + encoded_signature = self._url_base64_encode(signature) + params["Signature"] = encoded_signature + params["Key-Pair-Id"] = keypair_id + return params + + @staticmethod + def _canned_policy(resource, expires): + """ + Creates a canned policy string. + """ + policy = ('{"Statement":[{"Resource":"%(resource)s",' + '"Condition":{"DateLessThan":{"AWS:EpochTime":' + '%(expires)s}}}]}' % locals()) + return policy + + @staticmethod + def _custom_policy(resource, expires=None, valid_after=None, ip_address=None): + """ + Creates a custom policy string based on the supplied parameters. + """ + condition = {} + # SEE: http://docs.amazonwebservices.com/AmazonCloudFront/latest/DeveloperGuide/RestrictingAccessPrivateContent.html#CustomPolicy + # The 'DateLessThan' property is required. + if not expires: + # Defaults to ONE day + expires = int(time.time()) + 86400 + condition["DateLessThan"] = {"AWS:EpochTime": expires} + if valid_after: + condition["DateGreaterThan"] = {"AWS:EpochTime": valid_after} + if ip_address: + if '/' not in ip_address: + ip_address += "/32" + condition["IpAddress"] = {"AWS:SourceIp": ip_address} + policy = {"Statement": [{ + "Resource": resource, + "Condition": condition}]} + return json.dumps(policy, separators=(",", ":")) + + @staticmethod + def _sign_string(message, private_key_file=None, private_key_string=None): + """ + Signs a string for use with Amazon CloudFront. + Requires the rsa library be installed. 
+ """ + try: + import rsa + except ImportError: + raise NotImplementedError("Boto depends on the python rsa " + "library to generate signed URLs for " + "CloudFront") + # Make sure only one of private_key_file and private_key_string is set + if private_key_file and private_key_string: + raise ValueError("Only specify the private_key_file or the private_key_string not both") + if not private_key_file and not private_key_string: + raise ValueError("You must specify one of private_key_file or private_key_string") + # If private_key_file is a file name, open it and read it + if private_key_string is None: + if isinstance(private_key_file, six.string_types): + with open(private_key_file, 'r') as file_handle: + private_key_string = file_handle.read() + # Otherwise, treat it like a file + else: + private_key_string = private_key_file.read() + + # Sign it! + private_key = rsa.PrivateKey.load_pkcs1(private_key_string) + signature = rsa.sign(str(message), private_key, 'SHA-1') + return signature + + @staticmethod + def _url_base64_encode(msg): + """ + Base64 encodes a string using the URL-safe characters specified by + Amazon. + """ + msg_base64 = base64.b64encode(msg) + msg_base64 = msg_base64.replace('+', '-') + msg_base64 = msg_base64.replace('=', '_') + msg_base64 = msg_base64.replace('/', '~') + return msg_base64 + +class StreamingDistribution(Distribution): + + def __init__(self, connection=None, config=None, domain_name='', + id='', last_modified_time=None, status=''): + super(StreamingDistribution, self).__init__(connection, config, + domain_name, id, last_modified_time, status) + self._object_class = StreamingObject + + def startElement(self, name, attrs, connection): + if name == 'StreamingDistributionConfig': + self.config = StreamingDistributionConfig() + return self.config + else: + return super(StreamingDistribution, self).startElement(name, attrs, + connection) + + def update(self, enabled=None, cnames=None, comment=None): + """ + Update the configuration of the StreamingDistribution. The only values + of the StreamingDistributionConfig that can be directly updated are: + + * CNAMES + * Comment + * Whether the Distribution is enabled or not + + Any changes to the ``trusted_signers`` or ``origin`` properties of + this distribution's current config object will also be included in + the update. Therefore, to set the origin access identity for this + distribution, set + ``StreamingDistribution.config.origin.origin_access_identity`` + before calling this update method. + + :type enabled: bool + :param enabled: Whether the StreamingDistribution is active or not. + + :type cnames: list of str + :param cnames: The DNS CNAME's associated with this + Distribution. Maximum of 10 values. + + :type comment: str or unicode + :param comment: The comment associated with the Distribution. 
+ + """ + new_config = StreamingDistributionConfig(self.connection, + self.config.origin, + self.config.enabled, + self.config.caller_reference, + self.config.cnames, + self.config.comment, + self.config.trusted_signers) + if enabled is not None: + new_config.enabled = enabled + if cnames is not None: + new_config.cnames = cnames + if comment is not None: + new_config.comment = comment + self.etag = self.connection.set_streaming_distribution_config(self.id, + self.etag, + new_config) + self.config = new_config + self._object_class = StreamingObject + + def delete(self): + self.connection.delete_streaming_distribution(self.id, self.etag) + + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/exception.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/exception.py new file mode 100644 index 0000000000000000000000000000000000000000..768064210c27e935dda071d1d077497c1bc3ed07 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/exception.py @@ -0,0 +1,26 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.exception import BotoServerError + +class CloudFrontServerError(BotoServerError): + + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/identity.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/identity.py new file mode 100644 index 0000000000000000000000000000000000000000..de79c8ac7685a3fa0c7ded1eb49a51f17eeb103d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/identity.py @@ -0,0 +1,121 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import uuid
+
+class OriginAccessIdentity(object):
+    def __init__(self, connection=None, config=None, id='',
+                 s3_user_id='', comment=''):
+        self.connection = connection
+        self.config = config
+        self.id = id
+        self.s3_user_id = s3_user_id
+        self.comment = comment
+        self.etag = None
+
+    def startElement(self, name, attrs, connection):
+        if name == 'CloudFrontOriginAccessIdentityConfig':
+            self.config = OriginAccessIdentityConfig()
+            return self.config
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Id':
+            self.id = value
+        elif name == 'S3CanonicalUserId':
+            self.s3_user_id = value
+        elif name == 'Comment':
+            self.comment = value
+        else:
+            setattr(self, name, value)
+
+    def update(self, comment=None):
+        new_config = OriginAccessIdentityConfig(self.connection,
+                                                self.config.caller_reference,
+                                                self.config.comment)
+        if comment is not None:
+            new_config.comment = comment
+        self.etag = self.connection.set_origin_identity_config(self.id, self.etag, new_config)
+        self.config = new_config
+
+    def delete(self):
+        return self.connection.delete_origin_access_identity(self.id, self.etag)
+
+    def uri(self):
+        return 'origin-access-identity/cloudfront/%s' % self.id
+
+
+class OriginAccessIdentityConfig(object):
+    def __init__(self, connection=None, caller_reference='', comment=''):
+        self.connection = connection
+        if caller_reference:
+            self.caller_reference = caller_reference
+        else:
+            self.caller_reference = str(uuid.uuid4())
+        self.comment = comment
+
+    def to_xml(self):
+        s = '<?xml version="1.0" encoding="UTF-8"?>\n'
+        s += '<CloudFrontOriginAccessIdentityConfig xmlns="http://cloudfront.amazonaws.com/doc/2009-09-09/">\n'
+        s += '  <CallerReference>%s</CallerReference>\n' % self.caller_reference
+        if self.comment:
+            s += '  <Comment>%s</Comment>\n' % self.comment
+        s += '</CloudFrontOriginAccessIdentityConfig>\n'
+        return s
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Comment':
+            self.comment = value
+        elif name == 'CallerReference':
+            self.caller_reference = value
+        else:
+            setattr(self, name, value)
+
+
+class OriginAccessIdentitySummary(object):
+    def __init__(self, connection=None, id='',
+                 s3_user_id='', comment=''):
+        self.connection = connection
+        self.id = id
+        self.s3_user_id = s3_user_id
+        self.comment = comment
+        self.etag = None
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Id':
+            self.id = value
+        elif name == 'S3CanonicalUserId':
+            self.s3_user_id = value
+        elif name == 'Comment':
+            self.comment = value
+        else:
+            setattr(self, name, value)
+
+    def get_origin_access_identity(self):
+        return self.connection.get_origin_access_identity_info(self.id)
+
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/invalidation.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/invalidation.py
new file mode 100644
index 0000000000000000000000000000000000000000..58adf81fd3aad0e87b6dcabd83df1de6f3cd672c
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/invalidation.py
@@ -0,0 +1,216 @@
+# Copyright (c) 2006-2010 Chris Moyer http://coredumped.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import uuid
+
+from boto.compat import urllib
+from boto.resultset import ResultSet
+
+
+class InvalidationBatch(object):
+    """A simple invalidation request.
+        :see: http://docs.amazonwebservices.com/AmazonCloudFront/2010-08-01/APIReference/index.html?InvalidationBatchDatatype.html
+    """
+
+    def __init__(self, paths=None, connection=None, distribution=None, caller_reference=''):
+        """Create a new invalidation request:
+            :paths: An array of paths to invalidate
+        """
+        self.paths = paths or []
+        self.distribution = distribution
+        self.caller_reference = caller_reference
+        if not self.caller_reference:
+            self.caller_reference = str(uuid.uuid4())
+
+        # If we passed in a distribution,
+        # then we use that as the connection object
+        if distribution:
+            self.connection = distribution
+        else:
+            self.connection = connection
+
+    def __repr__(self):
+        return '<InvalidationBatch: %s>' % self.id
+
+    def add(self, path):
+        """Add another path to this invalidation request"""
+        return self.paths.append(path)
+
+    def remove(self, path):
+        """Remove a path from this invalidation request"""
+        return self.paths.remove(path)
+
+    def __iter__(self):
+        return iter(self.paths)
+
+    def __getitem__(self, i):
+        return self.paths[i]
+
+    def __setitem__(self, k, v):
+        self.paths[k] = v
+
+    def escape(self, p):
+        """Escape a path, make sure it begins with a slash and contains no invalid characters"""
+        if not p[0] == "/":
+            p = "/%s" % p
+        return urllib.parse.quote(p)
+
+    def to_xml(self):
+        """Get this batch as XML"""
+        assert self.connection is not None
+        s = '<?xml version="1.0" encoding="UTF-8"?>\n'
+        s += '<InvalidationBatch xmlns="http://cloudfront.amazonaws.com/doc/%s/">\n' % self.connection.Version
+        for p in self.paths:
+            s += '    <Path>%s</Path>\n' % self.escape(p)
+        s += '    <CallerReference>%s</CallerReference>\n' % self.caller_reference
+        s += '</InvalidationBatch>\n'
+        return s
+
+    def startElement(self, name, attrs, connection):
+        if name == "InvalidationBatch":
+            self.paths = []
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Path':
+            self.paths.append(value)
+        elif name == "Status":
+            self.status = value
+        elif name == "Id":
+            self.id = value
+        elif name == "CreateTime":
+            self.create_time = value
+        elif name == "CallerReference":
+            self.caller_reference = value
+        return None
+
+
+class InvalidationListResultSet(object):
+    """
+    A resultset for listing invalidations on a given CloudFront distribution.
+    Implements the iterator interface and transparently handles paging results
+    from CF so even if you have many thousands of invalidations on the
+    distribution you can iterate over all invalidations in a reasonably
+    efficient manner.
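+
+    A sketch of typical use (assuming ``conn`` is a
+    ``CloudFrontConnection``)::
+
+        for inval in conn.get_invalidation_requests('EDFDVBD6EXAMPLE'):
+            print(inval.id, inval.status)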
+ """ + def __init__(self, markers=None, connection=None, distribution_id=None, + invalidations=None, marker='', next_marker=None, + max_items=None, is_truncated=False): + self.markers = markers or [] + self.connection = connection + self.distribution_id = distribution_id + self.marker = marker + self.next_marker = next_marker + self.max_items = max_items + self.auto_paginate = max_items is None + self.is_truncated = is_truncated + self._inval_cache = invalidations or [] + + def __iter__(self): + """ + A generator function for listing invalidation requests for a given + CloudFront distribution. + """ + conn = self.connection + distribution_id = self.distribution_id + result_set = self + for inval in result_set._inval_cache: + yield inval + if not self.auto_paginate: + return + while result_set.is_truncated: + result_set = conn.get_invalidation_requests(distribution_id, + marker=result_set.next_marker, + max_items=result_set.max_items) + for i in result_set._inval_cache: + yield i + + def startElement(self, name, attrs, connection): + for root_elem, handler in self.markers: + if name == root_elem: + obj = handler(connection, distribution_id=self.distribution_id) + self._inval_cache.append(obj) + return obj + + def endElement(self, name, value, connection): + if name == 'IsTruncated': + self.is_truncated = self.to_boolean(value) + elif name == 'Marker': + self.marker = value + elif name == 'NextMarker': + self.next_marker = value + elif name == 'MaxItems': + self.max_items = int(value) + + def to_boolean(self, value, true_value='true'): + if value == true_value: + return True + else: + return False + +class InvalidationSummary(object): + """ + Represents InvalidationSummary complex type in CloudFront API that lists + the id and status of a given invalidation request. + """ + def __init__(self, connection=None, distribution_id=None, id='', + status=''): + self.connection = connection + self.distribution_id = distribution_id + self.id = id + self.status = status + + def __repr__(self): + return '' % self.id + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Id': + self.id = value + elif name == 'Status': + self.status = value + + def get_distribution(self): + """ + Returns a Distribution object representing the parent CloudFront + distribution of the invalidation request listed in the + InvalidationSummary. + + :rtype: :class:`boto.cloudfront.distribution.Distribution` + :returns: A Distribution object representing the parent CloudFront + distribution of the invalidation request listed in the + InvalidationSummary + """ + return self.connection.get_distribution_info(self.distribution_id) + + def get_invalidation_request(self): + """ + Returns an InvalidationBatch object representing the invalidation + request referred to in the InvalidationSummary. 
+ + :rtype: :class:`boto.cloudfront.invalidation.InvalidationBatch` + :returns: An InvalidationBatch object representing the invalidation + request referred to by the InvalidationSummary + """ + return self.connection.invalidation_request_status( + self.distribution_id, self.id) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/logging.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..6c2f4fde2fec516dd6768ce4b5ce7e36de6bf50c --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/logging.py @@ -0,0 +1,38 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class LoggingInfo(object): + + def __init__(self, bucket='', prefix=''): + self.bucket = bucket + self.prefix = prefix + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Bucket': + self.bucket = value + elif name == 'Prefix': + self.prefix = value + else: + setattr(self, name, value) + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/object.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/object.py new file mode 100644 index 0000000000000000000000000000000000000000..24fc85064cadc60b81e5ff518cfaf821ab20dd51 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/object.py @@ -0,0 +1,48 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.s3.key import Key
+
+class Object(Key):
+
+    def __init__(self, bucket, name=None):
+        super(Object, self).__init__(bucket, name=name)
+        self.distribution = bucket.distribution
+
+    def __repr__(self):
+        return '<Object: %s/%s>' % (self.distribution.config.origin, self.name)
+
+    def url(self, scheme='http'):
+        url = '%s://' % scheme
+        url += self.distribution.domain_name
+        if scheme.lower().startswith('rtmp'):
+            url += '/cfx/st/'
+        else:
+            url += '/'
+        url += self.name
+        return url
+
+class StreamingObject(Object):
+
+    def url(self, scheme='rtmp'):
+        return super(StreamingObject, self).url(scheme)
+
+
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/origin.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/origin.py
new file mode 100644
index 0000000000000000000000000000000000000000..b88ec7e7f8ea528c44d9e0c27297e6c3d3c8e0fe
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/origin.py
@@ -0,0 +1,150 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.cloudfront.identity import OriginAccessIdentity
+
+def get_oai_value(origin_access_identity):
+    if isinstance(origin_access_identity, OriginAccessIdentity):
+        return origin_access_identity.uri()
+    else:
+        return origin_access_identity
+
+class S3Origin(object):
+    """
+    Origin information to associate with the distribution.
+    If your distribution will use an Amazon S3 origin,
+    then you use the S3Origin element.
+    """
+
+    def __init__(self, dns_name=None, origin_access_identity=None):
+        """
+        :param dns_name: The DNS name of your Amazon S3 bucket to
+                         associate with the distribution.
+                         For example: mybucket.s3.amazonaws.com.
+        :type dns_name: str
+
+        :param origin_access_identity: The CloudFront origin access
+                                       identity to associate with the
+                                       distribution. If you want the
+                                       distribution to serve private content,
+                                       include this element; if you want the
+                                       distribution to serve public content,
+                                       remove this element.
+        :type origin_access_identity: str
+
+        """
+        self.dns_name = dns_name
+        self.origin_access_identity = origin_access_identity
+
+    def __repr__(self):
+        return '<S3Origin: %s>' % self.dns_name
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'DNSName':
+            self.dns_name = value
+        elif name == 'OriginAccessIdentity':
+            self.origin_access_identity = value
+        else:
+            setattr(self, name, value)
+
+    def to_xml(self):
+        s = '  <S3Origin>\n'
+        s += '    <DNSName>%s</DNSName>\n' % self.dns_name
+        if self.origin_access_identity:
+            val = get_oai_value(self.origin_access_identity)
+            s += '    <OriginAccessIdentity>%s</OriginAccessIdentity>\n' % val
+        s += '  </S3Origin>\n'
+        return s
+
+class CustomOrigin(object):
+    """
+    Origin information to associate with the distribution.
+    If your distribution will use a non-Amazon S3 origin,
+    then you use the CustomOrigin element.
+    """
+
+    def __init__(self, dns_name=None, http_port=80, https_port=443,
+                 origin_protocol_policy=None):
+        """
+        :param dns_name: The DNS name of your Amazon S3 bucket to
+                         associate with the distribution.
+                         For example: mybucket.s3.amazonaws.com.
+        :type dns_name: str
+
+        :param http_port: The HTTP port the custom origin listens on.
+        :type http_port: int
+
+        :param https_port: The HTTPS port the custom origin listens on.
+        :type https_port: int
+
+        :param origin_protocol_policy: The origin protocol policy to
+                                       apply to your origin. If you
+                                       specify http-only, CloudFront
+                                       will use HTTP only to access the origin.
+                                       If you specify match-viewer, CloudFront
+                                       will fetch from your origin using HTTP
+                                       or HTTPS, based on the protocol of the
+                                       viewer request.
+        :type origin_protocol_policy: str
+
+        """
+        self.dns_name = dns_name
+        self.http_port = http_port
+        self.https_port = https_port
+        self.origin_protocol_policy = origin_protocol_policy
+
+    def __repr__(self):
+        return '<CustomOrigin: %s>' % self.dns_name
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'DNSName':
+            self.dns_name = value
+        elif name == 'HTTPPort':
+            try:
+                self.http_port = int(value)
+            except ValueError:
+                self.http_port = value
+        elif name == 'HTTPSPort':
+            try:
+                self.https_port = int(value)
+            except ValueError:
+                self.https_port = value
+        elif name == 'OriginProtocolPolicy':
+            self.origin_protocol_policy = value
+        else:
+            setattr(self, name, value)
+
+    def to_xml(self):
+        s = '  <CustomOrigin>\n'
+        s += '    <DNSName>%s</DNSName>\n' % self.dns_name
+        s += '    <HTTPPort>%d</HTTPPort>\n' % self.http_port
+        s += '    <HTTPSPort>%d</HTTPSPort>\n' % self.https_port
+        s += '    <OriginProtocolPolicy>%s</OriginProtocolPolicy>\n' % self.origin_protocol_policy
+        s += '  </CustomOrigin>\n'
+        return s
+
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/signers.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/signers.py
new file mode 100644
index 0000000000000000000000000000000000000000..dcc9fc9ea3b0829edcbfa65a6a0611248d01ccc0
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudfront/signers.py
@@ -0,0 +1,59 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class Signer(object): + def __init__(self): + self.id = None + self.key_pair_ids = [] + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Self': + self.id = 'Self' + elif name == 'AwsAccountNumber': + self.id = value + elif name == 'KeyPairId': + self.key_pair_ids.append(value) + + +class ActiveTrustedSigners(list): + def startElement(self, name, attrs, connection): + if name == 'Signer': + s = Signer() + self.append(s) + return s + + def endElement(self, name, value, connection): + pass + + +class TrustedSigners(list): + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Self': + self.append(name) + elif name == 'AwsAccountNumber': + self.append(value) + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudhsm/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudhsm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b13fe3fcd7d35a84d926a7e3f8bb392f82e9c4ce --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudhsm/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the AWS CloudHSM service. 
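+
+    For example (a sketch)::
+
+        from boto.cloudhsm import regions, connect_to_region
+        print([r.name for r in regions()])
+        conn = connect_to_region('us-east-1')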
+ + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.cloudhsm.layer1 import CloudHSMConnection + return get_regions('cloudhsm', connection_cls=CloudHSMConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudhsm/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudhsm/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..1e14abe175cd55f6efb2195fc6ef2c09370c96b1 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudhsm/exceptions.py @@ -0,0 +1,35 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.exception import BotoServerError + + +class InvalidRequestException(BotoServerError): + pass + + +class CloudHsmServiceException(BotoServerError): + pass + + +class CloudHsmInternalException(BotoServerError): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudhsm/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudhsm/layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..e0877736e2e2ed9d090d70a521ef3015c30c9537 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudhsm/layer1.py @@ -0,0 +1,448 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.cloudhsm import exceptions + + +class CloudHSMConnection(AWSQueryConnection): + """ + AWS CloudHSM Service + """ + APIVersion = "2014-05-30" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "cloudhsm.us-east-1.amazonaws.com" + ServiceName = "CloudHSM" + TargetPrefix = "CloudHsmFrontendService" + ResponseError = JSONResponseError + + _faults = { + "InvalidRequestException": exceptions.InvalidRequestException, + "CloudHsmServiceException": exceptions.CloudHsmServiceException, + "CloudHsmInternalException": exceptions.CloudHsmInternalException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(CloudHSMConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def create_hapg(self, label): + """ + Creates a high-availability partition group. A high- + availability partition group is a group of partitions that + spans multiple physical HSMs. + + :type label: string + :param label: The label of the new high-availability partition group. + + """ + params = {'Label': label, } + return self.make_request(action='CreateHapg', + body=json.dumps(params)) + + def create_hsm(self, subnet_id, ssh_key, iam_role_arn, subscription_type, + eni_ip=None, external_id=None, client_token=None, + syslog_ip=None): + """ + Creates an uninitialized HSM instance. Running this command + provisions an HSM appliance and will result in charges to your + AWS account for the HSM. + + :type subnet_id: string + :param subnet_id: The identifier of the subnet in your VPC in which to + place the HSM. + + :type ssh_key: string + :param ssh_key: The SSH public key to install on the HSM. + + :type eni_ip: string + :param eni_ip: The IP address to assign to the HSM's ENI. + + :type iam_role_arn: string + :param iam_role_arn: The ARN of an IAM role to enable the AWS CloudHSM + service to allocate an ENI on your behalf. + + :type external_id: string + :param external_id: The external ID from **IamRoleArn**, if present. + + :type subscription_type: string + :param subscription_type: The subscription type. + + :type client_token: string + :param client_token: A user-defined token to ensure idempotence. + Subsequent calls to this action with the same token will be + ignored. + + :type syslog_ip: string + :param syslog_ip: The IP address for the syslog monitoring server. + + """ + params = { + 'SubnetId': subnet_id, + 'SshKey': ssh_key, + 'IamRoleArn': iam_role_arn, + 'SubscriptionType': subscription_type, + } + if eni_ip is not None: + params['EniIp'] = eni_ip + if external_id is not None: + params['ExternalId'] = external_id + if client_token is not None: + params['ClientToken'] = client_token + if syslog_ip is not None: + params['SyslogIp'] = syslog_ip + return self.make_request(action='CreateHsm', + body=json.dumps(params)) + + def create_luna_client(self, certificate, label=None): + """ + Creates an HSM client. + + :type label: string + :param label: The label for the client. + + :type certificate: string + :param certificate: The contents of a Base64-Encoded X.509 v3 + certificate to be installed on the HSMs used by this client. 
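+
+        Example (a sketch; ``conn`` is a ``CloudHSMConnection`` and the
+        certificate file name is a placeholder)::
+
+            with open('client-cert.pem') as f:
+                conn.create_luna_client(f.read(), label='my-hsm-client')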
+ + """ + params = {'Certificate': certificate, } + if label is not None: + params['Label'] = label + return self.make_request(action='CreateLunaClient', + body=json.dumps(params)) + + def delete_hapg(self, hapg_arn): + """ + Deletes a high-availability partition group. + + :type hapg_arn: string + :param hapg_arn: The ARN of the high-availability partition group to + delete. + + """ + params = {'HapgArn': hapg_arn, } + return self.make_request(action='DeleteHapg', + body=json.dumps(params)) + + def delete_hsm(self, hsm_arn): + """ + Deletes an HSM. Once complete, this operation cannot be undone + and your key material cannot be recovered. + + :type hsm_arn: string + :param hsm_arn: The ARN of the HSM to delete. + + """ + params = {'HsmArn': hsm_arn, } + return self.make_request(action='DeleteHsm', + body=json.dumps(params)) + + def delete_luna_client(self, client_arn): + """ + Deletes a client. + + :type client_arn: string + :param client_arn: The ARN of the client to delete. + + """ + params = {'ClientArn': client_arn, } + return self.make_request(action='DeleteLunaClient', + body=json.dumps(params)) + + def describe_hapg(self, hapg_arn): + """ + Retrieves information about a high-availability partition + group. + + :type hapg_arn: string + :param hapg_arn: The ARN of the high-availability partition group to + describe. + + """ + params = {'HapgArn': hapg_arn, } + return self.make_request(action='DescribeHapg', + body=json.dumps(params)) + + def describe_hsm(self, hsm_arn=None, hsm_serial_number=None): + """ + Retrieves information about an HSM. You can identify the HSM + by its ARN or its serial number. + + :type hsm_arn: string + :param hsm_arn: The ARN of the HSM. Either the HsmArn or the + SerialNumber parameter must be specified. + + :type hsm_serial_number: string + :param hsm_serial_number: The serial number of the HSM. Either the + HsmArn or the HsmSerialNumber parameter must be specified. + + """ + params = {} + if hsm_arn is not None: + params['HsmArn'] = hsm_arn + if hsm_serial_number is not None: + params['HsmSerialNumber'] = hsm_serial_number + return self.make_request(action='DescribeHsm', + body=json.dumps(params)) + + def describe_luna_client(self, client_arn=None, + certificate_fingerprint=None): + """ + Retrieves information about an HSM client. + + :type client_arn: string + :param client_arn: The ARN of the client. + + :type certificate_fingerprint: string + :param certificate_fingerprint: The certificate fingerprint. + + """ + params = {} + if client_arn is not None: + params['ClientArn'] = client_arn + if certificate_fingerprint is not None: + params['CertificateFingerprint'] = certificate_fingerprint + return self.make_request(action='DescribeLunaClient', + body=json.dumps(params)) + + def get_config(self, client_arn, client_version, hapg_list): + """ + Gets the configuration files necessary to connect to all high + availability partition groups the client is associated with. + + :type client_arn: string + :param client_arn: The ARN of the client. + + :type client_version: string + :param client_version: The client version. + + :type hapg_list: list + :param hapg_list: A list of ARNs that identify the high-availability + partition groups that are associated with the client. 
+ + """ + params = { + 'ClientArn': client_arn, + 'ClientVersion': client_version, + 'HapgList': hapg_list, + } + return self.make_request(action='GetConfig', + body=json.dumps(params)) + + def list_available_zones(self): + """ + Lists the Availability Zones that have available AWS CloudHSM + capacity. + + + """ + params = {} + return self.make_request(action='ListAvailableZones', + body=json.dumps(params)) + + def list_hapgs(self, next_token=None): + """ + Lists the high-availability partition groups for the account. + + This operation supports pagination with the use of the + NextToken member. If more results are available, the NextToken + member of the response contains a token that you pass in the + next call to ListHapgs to retrieve the next set of items. + + :type next_token: string + :param next_token: The NextToken value from a previous call to + ListHapgs. Pass null if this is the first call. + + """ + params = {} + if next_token is not None: + params['NextToken'] = next_token + return self.make_request(action='ListHapgs', + body=json.dumps(params)) + + def list_hsms(self, next_token=None): + """ + Retrieves the identifiers of all of the HSMs provisioned for + the current customer. + + This operation supports pagination with the use of the + NextToken member. If more results are available, the NextToken + member of the response contains a token that you pass in the + next call to ListHsms to retrieve the next set of items. + + :type next_token: string + :param next_token: The NextToken value from a previous call to + ListHsms. Pass null if this is the first call. + + """ + params = {} + if next_token is not None: + params['NextToken'] = next_token + return self.make_request(action='ListHsms', + body=json.dumps(params)) + + def list_luna_clients(self, next_token=None): + """ + Lists all of the clients. + + This operation supports pagination with the use of the + NextToken member. If more results are available, the NextToken + member of the response contains a token that you pass in the + next call to ListLunaClients to retrieve the next set of + items. + + :type next_token: string + :param next_token: The NextToken value from a previous call to + ListLunaClients. Pass null if this is the first call. + + """ + params = {} + if next_token is not None: + params['NextToken'] = next_token + return self.make_request(action='ListLunaClients', + body=json.dumps(params)) + + def modify_hapg(self, hapg_arn, label=None, partition_serial_list=None): + """ + Modifies an existing high-availability partition group. + + :type hapg_arn: string + :param hapg_arn: The ARN of the high-availability partition group to + modify. + + :type label: string + :param label: The new label for the high-availability partition group. + + :type partition_serial_list: list + :param partition_serial_list: The list of partition serial numbers to + make members of the high-availability partition group. + + """ + params = {'HapgArn': hapg_arn, } + if label is not None: + params['Label'] = label + if partition_serial_list is not None: + params['PartitionSerialList'] = partition_serial_list + return self.make_request(action='ModifyHapg', + body=json.dumps(params)) + + def modify_hsm(self, hsm_arn, subnet_id=None, eni_ip=None, + iam_role_arn=None, external_id=None, syslog_ip=None): + """ + Modifies an HSM. + + :type hsm_arn: string + :param hsm_arn: The ARN of the HSM to modify. + + :type subnet_id: string + :param subnet_id: The new identifier of the subnet that the HSM is in. 
+ + :type eni_ip: string + :param eni_ip: The new IP address for the elastic network interface + attached to the HSM. + + :type iam_role_arn: string + :param iam_role_arn: The new IAM role ARN. + + :type external_id: string + :param external_id: The new external ID. + + :type syslog_ip: string + :param syslog_ip: The new IP address for the syslog monitoring server. + + """ + params = {'HsmArn': hsm_arn, } + if subnet_id is not None: + params['SubnetId'] = subnet_id + if eni_ip is not None: + params['EniIp'] = eni_ip + if iam_role_arn is not None: + params['IamRoleArn'] = iam_role_arn + if external_id is not None: + params['ExternalId'] = external_id + if syslog_ip is not None: + params['SyslogIp'] = syslog_ip + return self.make_request(action='ModifyHsm', + body=json.dumps(params)) + + def modify_luna_client(self, client_arn, certificate): + """ + Modifies the certificate used by the client. + + This action can potentially start a workflow to install the + new certificate on the client's HSMs. + + :type client_arn: string + :param client_arn: The ARN of the client. + + :type certificate: string + :param certificate: The new certificate for the client. + + """ + params = { + 'ClientArn': client_arn, + 'Certificate': certificate, + } + return self.make_request(action='ModifyLunaClient', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..451a6bfab83e856d0e643f190cbb5a5ff7b96f9b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/__init__.py @@ -0,0 +1,45 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.regioninfo import RegionInfo, get_regions
+
+
+def regions():
+    """
+    Get all available regions for the Amazon CloudSearch service.
+
+    :rtype: list
+    :return: A list of :class:`boto.regioninfo.RegionInfo`
+    """
+    import boto.cloudsearch.layer1
+    return get_regions(
+        'cloudsearch',
+        connection_cls=boto.cloudsearch.layer1.Layer1
+    )
+
+
+def connect_to_region(region_name, **kw_params):
+    for region in regions():
+        if region.name == region_name:
+            return region.connect(**kw_params)
+    return None
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/document.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/document.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a1d9db22cf4a9b434f0e5bff55a30a76ab33b34
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/document.py
@@ -0,0 +1,271 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import boto.exception
+from boto.compat import json
+import requests
+import boto
+
+class SearchServiceException(Exception):
+    pass
+
+
+class CommitMismatchError(Exception):
+    pass
+
+class EncodingError(Exception):
+    """
+    Content sent for Cloud Search indexing was incorrectly encoded.
+
+    This usually happens when a document is marked as unicode but non-unicode
+    characters are present.
+    """
+    pass
+
+class ContentTooLongError(Exception):
+    """
+    Content sent for Cloud Search indexing was too long
+
+    This will usually happen when documents queued for indexing add up to more
+    than the limit allowed per upload batch (5MB)
+
+    """
+    pass
+
+class DocumentServiceConnection(object):
+    """
+    A CloudSearch document service.
+
+    The DocumentServiceConnection is used to add, remove and update documents in
+    CloudSearch. Commands are uploaded to CloudSearch in SDF (Search Document Format).
+
+    To generate an appropriate SDF, use :func:`add` to add or update documents,
+    as well as :func:`delete` to remove documents.
+
+    Once the set of documents is ready to be indexed, use :func:`commit` to send the
+    commands to CloudSearch.
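+
+    A typical round trip looks like this (a sketch; ``domain`` is assumed
+    to be a :class:`boto.cloudsearch.domain.Domain`)::
+
+        doc_service = domain.get_document_service()
+        doc_service.add('doc-1', 1, {'title': 'Hello world'})
+        doc_service.delete('doc-2', 1)
+        result = doc_service.commit()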
+
+    If there are a lot of documents to index, it may be preferable to split the
+    generation of SDF data and the actual uploading into CloudSearch. Retrieve
+    the current SDF with :func:`get_sdf`. If this file is then uploaded into S3,
+    it can be retrieved back afterwards for upload into CloudSearch using
+    :func:`add_sdf_from_s3`.
+
+    The SDF is not cleared after a :func:`commit`. If you wish to continue
+    using the DocumentServiceConnection for another batch upload of commands,
+    you will need to :func:`clear_sdf` first to stop the previous batch of
+    commands from being uploaded again.
+
+    """
+
+    def __init__(self, domain=None, endpoint=None):
+        self.domain = domain
+        self.endpoint = endpoint
+        if not self.endpoint:
+            self.endpoint = domain.doc_service_endpoint
+        self.documents_batch = []
+        self._sdf = None
+
+    def add(self, _id, version, fields, lang='en'):
+        """
+        Add a document to be processed by the DocumentService
+
+        The document will not actually be added until :func:`commit` is called
+
+        :type _id: string
+        :param _id: A unique ID used to refer to this document.
+
+        :type version: int
+        :param version: Version of the document being indexed. If a file is
+            being reindexed, the version should be higher than the existing one
+            in CloudSearch.
+
+        :type fields: dict
+        :param fields: A dictionary of key-value pairs to be uploaded.
+
+        :type lang: string
+        :param lang: The language code the data is in. Only 'en' is currently
+            supported.
+        """
+
+        d = {'type': 'add', 'id': _id, 'version': version, 'lang': lang,
+             'fields': fields}
+        self.documents_batch.append(d)
+
+    def delete(self, _id, version):
+        """
+        Schedule a document to be removed from the CloudSearch service
+
+        The document will not actually be scheduled for removal until :func:`commit` is called
+
+        :type _id: string
+        :param _id: The unique ID of this document.
+
+        :type version: int
+        :param version: Version of the document to remove. The delete will only
+            occur if this version number is higher than the version currently
+            in the index.
+        """
+
+        d = {'type': 'delete', 'id': _id, 'version': version}
+        self.documents_batch.append(d)
+
+    def get_sdf(self):
+        """
+        Generate the working set of documents in Search Data Format (SDF)
+
+        :rtype: string
+        :returns: JSON-formatted string of the documents in SDF
+        """
+
+        return self._sdf if self._sdf else json.dumps(self.documents_batch)
+
+    def clear_sdf(self):
+        """
+        Clear the working documents from this DocumentServiceConnection
+
+        This should be used after :func:`commit` if the connection will be reused
+        for another set of documents.
+        """
+
+        self._sdf = None
+        self.documents_batch = []
+
+    def add_sdf_from_s3(self, key_obj):
+        """
+        Load an SDF from S3
+
+        Using this method will result in documents added through
+        :func:`add` and :func:`delete` being ignored.
+
+        :type key_obj: :class:`boto.s3.key.Key`
+        :param key_obj: An S3 key which contains an SDF
+        """
+        #@todo:: (lucas) would be nice if this could just take an s3://uri..."
+
+        self._sdf = key_obj.get_contents_as_string()
+
+    def commit(self):
+        """
+        Actually send an SDF to CloudSearch for processing
+
+        If an SDF file has been explicitly loaded it will be used. Otherwise,
+        documents added through :func:`add` and :func:`delete` will be used.
+
+        :rtype: :class:`CommitResponse`
+        :returns: A summary of documents added and deleted
+        """
+
+        sdf = self.get_sdf()
+
+        if ': null' in sdf:
+            boto.log.error('null value in sdf detected.
+                           'This will probably raise a 500 error.')
+            index = sdf.index(': null')
+            boto.log.error(sdf[index - 100:index + 100])
+
+        url = "http://%s/2011-02-01/documents/batch" % (self.endpoint)
+
+        # Keep-alive is automatic in a post-1.0 requests world.
+        session = requests.Session()
+        adapter = requests.adapters.HTTPAdapter(
+            pool_connections=20,
+            pool_maxsize=50,
+            max_retries=5
+        )
+        session.mount('http://', adapter)
+        session.mount('https://', adapter)
+        r = session.post(url, data=sdf, headers={'Content-Type': 'application/json'})
+
+        return CommitResponse(r, self, sdf)
+
+
+class CommitResponse(object):
+    """Wrapper for the response to a CloudSearch document batch commit.
+
+    :type response: :class:`requests.models.Response`
+    :param response: Response from CloudSearch /documents/batch API
+
+    :type doc_service: :class:`boto.cloudsearch.document.DocumentServiceConnection`
+    :param doc_service: Object containing the documents posted and methods to
+        retry
+
+    :raises: :class:`boto.exception.BotoServerError`
+    :raises: :class:`boto.cloudsearch.document.SearchServiceException`
+    :raises: :class:`boto.cloudsearch.document.EncodingError`
+    :raises: :class:`boto.cloudsearch.document.ContentTooLongError`
+    """
+    def __init__(self, response, doc_service, sdf):
+        self.response = response
+        self.doc_service = doc_service
+        self.sdf = sdf
+
+        _body = response.content.decode('utf-8')
+
+        try:
+            self.content = json.loads(_body)
+        except Exception:
+            boto.log.error('Error indexing documents.\nResponse Content:\n{0}\n\n'
+                           'SDF:\n{1}'.format(_body, self.sdf))
+            raise boto.exception.BotoServerError(self.response.status_code, '',
+                                                 body=_body)
+
+        self.status = self.content['status']
+        if self.status == 'error':
+            self.errors = [e.get('message') for e in self.content.get('errors',
+                                                                      [])]
+            for e in self.errors:
+                if "Illegal Unicode character" in e:
+                    raise EncodingError("Illegal Unicode character in document")
+                elif e == "The Content-Length is too long":
+                    raise ContentTooLongError("Content was too long")
+            if 'adds' not in self.content or 'deletes' not in self.content:
+                raise SearchServiceException("Error indexing documents"
+                                             " => %s" % self.content.get('message', ''))
+        else:
+            self.errors = []
+
+        self.adds = self.content['adds']
+        self.deletes = self.content['deletes']
+        self._check_num_ops('add', self.adds)
+        self._check_num_ops('delete', self.deletes)
+
+    def _check_num_ops(self, type_, response_num):
+        """Raise an exception if the number of ops in the response doesn't match the commit.
+
+        :type type_: str
+        :param type_: Type of commit operation: 'add' or 'delete'
+
+        :type response_num: int
+        :param response_num: Number of adds or deletes in the response.
+
+        :raises: :class:`boto.cloudsearch.document.CommitMismatchError`
+        """
+        commit_num = len([d for d in self.doc_service.documents_batch
+                          if d['type'] == type_])
+
+        if response_num != commit_num:
+            raise CommitMismatchError(
+                'Incorrect number of {0}s returned. Commit: {1} Response: {2}'\
+                .format(type_, commit_num, response_num))
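For reviewers, a short sketch of how CommitResponse (just above) surfaces failures; illustrative only, not part of the patch, reusing the hypothetical `doc_service` from the earlier sketch:

    from boto.cloudsearch.document import (CommitMismatchError,
                                           ContentTooLongError, EncodingError)

    try:
        response = doc_service.commit()
    except EncodingError:
        pass   # non-unicode bytes in a document marked as unicode
    except ContentTooLongError:
        pass   # the batch exceeded the 5MB upload limit; split and retry
    except CommitMismatchError:
        pass   # CloudSearch acknowledged a different number of ops than queued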
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/domain.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/domain.py
new file mode 100644
index 0000000000000000000000000000000000000000..9800b1751246ec005b359a64baa0b4d7f1d2f75f
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/domain.py
@@ -0,0 +1,394 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import boto
+from boto.compat import json
+from boto.cloudsearch.optionstatus import OptionStatus
+from boto.cloudsearch.optionstatus import IndexFieldStatus
+from boto.cloudsearch.optionstatus import ServicePoliciesStatus
+from boto.cloudsearch.optionstatus import RankExpressionStatus
+from boto.cloudsearch.document import DocumentServiceConnection
+from boto.cloudsearch.search import SearchConnection
+
+def handle_bool(value):
+    if value in [True, 'true', 'True', 'TRUE', 1]:
+        return True
+    return False
+
+
+class Domain(object):
+    """
+    A CloudSearch domain.
+
+    :ivar name: The name of the domain.
+
+    :ivar id: The internally generated unique identifier for the domain.
+
+    :ivar created: A boolean which is True if the domain is
+        created. It can take several minutes to initialize a domain
+        when CreateDomain is called. Newly created search domains are
+        returned with a False value for Created until domain creation
+        is complete.
+
+    :ivar deleted: A boolean which is True if the search domain has
+        been deleted. The system must clean up resources dedicated to
+        the search domain when delete is called. Newly deleted
+        search domains are returned from list_domains with a True
+        value for deleted for several minutes until resource cleanup
+        is complete.
+
+    :ivar processing: True if processing is being done to activate the
+        current domain configuration.
+
+    :ivar num_searchable_docs: The number of documents that have been
+        submitted to the domain and indexed.
+
+    :ivar requires_index_documents: True if index_documents needs to be
+        called to activate the current domain configuration.
+
+    :ivar search_instance_count: The number of search instances that are
+        available to process search requests.
+
+    :ivar search_instance_type: The instance type that is being used to
+        process search requests.
+
+    :ivar search_partition_count: The number of partitions across which
+        the search index is spread.
+ """ + + def __init__(self, layer1, data): + self.layer1 = layer1 + self.update_from_data(data) + + def update_from_data(self, data): + self.created = data['created'] + self.deleted = data['deleted'] + self.processing = data['processing'] + self.requires_index_documents = data['requires_index_documents'] + self.domain_id = data['domain_id'] + self.domain_name = data['domain_name'] + self.num_searchable_docs = data['num_searchable_docs'] + self.search_instance_count = data['search_instance_count'] + self.search_instance_type = data.get('search_instance_type', None) + self.search_partition_count = data['search_partition_count'] + self._doc_service = data['doc_service'] + self._search_service = data['search_service'] + + @property + def doc_service_arn(self): + return self._doc_service['arn'] + + @property + def doc_service_endpoint(self): + return self._doc_service['endpoint'] + + @property + def search_service_arn(self): + return self._search_service['arn'] + + @property + def search_service_endpoint(self): + return self._search_service['endpoint'] + + @property + def created(self): + return self._created + + @created.setter + def created(self, value): + self._created = handle_bool(value) + + @property + def deleted(self): + return self._deleted + + @deleted.setter + def deleted(self, value): + self._deleted = handle_bool(value) + + @property + def processing(self): + return self._processing + + @processing.setter + def processing(self, value): + self._processing = handle_bool(value) + + @property + def requires_index_documents(self): + return self._requires_index_documents + + @requires_index_documents.setter + def requires_index_documents(self, value): + self._requires_index_documents = handle_bool(value) + + @property + def search_partition_count(self): + return self._search_partition_count + + @search_partition_count.setter + def search_partition_count(self, value): + self._search_partition_count = int(value) + + @property + def search_instance_count(self): + return self._search_instance_count + + @search_instance_count.setter + def search_instance_count(self, value): + self._search_instance_count = int(value) + + @property + def num_searchable_docs(self): + return self._num_searchable_docs + + @num_searchable_docs.setter + def num_searchable_docs(self, value): + self._num_searchable_docs = int(value) + + @property + def name(self): + return self.domain_name + + @property + def id(self): + return self.domain_id + + def delete(self): + """ + Delete this domain and all index data associated with it. + """ + return self.layer1.delete_domain(self.name) + + def get_stemming(self): + """ + Return a :class:`boto.cloudsearch.option.OptionStatus` object + representing the currently defined stemming options for + the domain. + """ + return OptionStatus(self, None, + self.layer1.describe_stemming_options, + self.layer1.update_stemming_options) + + def get_stopwords(self): + """ + Return a :class:`boto.cloudsearch.option.OptionStatus` object + representing the currently defined stopword options for + the domain. + """ + return OptionStatus(self, None, + self.layer1.describe_stopword_options, + self.layer1.update_stopword_options) + + def get_synonyms(self): + """ + Return a :class:`boto.cloudsearch.option.OptionStatus` object + representing the currently defined synonym options for + the domain. 
+ """ + return OptionStatus(self, None, + self.layer1.describe_synonym_options, + self.layer1.update_synonym_options) + + def get_access_policies(self): + """ + Return a :class:`boto.cloudsearch.option.OptionStatus` object + representing the currently defined access policies for + the domain. + """ + return ServicePoliciesStatus(self, None, + self.layer1.describe_service_access_policies, + self.layer1.update_service_access_policies) + + def index_documents(self): + """ + Tells the search domain to start indexing its documents using + the latest text processing options and IndexFields. This + operation must be invoked to make options whose OptionStatus + has OptioState of RequiresIndexDocuments visible in search + results. + """ + self.layer1.index_documents(self.name) + + def get_index_fields(self, field_names=None): + """ + Return a list of index fields defined for this domain. + """ + data = self.layer1.describe_index_fields(self.name, field_names) + return [IndexFieldStatus(self, d) for d in data] + + def create_index_field(self, field_name, field_type, + default='', facet=False, result=False, searchable=False, + source_attributes=[]): + """ + Defines an ``IndexField``, either replacing an existing + definition or creating a new one. + + :type field_name: string + :param field_name: The name of a field in the search index. + + :type field_type: string + :param field_type: The type of field. Valid values are + uint | literal | text + + :type default: string or int + :param default: The default value for the field. If the + field is of type ``uint`` this should be an integer value. + Otherwise, it's a string. + + :type facet: bool + :param facet: A boolean to indicate whether facets + are enabled for this field or not. Does not apply to + fields of type ``uint``. + + :type results: bool + :param results: A boolean to indicate whether values + of this field can be returned in search results or + used in ranking. Does not apply to fields of type ``uint``. + + :type searchable: bool + :param searchable: A boolean to indicate whether search + is enabled for this field or not. Applies only to fields + of type ``literal``. + + :type source_attributes: list of dicts + :param source_attributes: An optional list of dicts that + provide information about attributes for this index field. + A maximum of 20 source attributes can be configured for + each index field. + + Each item in the list is a dict with the following keys: + + * data_copy - The value is a dict with the following keys: + * default - Optional default value if the source attribute + is not specified in a document. + * name - The name of the document source field to add + to this ``IndexField``. + * data_function - Identifies the transformation to apply + when copying data from a source attribute. + * data_map - The value is a dict with the following keys: + * cases - A dict that translates source field values + to custom values. + * default - An optional default value to use if the + source attribute is not specified in a document. + * name - the name of the document source field to add + to this ``IndexField`` + * data_trim_title - Trims common title words from a source + document attribute when populating an ``IndexField``. + This can be used to create an ``IndexField`` you can + use for sorting. The value is a dict with the following + fields: + * default - An optional default value. + * language - an IETF RFC 4646 language code. + * separator - The separator that follows the text to trim. 
+                * name - The name of the document source field to add.
+
+        :raises: BaseException, InternalException, LimitExceededException,
+            InvalidTypeException, ResourceNotFoundException
+        """
+        data = self.layer1.define_index_field(self.name, field_name,
+                                              field_type, default=default,
+                                              facet=facet, result=result,
+                                              searchable=searchable,
+                                              source_attributes=source_attributes)
+        return IndexFieldStatus(self, data,
+                                self.layer1.describe_index_fields)
+
+    def get_rank_expressions(self, rank_names=None):
+        """
+        Return a list of rank expressions defined for this domain.
+        """
+        fn = self.layer1.describe_rank_expressions
+        data = fn(self.name, rank_names)
+        return [RankExpressionStatus(self, d, fn) for d in data]
+
+    def create_rank_expression(self, name, expression):
+        """
+        Create a new rank expression.
+
+        :type name: string
+        :param name: The name of an expression computed for ranking
+            while processing a search request.
+
+        :type expression: string
+        :param expression: The expression to evaluate for ranking
+            or thresholding while processing a search request. The
+            RankExpression syntax is based on JavaScript expressions
+            and supports:
+
+            * Integer, floating point, hex and octal literals
+            * Shortcut evaluation of logical operators such that an
+              expression a || b evaluates to the value a if a is
+              true without evaluating b at all
+            * JavaScript order of precedence for operators
+            * Arithmetic operators: + - * / %
+            * Boolean operators (including the ternary operator)
+            * Bitwise operators
+            * Comparison operators
+            * Common mathematic functions: abs ceil erf exp floor
+              lgamma ln log2 log10 max min sqrt pow
+            * Trigonometric library functions: acosh acos asinh asin
+              atanh atan cosh cos sinh sin tanh tan
+            * Random generation of a number between 0 and 1: rand
+            * Current time in epoch: time
+            * The min max functions that operate on a variable argument list
+
+            Intermediate results are calculated as double precision
+            floating point values. The final return value of a
+            RankExpression is automatically converted from floating
+            point to a 32-bit unsigned integer by rounding to the
+            nearest integer, with a natural floor of 0 and a ceiling
+            of max(uint32_t), 4294967295. Mathematical errors such as
+            dividing by 0 will fail during evaluation and return a
+            value of 0.
+
+            The source data for a RankExpression can be the name of an
+            IndexField of type uint, another RankExpression or the
+            reserved name text_relevance. The text_relevance source is
+            defined to return an integer from 0 to 1000 (inclusive) to
+            indicate how relevant a document is to the search request,
+            taking into account repetition of search terms in the
+            document and proximity of search terms to each other in
+            each matching IndexField in the document.
+
+            For more information about using rank expressions to
+            customize ranking, see the Amazon CloudSearch Developer
+            Guide.
+
+        :raises: BaseException, InternalException, LimitExceededException,
+            InvalidTypeException, ResourceNotFoundException
+        """
+        data = self.layer1.define_rank_expression(self.name, name, expression)
+        return RankExpressionStatus(self, data,
+                                    self.layer1.describe_rank_expressions)
+
+    def get_document_service(self):
+        return DocumentServiceConnection(domain=self)
+
+    def get_search_service(self):
+        return SearchConnection(domain=self)
+
+    def __repr__(self):
+        return '<Domain: %s>' % self.domain_name
+
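Domain thus ties the configuration API (layer1), the document service, and the search service together; a brief hedged sketch of typical use (illustrative only, not part of the patch; `layer1` and `data` are assumed to come from Layer1.describe_domains(), defined in layer1.py below):

    from boto.cloudsearch.domain import Domain

    domain = Domain(layer1, data)
    if domain.created and not domain.processing:
        domain.create_index_field('title', 'text', result=True)
        domain.index_documents()   # activate options in RequiresIndexDocuments
        doc_service = domain.get_document_service()
        search_service = domain.get_search_service()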
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/layer1.py
new file mode 100644
index 0000000000000000000000000000000000000000..69132e39ce76d9166babb424864703b80018c819
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/layer1.py
@@ -0,0 +1,747 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
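layer1.py is the thin client over the CloudSearch 2011-02-01 configuration API on which Domain and Layer2 are built. A minimal hedged sketch of direct use (illustrative only, not part of the patch; credentials and region are assumed to come from the usual boto config or environment):

    from boto.cloudsearch.layer1 import Layer1

    layer1 = Layer1()                             # endpoint from boto config
    status = layer1.create_domain('demo-domain')  # parsed DomainStatus dict
    for data in layer1.describe_domains():
        print(data['domain_name'], data['processing'])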
+ +import boto +import boto.jsonresponse +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo + +#boto.set_stream_logger('cloudsearch') + + +def do_bool(val): + return 'true' if val in [True, 1, '1', 'true'] else 'false' + + +class Layer1(AWSQueryConnection): + + APIVersion = '2011-02-01' + DefaultRegionName = boto.config.get('Boto', 'cs_region_name', 'us-east-1') + DefaultRegionEndpoint = boto.config.get('Boto', 'cs_region_endpoint', + 'cloudsearch.us-east-1.amazonaws.com') + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, host=None, port=None, + proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + api_version=None, security_token=None, + validate_certs=True, profile_name=None): + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + self.region = region + AWSQueryConnection.__init__( + self, + host=self.region.endpoint, + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + is_secure=is_secure, + port=port, + proxy=proxy, + proxy_port=proxy_port, + proxy_user=proxy_user, + proxy_pass=proxy_pass, + debug=debug, + https_connection_factory=https_connection_factory, + path=path, + security_token=security_token, + validate_certs=validate_certs, + profile_name=profile_name) + + def _required_auth_capability(self): + return ['hmac-v4'] + + def get_response(self, doc_path, action, params, path='/', + parent=None, verb='GET', list_marker=None): + if not parent: + parent = self + response = self.make_request(action, params, path, verb) + body = response.read() + boto.log.debug(body) + if response.status == 200: + e = boto.jsonresponse.Element( + list_marker=list_marker if list_marker else 'Set', + pythonize_name=True) + h = boto.jsonresponse.XmlHandler(e, parent) + h.parse(body) + inner = e + for p in doc_path: + inner = inner.get(p) + if not inner: + return None if list_marker is None else [] + if isinstance(inner, list): + return inner + else: + return dict(**inner) + else: + raise self.ResponseError(response.status, response.reason, body) + + def create_domain(self, domain_name): + """ + Create a new search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :raises: BaseException, InternalException, LimitExceededException + """ + doc_path = ('create_domain_response', + 'create_domain_result', + 'domain_status') + params = {'DomainName': domain_name} + return self.get_response(doc_path, 'CreateDomain', + params, verb='POST') + + def define_index_field(self, domain_name, field_name, field_type, + default='', facet=False, result=False, + searchable=False, source_attributes=None): + """ + Defines an ``IndexField``, either replacing an existing + definition or creating a new one. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). 
Uppercase letters and underscores are not
+            allowed.
+
+        :type field_name: string
+        :param field_name: The name of a field in the search index.
+
+        :type field_type: string
+        :param field_type: The type of field. Valid values are
+            uint | literal | text
+
+        :type default: string or int
+        :param default: The default value for the field. If the
+            field is of type ``uint`` this should be an integer value.
+            Otherwise, it's a string.
+
+        :type facet: bool
+        :param facet: A boolean to indicate whether facets
+            are enabled for this field or not. Does not apply to
+            fields of type ``uint``.
+
+        :type result: bool
+        :param result: A boolean to indicate whether values
+            of this field can be returned in search results or
+            used in ranking. Does not apply to fields of type ``uint``.
+
+        :type searchable: bool
+        :param searchable: A boolean to indicate whether search
+            is enabled for this field or not. Applies only to fields
+            of type ``literal``.
+
+        :type source_attributes: list of dicts
+        :param source_attributes: An optional list of dicts that
+            provide information about attributes for this index field.
+            A maximum of 20 source attributes can be configured for
+            each index field.
+
+            Each item in the list is a dict with the following keys:
+
+            * data_copy - The value is a dict with the following keys:
+                * default - Optional default value if the source attribute
+                    is not specified in a document.
+                * name - The name of the document source field to add
+                    to this ``IndexField``.
+            * data_function - Identifies the transformation to apply
+                when copying data from a source attribute.
+            * data_map - The value is a dict with the following keys:
+                * cases - A dict that translates source field values
+                    to custom values.
+                * default - An optional default value to use if the
+                    source attribute is not specified in a document.
+                * name - The name of the document source field to add
+                    to this ``IndexField``.
+            * data_trim_title - Trims common title words from a source
+                document attribute when populating an ``IndexField``.
+                This can be used to create an ``IndexField`` you can
+                use for sorting. The value is a dict with the following
+                fields:
+                * default - An optional default value.
+                * language - An IETF RFC 4646 language code.
+                * separator - The separator that follows the text to trim.
+                * name - The name of the document source field to add.
+
+        :raises: BaseException, InternalException, LimitExceededException,
+            InvalidTypeException, ResourceNotFoundException
+        """
+        doc_path = ('define_index_field_response',
+                    'define_index_field_result',
+                    'index_field')
+        params = {'DomainName': domain_name,
+                  'IndexField.IndexFieldName': field_name,
+                  'IndexField.IndexFieldType': field_type}
+        if field_type == 'literal':
+            params['IndexField.LiteralOptions.DefaultValue'] = default
+            params['IndexField.LiteralOptions.FacetEnabled'] = do_bool(facet)
+            params['IndexField.LiteralOptions.ResultEnabled'] = do_bool(result)
+            params['IndexField.LiteralOptions.SearchEnabled'] = do_bool(searchable)
+        elif field_type == 'uint':
+            params['IndexField.UIntOptions.DefaultValue'] = default
+        elif field_type == 'text':
+            params['IndexField.TextOptions.DefaultValue'] = default
+            params['IndexField.TextOptions.FacetEnabled'] = do_bool(facet)
+            params['IndexField.TextOptions.ResultEnabled'] = do_bool(result)
+
+        return self.get_response(doc_path, 'DefineIndexField',
+                                 params, verb='POST')
+
+    def define_rank_expression(self, domain_name, rank_name, rank_expression):
+        """
+        Defines a RankExpression, either replacing an existing
+        definition or creating a new one.
+
+        :type domain_name: string
+        :param domain_name: A string that represents the name of a
+            domain. Domain names must be unique across the domains
+            owned by an account within an AWS region. Domain names
+            must start with a letter or number and can contain the
+            following characters: a-z (lowercase), 0-9, and -
+            (hyphen). Uppercase letters and underscores are not
+            allowed.
+
+        :type rank_name: string
+        :param rank_name: The name of an expression computed for ranking
+            while processing a search request.
+
+        :type rank_expression: string
+        :param rank_expression: The expression to evaluate for ranking
+            or thresholding while processing a search request. The
+            RankExpression syntax is based on JavaScript expressions
+            and supports:
+
+            * Integer, floating point, hex and octal literals
+            * Shortcut evaluation of logical operators such that an
+              expression a || b evaluates to the value a if a is
+              true without evaluating b at all
+            * JavaScript order of precedence for operators
+            * Arithmetic operators: + - * / %
+            * Boolean operators (including the ternary operator)
+            * Bitwise operators
+            * Comparison operators
+            * Common mathematic functions: abs ceil erf exp floor
+              lgamma ln log2 log10 max min sqrt pow
+            * Trigonometric library functions: acosh acos asinh asin
+              atanh atan cosh cos sinh sin tanh tan
+            * Random generation of a number between 0 and 1: rand
+            * Current time in epoch: time
+            * The min max functions that operate on a variable argument list
+
+            Intermediate results are calculated as double precision
+            floating point values. The final return value of a
+            RankExpression is automatically converted from floating
+            point to a 32-bit unsigned integer by rounding to the
+            nearest integer, with a natural floor of 0 and a ceiling
+            of max(uint32_t), 4294967295. Mathematical errors such as
+            dividing by 0 will fail during evaluation and return a
+            value of 0.
+
+            The source data for a RankExpression can be the name of an
+            IndexField of type uint, another RankExpression or the
+            reserved name text_relevance.
The text_relevance source is + defined to return an integer from 0 to 1000 (inclusive) to + indicate how relevant a document is to the search request, + taking into account repetition of search terms in the + document and proximity of search terms to each other in + each matching IndexField in the document. + + For more information about using rank expressions to + customize ranking, see the Amazon CloudSearch Developer + Guide. + + :raises: BaseException, InternalException, LimitExceededException, + InvalidTypeException, ResourceNotFoundException + """ + doc_path = ('define_rank_expression_response', + 'define_rank_expression_result', + 'rank_expression') + params = {'DomainName': domain_name, + 'RankExpression.RankExpression': rank_expression, + 'RankExpression.RankName': rank_name} + return self.get_response(doc_path, 'DefineRankExpression', + params, verb='POST') + + def delete_domain(self, domain_name): + """ + Delete a search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :raises: BaseException, InternalException + """ + doc_path = ('delete_domain_response', + 'delete_domain_result', + 'domain_status') + params = {'DomainName': domain_name} + return self.get_response(doc_path, 'DeleteDomain', + params, verb='POST') + + def delete_index_field(self, domain_name, field_name): + """ + Deletes an existing ``IndexField`` from the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type field_name: string + :param field_name: A string that represents the name of + an index field. Field names must begin with a letter and + can contain the following characters: a-z (lowercase), + 0-9, and _ (underscore). Uppercase letters and hyphens are + not allowed. The names "body", "docid", and + "text_relevance" are reserved and cannot be specified as + field or rank expression names. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('delete_index_field_response', + 'delete_index_field_result', + 'index_field') + params = {'DomainName': domain_name, + 'IndexFieldName': field_name} + return self.get_response(doc_path, 'DeleteIndexField', + params, verb='POST') + + def delete_rank_expression(self, domain_name, rank_name): + """ + Deletes an existing ``RankExpression`` from the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type rank_name: string + :param rank_name: Name of the ``RankExpression`` to delete. 
+ + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('delete_rank_expression_response', + 'delete_rank_expression_result', + 'rank_expression') + params = {'DomainName': domain_name, 'RankName': rank_name} + return self.get_response(doc_path, 'DeleteRankExpression', + params, verb='POST') + + def describe_default_search_field(self, domain_name): + """ + Describes options defining the default search field used by + indexing for the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('describe_default_search_field_response', + 'describe_default_search_field_result', + 'default_search_field') + params = {'DomainName': domain_name} + return self.get_response(doc_path, 'DescribeDefaultSearchField', + params, verb='POST') + + def describe_domains(self, domain_names=None): + """ + Describes the domains (optionally limited to one or more + domains by name) owned by this account. + + :type domain_names: list + :param domain_names: Limits the response to the specified domains. + + :raises: BaseException, InternalException + """ + doc_path = ('describe_domains_response', + 'describe_domains_result', + 'domain_status_list') + params = {} + if domain_names: + for i, domain_name in enumerate(domain_names, 1): + params['DomainNames.member.%d' % i] = domain_name + return self.get_response(doc_path, 'DescribeDomains', + params, verb='POST', + list_marker='DomainStatusList') + + def describe_index_fields(self, domain_name, field_names=None): + """ + Describes index fields in the search domain, optionally + limited to a single ``IndexField``. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type field_names: list + :param field_names: Limits the response to the specified fields. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('describe_index_fields_response', + 'describe_index_fields_result', + 'index_fields') + params = {'DomainName': domain_name} + if field_names: + for i, field_name in enumerate(field_names, 1): + params['FieldNames.member.%d' % i] = field_name + return self.get_response(doc_path, 'DescribeIndexFields', + params, verb='POST', + list_marker='IndexFields') + + def describe_rank_expressions(self, domain_name, rank_names=None): + """ + Describes RankExpressions in the search domain, optionally + limited to a single expression. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. 
+ + :type rank_names: list + :param rank_names: Limit response to the specified rank names. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('describe_rank_expressions_response', + 'describe_rank_expressions_result', + 'rank_expressions') + params = {'DomainName': domain_name} + if rank_names: + for i, rank_name in enumerate(rank_names, 1): + params['RankNames.member.%d' % i] = rank_name + return self.get_response(doc_path, 'DescribeRankExpressions', + params, verb='POST', + list_marker='RankExpressions') + + def describe_service_access_policies(self, domain_name): + """ + Describes the resource-based policies controlling access to + the services in this search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('describe_service_access_policies_response', + 'describe_service_access_policies_result', + 'access_policies') + params = {'DomainName': domain_name} + return self.get_response(doc_path, 'DescribeServiceAccessPolicies', + params, verb='POST') + + def describe_stemming_options(self, domain_name): + """ + Describes stemming options used by indexing for the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('describe_stemming_options_response', + 'describe_stemming_options_result', + 'stems') + params = {'DomainName': domain_name} + return self.get_response(doc_path, 'DescribeStemmingOptions', + params, verb='POST') + + def describe_stopword_options(self, domain_name): + """ + Describes stopword options used by indexing for the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('describe_stopword_options_response', + 'describe_stopword_options_result', + 'stopwords') + params = {'DomainName': domain_name} + return self.get_response(doc_path, 'DescribeStopwordOptions', + params, verb='POST') + + def describe_synonym_options(self, domain_name): + """ + Describes synonym options used by indexing for the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). 
Uppercase letters and underscores are not + allowed. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('describe_synonym_options_response', + 'describe_synonym_options_result', + 'synonyms') + params = {'DomainName': domain_name} + return self.get_response(doc_path, 'DescribeSynonymOptions', + params, verb='POST') + + def index_documents(self, domain_name): + """ + Tells the search domain to start scanning its documents using + the latest text processing options and ``IndexFields``. This + operation must be invoked to make visible in searches any + options whose OptionStatus has ``OptionState`` of + ``RequiresIndexDocuments``. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('index_documents_response', + 'index_documents_result', + 'field_names') + params = {'DomainName': domain_name} + return self.get_response(doc_path, 'IndexDocuments', params, + verb='POST', list_marker='FieldNames') + + def update_default_search_field(self, domain_name, default_search_field): + """ + Updates options defining the default search field used by + indexing for the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type default_search_field: string + :param default_search_field: The IndexField to use for search + requests issued with the q parameter. The default is an + empty string, which automatically searches all text + fields. + + :raises: BaseException, InternalException, InvalidTypeException, + ResourceNotFoundException + """ + doc_path = ('update_default_search_field_response', + 'update_default_search_field_result', + 'default_search_field') + params = {'DomainName': domain_name, + 'DefaultSearchField': default_search_field} + return self.get_response(doc_path, 'UpdateDefaultSearchField', + params, verb='POST') + + def update_service_access_policies(self, domain_name, access_policies): + """ + Updates the policies controlling access to the services in + this search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type access_policies: string + :param access_policies: An IAM access policy as described in + The Access Policy Language in Using AWS Identity and + Access Management. The maximum size of an access policy + document is 100KB. 
+ + :raises: BaseException, InternalException, LimitExceededException, + ResourceNotFoundException, InvalidTypeException + """ + doc_path = ('update_service_access_policies_response', + 'update_service_access_policies_result', + 'access_policies') + params = {'AccessPolicies': access_policies, + 'DomainName': domain_name} + return self.get_response(doc_path, 'UpdateServiceAccessPolicies', + params, verb='POST') + + def update_stemming_options(self, domain_name, stems): + """ + Updates stemming options used by indexing for the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type stems: string + :param stems: Maps terms to their stems. The JSON object + has a single key called "stems" whose value is a + dict mapping terms to their stems. The maximum size + of a stemming document is 500KB. + Example: {"stems":{"people": "person", "walking":"walk"}} + + :raises: BaseException, InternalException, InvalidTypeException, + LimitExceededException, ResourceNotFoundException + """ + doc_path = ('update_stemming_options_response', + 'update_stemming_options_result', + 'stems') + params = {'DomainName': domain_name, + 'Stems': stems} + return self.get_response(doc_path, 'UpdateStemmingOptions', + params, verb='POST') + + def update_stopword_options(self, domain_name, stopwords): + """ + Updates stopword options used by indexing for the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type stopwords: string + :param stopwords: Lists stopwords in a JSON object. The object has a + single key called "stopwords" whose value is an array of strings. + The maximum size of a stopwords document is 10KB. Example: + {"stopwords": ["a", "an", "the", "of"]} + + :raises: BaseException, InternalException, InvalidTypeException, + LimitExceededException, ResourceNotFoundException + """ + doc_path = ('update_stopword_options_response', + 'update_stopword_options_result', + 'stopwords') + params = {'DomainName': domain_name, + 'Stopwords': stopwords} + return self.get_response(doc_path, 'UpdateStopwordOptions', + params, verb='POST') + + def update_synonym_options(self, domain_name, synonyms): + """ + Updates synonym options used by indexing for the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type synonyms: string + :param synonyms: Maps terms to their synonyms. The JSON object + has a single key "synonyms" whose value is a dict mapping terms + to their synonyms. Each synonym is a simple string or an + array of strings. The maximum size of a stopwords document + is 100KB. 
Example: + {"synonyms": {"cat": ["feline", "kitten"], "puppy": "dog"}} + + :raises: BaseException, InternalException, InvalidTypeException, + LimitExceededException, ResourceNotFoundException + """ + doc_path = ('update_synonym_options_response', + 'update_synonym_options_result', + 'synonyms') + params = {'DomainName': domain_name, + 'Synonyms': synonyms} + return self.get_response(doc_path, 'UpdateSynonymOptions', + params, verb='POST') diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/layer2.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/layer2.py new file mode 100644 index 0000000000000000000000000000000000000000..b565d4b5b90f30c22d72a60e341afcc34daaeafc --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/layer2.py @@ -0,0 +1,75 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.cloudsearch.layer1 import Layer1 +from boto.cloudsearch.domain import Domain + + +class Layer2(object): + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + host=None, debug=0, session_token=None, region=None, + validate_certs=True): + self.layer1 = Layer1( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + is_secure=is_secure, + port=port, + proxy=proxy, + proxy_port=proxy_port, + host=host, + debug=debug, + security_token=session_token, + region=region, + validate_certs=validate_certs) + + def list_domains(self, domain_names=None): + """ + Return a list of :class:`boto.cloudsearch.domain.Domain` + objects for each domain defined in the current account. + """ + domain_data = self.layer1.describe_domains(domain_names) + return [Domain(self.layer1, data) for data in domain_data] + + def create_domain(self, domain_name): + """ + Create a new CloudSearch domain and return the corresponding + :class:`boto.cloudsearch.domain.Domain` object. 
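Layer2 is the entry point most callers will use; an end-to-end hedged sketch (illustrative only, not part of the patch; credentials from the usual boto config or environment are assumed):

    from boto.cloudsearch.layer2 import Layer2

    conn = Layer2()
    domain = conn.create_domain('demo-domain')
    # later:
    domain = conn.lookup('demo-domain')   # None if the domain does not exist
    for d in conn.list_domains():
        print(d.name, d.created, d.processing)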
+ """ + data = self.layer1.create_domain(domain_name) + return Domain(self.layer1, data) + + def lookup(self, domain_name): + """ + Lookup a single domain + :param domain_name: The name of the domain to look up + :type domain_name: str + + :return: Domain object, or None if the domain isn't found + :rtype: :class:`boto.cloudsearch.domain.Domain` + """ + domains = self.list_domains(domain_names=[domain_name]) + if len(domains) > 0: + return domains[0] diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/optionstatus.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/optionstatus.py new file mode 100644 index 0000000000000000000000000000000000000000..dddda76f970d1d950e88d259ec6ff099fac7d4c1 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/optionstatus.py @@ -0,0 +1,248 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import time +from boto.compat import json + + +class OptionStatus(dict): + """ + Presents a combination of status field (defined below) which are + accessed as attributes and option values which are stored in the + native Python dictionary. In this class, the option values are + merged from a JSON object that is stored as the Option part of + the object. + + :ivar domain_name: The name of the domain this option is associated with. + :ivar create_date: A timestamp for when this option was created. + :ivar state: The state of processing a change to an option. + Possible values: + + * RequiresIndexDocuments: the option's latest value will not + be visible in searches until IndexDocuments has been called + and indexing is complete. + * Processing: the option's latest value is not yet visible in + all searches but is in the process of being activated. + * Active: the option's latest value is completely visible. + + :ivar update_date: A timestamp for when this option was updated. + :ivar update_version: A unique integer that indicates when this + option was last updated. 
+ """ + + def __init__(self, domain, data=None, refresh_fn=None, save_fn=None): + self.domain = domain + self.refresh_fn = refresh_fn + self.save_fn = save_fn + self.refresh(data) + + def _update_status(self, status): + self.creation_date = status['creation_date'] + self.status = status['state'] + self.update_date = status['update_date'] + self.update_version = int(status['update_version']) + + def _update_options(self, options): + if options: + self.update(json.loads(options)) + + def refresh(self, data=None): + """ + Refresh the local state of the object. You can either pass + new state data in as the parameter ``data`` or, if that parameter + is omitted, the state data will be retrieved from CloudSearch. + """ + if not data: + if self.refresh_fn: + data = self.refresh_fn(self.domain.name) + if data: + self._update_status(data['status']) + self._update_options(data['options']) + + def to_json(self): + """ + Return the JSON representation of the options as a string. + """ + return json.dumps(self) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'CreationDate': + self.created = value + elif name == 'State': + self.state = value + elif name == 'UpdateDate': + self.updated = value + elif name == 'UpdateVersion': + self.update_version = int(value) + elif name == 'Options': + self.update_from_json_doc(value) + else: + setattr(self, name, value) + + def save(self): + """ + Write the current state of the local object back to the + CloudSearch service. + """ + if self.save_fn: + data = self.save_fn(self.domain.name, self.to_json()) + self.refresh(data) + + def wait_for_state(self, state): + """ + Performs polling of CloudSearch to wait for the ``state`` + of this object to change to the provided state. + """ + while self.state != state: + time.sleep(5) + self.refresh() + + +class IndexFieldStatus(OptionStatus): + + def _update_options(self, options): + self.update(options) + + def save(self): + pass + + +class RankExpressionStatus(IndexFieldStatus): + + pass + +class ServicePoliciesStatus(OptionStatus): + + def new_statement(self, arn, ip): + """ + Returns a new policy statement that will allow + access to the service described by ``arn`` by the + ip specified in ``ip``. + + :type arn: string + :param arn: The Amazon Resource Notation identifier for the + service you wish to provide access to. This would be + either the search service or the document service. + + :type ip: string + :param ip: An IP address or CIDR block you wish to grant access + to. + """ + return { + "Effect":"Allow", + "Action":"*", # Docs say use GET, but denies unless * + "Resource": arn, + "Condition": { + "IpAddress": { + "aws:SourceIp": [ip] + } + } + } + + def _allow_ip(self, arn, ip): + if 'Statement' not in self: + s = self.new_statement(arn, ip) + self['Statement'] = [s] + self.save() + else: + add_statement = True + for statement in self['Statement']: + if statement['Resource'] == arn: + for condition_name in statement['Condition']: + if condition_name == 'IpAddress': + add_statement = False + condition = statement['Condition'][condition_name] + if ip not in condition['aws:SourceIp']: + condition['aws:SourceIp'].append(ip) + + if add_statement: + s = self.new_statement(arn, ip) + self['Statement'].append(s) + self.save() + + def allow_search_ip(self, ip): + """ + Add the provided ip address or CIDR block to the list of + allowable address for the search service. 
+
+        :type ip: string
+        :param ip: An IP address or CIDR block you wish to grant access
+            to.
+        """
+        arn = self.domain.search_service_arn
+        self._allow_ip(arn, ip)
+
+    def allow_doc_ip(self, ip):
+        """
+        Add the provided ip address or CIDR block to the list of
+        allowable addresses for the document service.
+
+        :type ip: string
+        :param ip: An IP address or CIDR block you wish to grant access
+            to.
+        """
+        arn = self.domain.doc_service_arn
+        self._allow_ip(arn, ip)
+
+    def _disallow_ip(self, arn, ip):
+        if 'Statement' not in self:
+            return
+        need_update = False
+        for statement in self['Statement']:
+            if statement['Resource'] == arn:
+                for condition_name in statement['Condition']:
+                    if condition_name == 'IpAddress':
+                        condition = statement['Condition'][condition_name]
+                        if ip in condition['aws:SourceIp']:
+                            condition['aws:SourceIp'].remove(ip)
+                            need_update = True
+        if need_update:
+            self.save()
+
+    def disallow_search_ip(self, ip):
+        """
+        Remove the provided ip address or CIDR block from the list of
+        allowable addresses for the search service.
+
+        :type ip: string
+        :param ip: An IP address or CIDR block you wish to revoke access
+            for.
+        """
+        arn = self.domain.search_service_arn
+        self._disallow_ip(arn, ip)
+
+    def disallow_doc_ip(self, ip):
+        """
+        Remove the provided ip address or CIDR block from the list of
+        allowable addresses for the document service.
+
+        :type ip: string
+        :param ip: An IP address or CIDR block you wish to revoke access
+            for.
+        """
+        arn = self.domain.doc_service_arn
+        self._disallow_ip(arn, ip)
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/search.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/search.py
new file mode 100644
index 0000000000000000000000000000000000000000..70ea479becbdccdbb3ae7cc790ce463f563fa99c
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/search.py
@@ -0,0 +1,377 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+# +from math import ceil +from boto.compat import json, map, six +import requests + + +class SearchServiceException(Exception): + pass + + +class CommitMismatchError(Exception): + pass + + +class SearchResults(object): + def __init__(self, **attrs): + self.rid = attrs['info']['rid'] + # self.doc_coverage_pct = attrs['info']['doc-coverage-pct'] + self.cpu_time_ms = attrs['info']['cpu-time-ms'] + self.time_ms = attrs['info']['time-ms'] + self.hits = attrs['hits']['found'] + self.docs = attrs['hits']['hit'] + self.start = attrs['hits']['start'] + self.rank = attrs['rank'] + self.match_expression = attrs['match-expr'] + self.query = attrs['query'] + self.search_service = attrs['search_service'] + + self.facets = {} + if 'facets' in attrs: + for (facet, values) in attrs['facets'].items(): + if 'constraints' in values: + self.facets[facet] = dict((k, v) for (k, v) in map(lambda x: (x['value'], x['count']), values['constraints'])) + + self.num_pages_needed = ceil(self.hits / self.query.real_size) + + def __len__(self): + return len(self.docs) + + def __iter__(self): + return iter(self.docs) + + def next_page(self): + """Call Cloudsearch to get the next page of search results + + :rtype: :class:`boto.cloudsearch.search.SearchResults` + :return: the following page of search results + """ + if self.query.page <= self.num_pages_needed: + self.query.start += self.query.real_size + self.query.page += 1 + return self.search_service(self.query) + else: + raise StopIteration + + +class Query(object): + + RESULTS_PER_PAGE = 500 + + def __init__(self, q=None, bq=None, rank=None, + return_fields=None, size=10, + start=0, facet=None, facet_constraints=None, + facet_sort=None, facet_top_n=None, t=None): + + self.q = q + self.bq = bq + self.rank = rank or [] + self.return_fields = return_fields or [] + self.start = start + self.facet = facet or [] + self.facet_constraints = facet_constraints or {} + self.facet_sort = facet_sort or {} + self.facet_top_n = facet_top_n or {} + self.t = t or {} + self.page = 0 + self.update_size(size) + + def update_size(self, new_size): + self.size = new_size + self.real_size = Query.RESULTS_PER_PAGE if (self.size > + Query.RESULTS_PER_PAGE or self.size == 0) else self.size + + def to_params(self): + """Transform search parameters from instance properties to a dictionary + + :rtype: dict + :return: search parameters + """ + params = {'start': self.start, 'size': self.real_size} + + if self.q: + params['q'] = self.q + + if self.bq: + params['bq'] = self.bq + + if self.rank: + params['rank'] = ','.join(self.rank) + + if self.return_fields: + params['return-fields'] = ','.join(self.return_fields) + + if self.facet: + params['facet'] = ','.join(self.facet) + + if self.facet_constraints: + for k, v in six.iteritems(self.facet_constraints): + params['facet-%s-constraints' % k] = v + + if self.facet_sort: + for k, v in six.iteritems(self.facet_sort): + params['facet-%s-sort' % k] = v + + if self.facet_top_n: + for k, v in six.iteritems(self.facet_top_n): + params['facet-%s-top-n' % k] = v + + if self.t: + for k, v in six.iteritems(self.t): + params['t-%s' % k] = v + return params + + +class SearchConnection(object): + + def __init__(self, domain=None, endpoint=None): + self.domain = domain + self.endpoint = endpoint + if not endpoint: + self.endpoint = domain.search_service_endpoint + + def build_query(self, q=None, bq=None, rank=None, return_fields=None, + size=10, start=0, facet=None, facet_constraints=None, + facet_sort=None, facet_top_n=None, t=None): + return Query(q=q, bq=bq, 
rank=rank, return_fields=return_fields, + size=size, start=start, facet=facet, + facet_constraints=facet_constraints, + facet_sort=facet_sort, facet_top_n=facet_top_n, t=t) + + def search(self, q=None, bq=None, rank=None, return_fields=None, + size=10, start=0, facet=None, facet_constraints=None, + facet_sort=None, facet_top_n=None, t=None): + """ + Send a query to CloudSearch + + Each search query should use at least the q or bq argument to specify + the search parameter. The other options are used to specify the + criteria of the search. + + :type q: string + :param q: A string to search the default search fields for. + + :type bq: string + :param bq: A string to perform a Boolean search. This can be used to + create advanced searches. + + :type rank: List of strings + :param rank: A list of fields or rank expressions used to order the + search results. A field can be reversed by using the - operator. + ``['-year', 'author']`` + + :type return_fields: List of strings + :param return_fields: A list of fields which should be returned by the + search. If this field is not specified, only IDs will be returned. + ``['headline']`` + + :type size: int + :param size: Number of search results to specify + + :type start: int + :param start: Offset of the first search result to return (can be used + for paging) + + :type facet: list + :param facet: List of fields for which facets should be returned + ``['colour', 'size']`` + + :type facet_constraints: dict + :param facet_constraints: Use to limit facets to specific values + specified as comma-delimited strings in a Dictionary of facets + ``{'colour': "'blue','white','red'", 'size': "big"}`` + + :type facet_sort: dict + :param facet_sort: Rules used to specify the order in which facet + values should be returned. Allowed values are *alpha*, *count*, + *max*, *sum*. Use *alpha* to sort alphabetical, and *count* to sort + the facet by number of available result. + ``{'color': 'alpha', 'size': 'count'}`` + + :type facet_top_n: dict + :param facet_top_n: Dictionary of facets and number of facets to + return. + ``{'colour': 2}`` + + :type t: dict + :param t: Specify ranges for specific fields + ``{'year': '2000..2005'}`` + + :rtype: :class:`boto.cloudsearch.search.SearchResults` + :return: Returns the results of this search + + The following examples all assume we have indexed a set of documents + with fields: *author*, *date*, *headline* + + A simple search will look for documents whose default text search + fields will contain the search word exactly: + + >>> search(q='Tim') # Return documents with the word Tim in them (but not Timothy) + + A simple search with more keywords will return documents whose default + text search fields contain the search strings together or separately. + + >>> search(q='Tim apple') # Will match "tim" and "apple" + + More complex searches require the boolean search operator. + + Wildcard searches can be used to search for any words that start with + the search string. + + >>> search(bq="'Tim*'") # Return documents with words like Tim or Timothy) + + Search terms can also be combined. Allowed operators are "and", "or", + "not", "field", "optional", "token", "phrase", or "filter" + + >>> search(bq="(and 'Tim' (field author 'John Smith'))") + + Facets allow you to show classification information about the search + results. 
For example, you can retrieve the authors who have written
+        about Tim:
+
+        >>> search(q='Tim', facet=['Author'])
+
+        With facet_constraints, facet_top_n and facet_sort, more complicated
+        constraints can be specified, such as returning the top author out of
+        John Smith and Mark Smith who have a document with the word Tim in it.
+
+        >>> search(q='Tim',
+        ...     facet=['author'],
+        ...     facet_constraints={'author': "'John Smith','Mark Smith'"},
+        ...     facet_top_n={'author': 1},
+        ...     facet_sort={'author': 'count'})
+        """
+
+        query = self.build_query(q=q, bq=bq, rank=rank,
+                                 return_fields=return_fields,
+                                 size=size, start=start, facet=facet,
+                                 facet_constraints=facet_constraints,
+                                 facet_sort=facet_sort,
+                                 facet_top_n=facet_top_n, t=t)
+        return self(query)
+
+    def __call__(self, query):
+        """Make a call to CloudSearch
+
+        :type query: :class:`boto.cloudsearch.search.Query`
+        :param query: A group of search criteria
+
+        :rtype: :class:`boto.cloudsearch.search.SearchResults`
+        :return: search results
+        """
+        url = "http://%s/2011-02-01/search" % (self.endpoint)
+        params = query.to_params()
+
+        r = requests.get(url, params=params)
+        body = r.content.decode('utf-8')
+        try:
+            data = json.loads(body)
+        except ValueError as e:
+            if r.status_code == 403:
+                msg = ''
+                import re
+                g = re.search('<html><body><h1>403 Forbidden</h1>
([^<]+)<', body) + try: + msg = ': %s' % (g.groups()[0].strip()) + except AttributeError: + pass + raise SearchServiceException('Authentication error from Amazon%s' % msg) + raise SearchServiceException("Got non-json response from Amazon. %s" % body, query) + + if 'messages' in data and 'error' in data: + for m in data['messages']: + if m['severity'] == 'fatal': + raise SearchServiceException("Error processing search %s " + "=> %s" % (params, m['message']), query) + elif 'error' in data: + raise SearchServiceException("Unknown error processing search %s" + % json.dumps(data), query) + + data['query'] = query + data['search_service'] = self + + return SearchResults(**data) + + def get_all_paged(self, query, per_page): + """Get a generator to iterate over all pages of search results + + :type query: :class:`boto.cloudsearch.search.Query` + :param query: A group of search criteria + + :type per_page: int + :param per_page: Number of docs in each :class:`boto.cloudsearch.search.SearchResults` object. + + :rtype: generator + :return: Generator containing :class:`boto.cloudsearch.search.SearchResults` + """ + query.update_size(per_page) + page = 0 + num_pages_needed = 0 + while page <= num_pages_needed: + results = self(query) + num_pages_needed = results.num_pages_needed + yield results + query.start += query.real_size + page += 1 + + def get_all_hits(self, query): + """Get a generator to iterate over all search results + + Transparently handles the results paging from Cloudsearch + search results so even if you have many thousands of results + you can iterate over all results in a reasonably efficient + manner. + + :type query: :class:`boto.cloudsearch.search.Query` + :param query: A group of search criteria + + :rtype: generator + :return: All docs matching query + """ + page = 0 + num_pages_needed = 0 + while page <= num_pages_needed: + results = self(query) + num_pages_needed = results.num_pages_needed + for doc in results: + yield doc + query.start += query.real_size + page += 1 + + def get_num_hits(self, query): + """Return the total number of hits for query + + :type query: :class:`boto.cloudsearch.search.Query` + :param query: a group of search criteria + + :rtype: int + :return: Total number of hits for query + """ + query.update_size(1) + return self(query).hits + + + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/sourceattribute.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/sourceattribute.py new file mode 100644 index 0000000000000000000000000000000000000000..2883314722c9d8164c9a81faf2541644d3dbe473 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch/sourceattribute.py @@ -0,0 +1,74 @@ +# Copyright (c) 202 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class SourceAttribute(object): + """ + Provide information about attributes for an index field. + A maximum of 20 source attributes can be configured for + each index field. + + :ivar default: Optional default value if the source attribute + is not specified in a document. + + :ivar name: The name of the document source field to add + to this ``IndexField``. + + :ivar data_function: Identifies the transformation to apply + when copying data from a source attribute. + + :ivar data_map: The value is a dict with the following keys: + * cases - A dict that translates source field values + to custom values. + * default - An optional default value to use if the + source attribute is not specified in a document. + * name - the name of the document source field to add + to this ``IndexField`` + :ivar data_trim_title: Trims common title words from a source + document attribute when populating an ``IndexField``. + This can be used to create an ``IndexField`` you can + use for sorting. The value is a dict with the following + fields: + * default - An optional default value. + * language - an IETF RFC 4646 language code. + * separator - The separator that follows the text to trim. + * name - The name of the document source field to add. + """ + + ValidDataFunctions = ('Copy', 'TrimTitle', 'Map') + + def __init__(self): + self.data_copy = {} + self._data_function = self.ValidDataFunctions[0] + self.data_map = {} + self.data_trim_title = {} + + @property + def data_function(self): + return self._data_function + + @data_function.setter + def data_function(self, value): + if value not in self.ValidDataFunctions: + valid = '|'.join(self.ValidDataFunctions) + raise ValueError('data_function must be one of: %s' % valid) + self._data_function = value diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d14c91793516de0ff23a409400e64e4ecc174b49 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/__init__.py @@ -0,0 +1,42 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +from boto.regioninfo import get_regions + + +def regions(): + """ + Get all available regions for the Amazon CloudSearch service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + import boto.cloudsearch2.layer1 + return get_regions( + 'cloudsearch', + connection_cls=boto.cloudsearch2.layer1.CloudSearchConnection + ) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/document.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/document.py new file mode 100644 index 0000000000000000000000000000000000000000..cfe5fe623d85237741262a6e465d9bb89ea9c77e --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/document.py @@ -0,0 +1,315 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto.exception +from boto.compat import json +import requests +import boto +from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection + + +class SearchServiceException(Exception): + pass + + +class CommitMismatchError(Exception): + # Let's do some extra work and let the user handle errors on his/her own. + + errors = None + + +class EncodingError(Exception): + """ + Content sent for Cloud Search indexing was incorrectly encoded. + + This usually happens when a document is marked as unicode but non-unicode + characters are present. + """ + pass + + +class ContentTooLongError(Exception): + """ + Content sent for Cloud Search indexing was too long + + This will usually happen when documents queued for indexing add up to more + than the limit allowed per upload batch (5MB) + + """ + pass + + +class DocumentServiceConnection(object): + """ + A CloudSearch document service. + + The DocumentServiceConection is used to add, remove and update documents in + CloudSearch. Commands are uploaded to CloudSearch in SDF (Search Document + Format). + + To generate an appropriate SDF, use :func:`add` to add or update documents, + as well as :func:`delete` to remove documents. 
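+
+    A hedged usage sketch, assuming ``domain`` is an existing
+    :class:`boto.cloudsearch2.domain.Domain` (the document ids and field
+    values are illustrative only)::
+
+        conn = DocumentServiceConnection(domain=domain)
+        conn.add('doc-1', {'headline': 'Hello CloudSearch'})
+        conn.delete('doc-2')
+        response = conn.commit()
+        print(response.adds, response.deletes)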
+
+    Once the set of documents is ready to be indexed, use :func:`commit` to
+    send the commands to CloudSearch.
+
+    If there are a lot of documents to index, it may be preferable to split the
+    generation of SDF data and the actual uploading into CloudSearch. Retrieve
+    the current SDF with :func:`get_sdf`. If this file is then uploaded into
+    S3, it can be retrieved back afterwards for upload into CloudSearch using
+    :func:`add_sdf_from_s3`.
+
+    The SDF is not cleared after a :func:`commit`. If you wish to continue
+    using the DocumentServiceConnection for another batch upload of commands,
+    you will need to :func:`clear_sdf` first to stop the previous batch of
+    commands from being uploaded again.
+
+    """
+
+    def __init__(self, domain=None, endpoint=None):
+        self.domain = domain
+        self.endpoint = endpoint
+        if not self.endpoint:
+            self.endpoint = domain.doc_service_endpoint
+        self.documents_batch = []
+        self._sdf = None
+
+        # Copy proxy settings from connection and check if request should be signed
+        self.proxy = {}
+        self.sign_request = False
+        if self.domain and self.domain.layer1:
+            if self.domain.layer1.use_proxy:
+                self.proxy = {'http': self.domain.layer1.get_proxy_url_with_auth()}
+
+            self.sign_request = getattr(self.domain.layer1, 'sign_request', False)
+
+            if self.sign_request:
+                # Create a domain connection to send signed requests
+                layer1 = self.domain.layer1
+                self.domain_connection = CloudSearchDomainConnection(
+                    host=self.endpoint,
+                    aws_access_key_id=layer1.aws_access_key_id,
+                    aws_secret_access_key=layer1.aws_secret_access_key,
+                    region=layer1.region,
+                    provider=layer1.provider
+                )
+
+    def add(self, _id, fields):
+        """
+        Add a document to be processed by the DocumentService
+
+        The document will not actually be added until :func:`commit` is called
+
+        :type _id: string
+        :param _id: A unique ID used to refer to this document.
+
+        :type fields: dict
+        :param fields: A dictionary of key-value pairs to be uploaded.
+        """
+
+        d = {'type': 'add', 'id': _id, 'fields': fields}
+        self.documents_batch.append(d)
+
+    def delete(self, _id):
+        """
+        Schedule a document to be removed from the CloudSearch service
+
+        The document will not actually be scheduled for removal until
+        :func:`commit` is called
+
+        :type _id: string
+        :param _id: The unique ID of this document.
+        """
+
+        d = {'type': 'delete', 'id': _id}
+        self.documents_batch.append(d)
+
+    def get_sdf(self):
+        """
+        Generate the working set of documents in Search Data Format (SDF)
+
+        :rtype: string
+        :returns: JSON-formatted string of the documents in SDF
+        """
+
+        return self._sdf if self._sdf else json.dumps(self.documents_batch)
+
+    def clear_sdf(self):
+        """
+        Clear the working documents from this DocumentServiceConnection
+
+        This should be used after :func:`commit` if the connection will be
+        reused for another set of documents.
+        """
+
+        self._sdf = None
+        self.documents_batch = []
+
+    def add_sdf_from_s3(self, key_obj):
+        """
+        Load an SDF from S3
+
+        Using this method will result in documents added through
+        :func:`add` and :func:`delete` being ignored.
+
+        :type key_obj: :class:`boto.s3.key.Key`
+        :param key_obj: An S3 key which contains an SDF
+        """
+        #@todo:: (lucas) would be nice if this could just take an s3://uri...
+ + self._sdf = key_obj.get_contents_as_string() + + def _commit_with_auth(self, sdf, api_version): + return self.domain_connection.upload_documents(sdf, 'application/json') + + def _commit_without_auth(self, sdf, api_version): + url = "http://%s/%s/documents/batch" % (self.endpoint, api_version) + + # Keep-alive is automatic in a post-1.0 requests world. + session = requests.Session() + session.proxies = self.proxy + adapter = requests.adapters.HTTPAdapter( + pool_connections=20, + pool_maxsize=50, + max_retries=5 + ) + session.mount('http://', adapter) + session.mount('https://', adapter) + + resp = session.post(url, data=sdf, headers={'Content-Type': 'application/json'}) + return resp + + def commit(self): + """ + Actually send an SDF to CloudSearch for processing + + If an SDF file has been explicitly loaded it will be used. Otherwise, + documents added through :func:`add` and :func:`delete` will be used. + + :rtype: :class:`CommitResponse` + :returns: A summary of documents added and deleted + """ + + sdf = self.get_sdf() + + if ': null' in sdf: + boto.log.error('null value in sdf detected. This will probably ' + 'raise 500 error.') + index = sdf.index(': null') + boto.log.error(sdf[index - 100:index + 100]) + + api_version = '2013-01-01' + if self.domain and self.domain.layer1: + api_version = self.domain.layer1.APIVersion + + if self.sign_request: + r = self._commit_with_auth(sdf, api_version) + else: + r = self._commit_without_auth(sdf, api_version) + + return CommitResponse(r, self, sdf, signed_request=self.sign_request) + + +class CommitResponse(object): + """Wrapper for response to Cloudsearch document batch commit. + + :type response: :class:`requests.models.Response` + :param response: Response from Cloudsearch /documents/batch API + + :type doc_service: :class:`boto.cloudsearch2.document.DocumentServiceConnection` + :param doc_service: Object containing the documents posted and methods to + retry + + :raises: :class:`boto.exception.BotoServerError` + :raises: :class:`boto.cloudsearch2.document.SearchServiceException` + :raises: :class:`boto.cloudsearch2.document.EncodingError` + :raises: :class:`boto.cloudsearch2.document.ContentTooLongError` + """ + def __init__(self, response, doc_service, sdf, signed_request=False): + self.response = response + self.doc_service = doc_service + self.sdf = sdf + self.signed_request = signed_request + + if self.signed_request: + self.content = response + else: + _body = response.content.decode('utf-8') + + try: + self.content = json.loads(_body) + except: + boto.log.error('Error indexing documents.\nResponse Content:\n{0}' + '\n\nSDF:\n{1}'.format(_body, self.sdf)) + raise boto.exception.BotoServerError(self.response.status_code, '', + body=_body) + + self.status = self.content['status'] + if self.status == 'error': + self.errors = [e.get('message') for e in self.content.get('errors', + [])] + for e in self.errors: + if "Illegal Unicode character" in e: + raise EncodingError("Illegal Unicode character in document") + elif e == "The Content-Length is too long": + raise ContentTooLongError("Content was too long") + else: + self.errors = [] + + self.adds = self.content['adds'] + self.deletes = self.content['deletes'] + self._check_num_ops('add', self.adds) + self._check_num_ops('delete', self.deletes) + + def _check_num_ops(self, type_, response_num): + """Raise exception if number of ops in response doesn't match commit + + :type type_: str + :param type_: Type of commit operation: 'add' or 'delete' + + :type response_num: int + :param 
response_num: Number of adds or deletes in the response. + + :raises: :class:`boto.cloudsearch2.document.CommitMismatchError` + """ + commit_num = len([d for d in self.doc_service.documents_batch + if d['type'] == type_]) + + if response_num != commit_num: + if self.signed_request: + boto.log.debug(self.response) + else: + boto.log.debug(self.response.content) + # There will always be a commit mismatch error if there is any + # errors on cloudsearch. self.errors gets lost when this + # CommitMismatchError is raised. Whoever is using boto has no idea + # why their commit failed. They can't even notify the user of the + # cause by parsing the error messages from amazon. So let's + # attach the self.errors to the exceptions if we already spent + # time and effort collecting them out of the response. + exc = CommitMismatchError( + 'Incorrect number of {0}s returned. Commit: {1} Response: {2}' + .format(type_, commit_num, response_num) + ) + exc.errors = self.errors + raise exc diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/domain.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/domain.py new file mode 100644 index 0000000000000000000000000000000000000000..956af216d8440e2857dafda3928fdd0599d4c985 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/domain.py @@ -0,0 +1,542 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.cloudsearch2.optionstatus import IndexFieldStatus +from boto.cloudsearch2.optionstatus import ServicePoliciesStatus +from boto.cloudsearch2.optionstatus import ExpressionStatus +from boto.cloudsearch2.optionstatus import AvailabilityOptionsStatus +from boto.cloudsearch2.optionstatus import ScalingParametersStatus +from boto.cloudsearch2.document import DocumentServiceConnection +from boto.cloudsearch2.search import SearchConnection + + +def handle_bool(value): + if value in [True, 'true', 'True', 'TRUE', 1]: + return True + return False + + +class Domain(object): + """ + A Cloudsearch domain. + + :ivar name: The name of the domain. + + :ivar id: The internally generated unique identifier for the domain. + + :ivar created: A boolean which is True if the domain is + created. It can take several minutes to initialize a domain + when CreateDomain is called. Newly created search domains are + returned with a False value for Created until domain creation + is complete + + :ivar deleted: A boolean which is True if the search domain has + been deleted. 
The system must clean up resources dedicated to
+        the search domain when delete is called. Newly deleted
+        search domains are returned from list_domains with a True
+        value for deleted for several minutes until resource cleanup
+        is complete.
+
+    :ivar processing: True if processing is being done to activate the
+        current domain configuration.
+
+    :ivar num_searchable_docs: The number of documents that have been
+        submitted to the domain and indexed.
+
+    :ivar requires_index_documents: True if index_documents needs to be
+        called to activate the current domain configuration.
+
+    :ivar search_instance_count: The number of search instances that are
+        available to process search requests.
+
+    :ivar search_instance_type: The instance type that is being used to
+        process search requests.
+
+    :ivar search_partition_count: The number of partitions across which
+        the search index is spread.
+    """
+
+    def __init__(self, layer1, data):
+        """
+        Create a domain object from a layer1 connection and a dict of
+        domain status data.
+
+        :type layer1: :class:`boto.cloudsearch2.layer1.Layer1` object
+        :param layer1: A :class:`boto.cloudsearch2.layer1.Layer1` object
+            which is used to perform operations on the domain.
+        """
+        self.layer1 = layer1
+        self.update_from_data(data)
+
+    def update_from_data(self, data):
+        self.created = data['Created']
+        self.deleted = data['Deleted']
+        self.processing = data['Processing']
+        self.requires_index_documents = data['RequiresIndexDocuments']
+        self.domain_id = data['DomainId']
+        self.domain_name = data['DomainName']
+        self.search_instance_count = data['SearchInstanceCount']
+        self.search_instance_type = data.get('SearchInstanceType', None)
+        self.search_partition_count = data['SearchPartitionCount']
+        self._doc_service = data['DocService']
+        self._service_arn = data['ARN']
+        self._search_service = data['SearchService']
+
+    @property
+    def service_arn(self):
+        return self._service_arn
+
+    @property
+    def doc_service_endpoint(self):
+        return self._doc_service['Endpoint']
+
+    @property
+    def search_service_endpoint(self):
+        return self._search_service['Endpoint']
+
+    @property
+    def created(self):
+        return self._created
+
+    @created.setter
+    def created(self, value):
+        self._created = handle_bool(value)
+
+    @property
+    def deleted(self):
+        return self._deleted
+
+    @deleted.setter
+    def deleted(self, value):
+        self._deleted = handle_bool(value)
+
+    @property
+    def processing(self):
+        return self._processing
+
+    @processing.setter
+    def processing(self, value):
+        self._processing = handle_bool(value)
+
+    @property
+    def requires_index_documents(self):
+        return self._requires_index_documents
+
+    @requires_index_documents.setter
+    def requires_index_documents(self, value):
+        self._requires_index_documents = handle_bool(value)
+
+    @property
+    def search_partition_count(self):
+        return self._search_partition_count
+
+    @search_partition_count.setter
+    def search_partition_count(self, value):
+        self._search_partition_count = int(value)
+
+    @property
+    def search_instance_count(self):
+        return self._search_instance_count
+
+    @search_instance_count.setter
+    def search_instance_count(self, value):
+        self._search_instance_count = int(value)
+
+    @property
+    def name(self):
+        return self.domain_name
+
+    @property
+    def id(self):
+        return self.domain_id
+
+    def delete(self):
+        """
+        Delete this domain and all index data associated with it.
+        """
+        return self.layer1.delete_domain(self.name)
+
+    def get_analysis_schemes(self):
+        """
+        Return a list of Analysis Scheme objects.
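+
+        A hedged call sketch, assuming ``domain`` is an existing ``Domain``
+        instance; the raw describe response is returned as-is::
+
+            schemes = domain.get_analysis_schemes()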
+ """ + return self.layer1.describe_analysis_schemes(self.name) + + def get_availability_options(self): + """ + Return a :class:`boto.cloudsearch2.option.AvailabilityOptionsStatus` + object representing the currently defined availability options for + the domain. + :return: OptionsStatus object + :rtype: :class:`boto.cloudsearch2.option.AvailabilityOptionsStatus` + object + """ + return AvailabilityOptionsStatus( + self, refresh_fn=self.layer1.describe_availability_options, + refresh_key=['DescribeAvailabilityOptionsResponse', + 'DescribeAvailabilityOptionsResult', + 'AvailabilityOptions'], + save_fn=self.layer1.update_availability_options) + + def get_scaling_options(self): + """ + Return a :class:`boto.cloudsearch2.option.ScalingParametersStatus` + object representing the currently defined scaling options for the + domain. + :return: ScalingParametersStatus object + :rtype: :class:`boto.cloudsearch2.option.ScalingParametersStatus` + object + """ + return ScalingParametersStatus( + self, refresh_fn=self.layer1.describe_scaling_parameters, + refresh_key=['DescribeScalingParametersResponse', + 'DescribeScalingParametersResult', + 'ScalingParameters'], + save_fn=self.layer1.update_scaling_parameters) + + def get_access_policies(self): + """ + Return a :class:`boto.cloudsearch2.option.ServicePoliciesStatus` + object representing the currently defined access policies for the + domain. + :return: ServicePoliciesStatus object + :rtype: :class:`boto.cloudsearch2.option.ServicePoliciesStatus` object + """ + return ServicePoliciesStatus( + self, refresh_fn=self.layer1.describe_service_access_policies, + refresh_key=['DescribeServiceAccessPoliciesResponse', + 'DescribeServiceAccessPoliciesResult', + 'AccessPolicies'], + save_fn=self.layer1.update_service_access_policies) + + def index_documents(self): + """ + Tells the search domain to start indexing its documents using + the latest text processing options and IndexFields. This + operation must be invoked to make options whose OptionStatus + has OptionState of RequiresIndexDocuments visible in search + results. + """ + self.layer1.index_documents(self.name) + + def get_index_fields(self, field_names=None): + """ + Return a list of index fields defined for this domain. + :return: list of IndexFieldStatus objects + :rtype: list of :class:`boto.cloudsearch2.option.IndexFieldStatus` + object + """ + data = self.layer1.describe_index_fields(self.name, field_names) + + data = (data['DescribeIndexFieldsResponse'] + ['DescribeIndexFieldsResult'] + ['IndexFields']) + + return [IndexFieldStatus(self, d) for d in data] + + def create_index_field(self, field_name, field_type, + default='', facet=False, returnable=False, + searchable=False, sortable=False, + highlight=False, source_field=None, + analysis_scheme=None): + """ + Defines an ``IndexField``, either replacing an existing + definition or creating a new one. + + :type field_name: string + :param field_name: The name of a field in the search index. + + :type field_type: string + :param field_type: The type of field. Valid values are + int | double | literal | text | date | latlon | + int-array | double-array | literal-array | text-array | date-array + + :type default: string or int + :param default: The default value for the field. If the + field is of type ``int`` this should be an integer value. + Otherwise, it's a string. + + :type facet: bool + :param facet: A boolean to indicate whether facets + are enabled for this field or not. 
Does not apply to + fields of type ``int, int-array, text, text-array``. + + :type returnable: bool + :param returnable: A boolean to indicate whether values + of this field can be returned in search results or + used in ranking. + + :type searchable: bool + :param searchable: A boolean to indicate whether search + is enabled for this field or not. + + :type sortable: bool + :param sortable: A boolean to indicate whether sorting + is enabled for this field or not. Does not apply to + fields of array types. + + :type highlight: bool + :param highlight: A boolean to indicate whether highlighting + is enabled for this field or not. Does not apply to + fields of type ``double, int, date, latlon`` + + :type source_field: list of strings or string + :param source_field: For array types, this is the list of fields + to treat as the source. For singular types, pass a string only. + + :type analysis_scheme: string + :param analysis_scheme: The analysis scheme to use for this field. + Only applies to ``text | text-array`` field types + + :return: IndexFieldStatus objects + :rtype: :class:`boto.cloudsearch2.option.IndexFieldStatus` object + + :raises: BaseException, InternalException, LimitExceededException, + InvalidTypeException, ResourceNotFoundException + """ + index = { + 'IndexFieldName': field_name, + 'IndexFieldType': field_type + } + if field_type == 'literal': + index['LiteralOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable, + 'SortEnabled': sortable + } + if default: + index['LiteralOptions']['DefaultValue'] = default + if source_field: + index['LiteralOptions']['SourceField'] = source_field + elif field_type == 'literal-array': + index['LiteralArrayOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable + } + if default: + index['LiteralArrayOptions']['DefaultValue'] = default + if source_field: + index['LiteralArrayOptions']['SourceFields'] = \ + ','.join(source_field) + elif field_type == 'int': + index['IntOptions'] = { + 'DefaultValue': default, + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable, + 'SortEnabled': sortable + } + if default: + index['IntOptions']['DefaultValue'] = default + if source_field: + index['IntOptions']['SourceField'] = source_field + elif field_type == 'int-array': + index['IntArrayOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable + } + if default: + index['IntArrayOptions']['DefaultValue'] = default + if source_field: + index['IntArrayOptions']['SourceFields'] = \ + ','.join(source_field) + elif field_type == 'date': + index['DateOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable, + 'SortEnabled': sortable + } + if default: + index['DateOptions']['DefaultValue'] = default + if source_field: + index['DateOptions']['SourceField'] = source_field + elif field_type == 'date-array': + index['DateArrayOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable + } + if default: + index['DateArrayOptions']['DefaultValue'] = default + if source_field: + index['DateArrayOptions']['SourceFields'] = \ + ','.join(source_field) + elif field_type == 'double': + index['DoubleOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable, + 'SortEnabled': sortable + } + if default: + index['DoubleOptions']['DefaultValue'] = default + if source_field: + 
index['DoubleOptions']['SourceField'] = source_field + elif field_type == 'double-array': + index['DoubleArrayOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable + } + if default: + index['DoubleArrayOptions']['DefaultValue'] = default + if source_field: + index['DoubleArrayOptions']['SourceFields'] = \ + ','.join(source_field) + elif field_type == 'text': + index['TextOptions'] = { + 'ReturnEnabled': returnable, + 'HighlightEnabled': highlight, + 'SortEnabled': sortable + } + if default: + index['TextOptions']['DefaultValue'] = default + if source_field: + index['TextOptions']['SourceField'] = source_field + if analysis_scheme: + index['TextOptions']['AnalysisScheme'] = analysis_scheme + elif field_type == 'text-array': + index['TextArrayOptions'] = { + 'ReturnEnabled': returnable, + 'HighlightEnabled': highlight + } + if default: + index['TextArrayOptions']['DefaultValue'] = default + if source_field: + index['TextArrayOptions']['SourceFields'] = \ + ','.join(source_field) + if analysis_scheme: + index['TextArrayOptions']['AnalysisScheme'] = analysis_scheme + elif field_type == 'latlon': + index['LatLonOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable, + 'SortEnabled': sortable + } + if default: + index['LatLonOptions']['DefaultValue'] = default + if source_field: + index['LatLonOptions']['SourceField'] = source_field + + data = self.layer1.define_index_field(self.name, index) + + data = (data['DefineIndexFieldResponse'] + ['DefineIndexFieldResult'] + ['IndexField']) + + return IndexFieldStatus(self, data, + self.layer1.describe_index_fields) + + def get_expressions(self, names=None): + """ + Return a list of rank expressions defined for this domain. + :return: list of ExpressionStatus objects + :rtype: list of :class:`boto.cloudsearch2.option.ExpressionStatus` + object + """ + fn = self.layer1.describe_expressions + data = fn(self.name, names) + + data = (data['DescribeExpressionsResponse'] + ['DescribeExpressionsResult'] + ['Expressions']) + + return [ExpressionStatus(self, d, fn) for d in data] + + def create_expression(self, name, value): + """ + Create a new expression. + + :type name: string + :param name: The name of an expression for processing + during a search request. + + :type value: string + :param value: The expression to evaluate for ranking + or thresholding while processing a search request. The + Expression syntax is based on JavaScript expressions + and supports: + + * Single value, sort enabled numeric fields (int, double, date) + * Other expressions + * The _score variable, which references a document's relevance + score + * The _time variable, which references the current epoch time + * Integer, floating point, hex, and octal literals + * Arithmetic operators: + - * / % + * Bitwise operators: | & ^ ~ << >> >>> + * Boolean operators (including the ternary operator): && || ! ?: + * Comparison operators: < <= == >= > + * Mathematical functions: abs ceil exp floor ln log2 log10 logn + max min pow sqrt pow + * Trigonometric functions: acos acosh asin asinh atan atan2 atanh + cos cosh sin sinh tanh tan + * The haversin distance function + + Expressions always return an integer value from 0 to the maximum + 64-bit signed integer value (2^63 - 1). Intermediate results are + calculated as double-precision floating point values and the return + value is rounded to the nearest integer. If the expression is + invalid or evaluates to a negative value, it returns 0. 
If the
+            expression evaluates to a value greater than the maximum, it
+            returns the maximum value.
+
+            The source data for an Expression can be the name of an
+            IndexField of type int or double, another Expression or the
+            reserved name _score. The _score source is
+            defined to return as a double from 0 to 10.0 (inclusive) to
+            indicate how relevant a document is to the search request,
+            taking into account repetition of search terms in the
+            document and proximity of search terms to each other in
+            each matching IndexField in the document.
+
+            For more information about using rank expressions to
+            customize ranking, see the Amazon CloudSearch Developer
+            Guide.
+
+        :return: ExpressionStatus object
+        :rtype: :class:`boto.cloudsearch2.option.ExpressionStatus` object
+
+        :raises: BaseException, InternalException, LimitExceededException,
+            InvalidTypeException, ResourceNotFoundException
+        """
+        data = self.layer1.define_expression(self.name, name, value)
+
+        data = (data['DefineExpressionResponse']
+                    ['DefineExpressionResult']
+                    ['Expression'])
+
+        return ExpressionStatus(self, data,
+                                self.layer1.describe_expressions)
+
+    def get_document_service(self):
+        return DocumentServiceConnection(domain=self)
+
+    def get_search_service(self):
+        return SearchConnection(domain=self)
+
+    def __repr__(self):
+        return '<Domain: %s>' % self.domain_name
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..c11411396343f06810b7f95ea365d336cf51ae9d
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/exceptions.py
@@ -0,0 +1,46 @@
+"""
+Exceptions that are specific to the cloudsearch2 module.
+"""
+from boto.exception import BotoServerError
+
+
+class InvalidTypeException(BotoServerError):
+    """
+    Raised when an invalid record type is passed to CloudSearch.
+    """
+    pass
+
+
+class LimitExceededException(BotoServerError):
+    """
+    Raised when a limit has been exceeded.
+    """
+    pass
+
+
+class InternalException(BotoServerError):
+    """
+    A generic server-side error.
+    """
+    pass
+
+
+class DisabledOperationException(BotoServerError):
+    """
+    Raised when an operation has been disabled.
+    """
+    pass
+
+
+class ResourceNotFoundException(BotoServerError):
+    """
+    Raised when a requested resource does not exist.
+    """
+    pass
+
+
+class BaseException(BotoServerError):
+    """
+    A generic server-side error.
+    """
+    pass
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/layer1.py
new file mode 100644
index 0000000000000000000000000000000000000000..a39b08f585d0e56b3f5e2d3658f42abfbff05492
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/layer1.py
@@ -0,0 +1,783 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.cloudsearch2 import exceptions + + +class CloudSearchConnection(AWSQueryConnection): + """ + Amazon CloudSearch Configuration Service + You use the Amazon CloudSearch configuration service to create, + configure, and manage search domains. Configuration service + requests are submitted using the AWS Query protocol. AWS Query + requests are HTTP or HTTPS requests submitted via HTTP GET or POST + with a query parameter named Action. + + The endpoint for configuration service requests is region- + specific: cloudsearch. region .amazonaws.com. For example, + cloudsearch.us-east-1.amazonaws.com. For a current list of + supported regions and endpoints, see `Regions and Endpoints`_. + """ + APIVersion = "2013-01-01" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "cloudsearch.us-east-1.amazonaws.com" + ResponseError = JSONResponseError + + _faults = { + "InvalidTypeException": exceptions.InvalidTypeException, + "LimitExceededException": exceptions.LimitExceededException, + "InternalException": exceptions.InternalException, + "DisabledOperationException": exceptions.DisabledOperationException, + "ResourceNotFoundException": exceptions.ResourceNotFoundException, + "BaseException": exceptions.BaseException, + } + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + sign_request = kwargs.pop('sign_request', False) + self.sign_request = sign_request + + super(CloudSearchConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def build_suggesters(self, domain_name): + """ + Indexes the search suggestions. + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + """ + params = {'DomainName': domain_name, } + return self._make_request( + action='BuildSuggesters', + verb='POST', + path='/', params=params) + + def create_domain(self, domain_name): + """ + Creates a new search domain. For more information, see + `Creating a Search Domain`_ in the Amazon CloudSearch + Developer Guide . + + :type domain_name: string + :param domain_name: A name for the domain you are creating. Allowed + characters are a-z (lower-case letters), 0-9, and hyphen (-). + Domain names must start with a letter or number and be at least 3 + and no more than 28 characters long. 
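+
+        A hedged call sketch (the domain name is a placeholder; credentials
+        are assumed to be configured elsewhere, e.g. in a boto config)::
+
+            conn = CloudSearchConnection()
+            response = conn.create_domain('demo-domain')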
+ + """ + params = {'DomainName': domain_name, } + return self._make_request( + action='CreateDomain', + verb='POST', + path='/', params=params) + + def define_analysis_scheme(self, domain_name, analysis_scheme): + """ + Configures an analysis scheme that can be applied to a `text` + or `text-array` field to define language-specific text + processing options. For more information, see `Configuring + Analysis Schemes`_ in the Amazon CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type analysis_scheme: dict + :param analysis_scheme: Configuration information for an analysis + scheme. Each analysis scheme has a unique name and specifies the + language of the text to be processed. The following options can be + configured for an analysis scheme: `Synonyms`, `Stopwords`, + `StemmingDictionary`, and `AlgorithmicStemming`. + + """ + params = {'DomainName': domain_name, } + self.build_complex_param(params, 'AnalysisScheme', + analysis_scheme) + return self._make_request( + action='DefineAnalysisScheme', + verb='POST', + path='/', params=params) + + def define_expression(self, domain_name, expression): + """ + Configures an `Expression` for the search domain. Used to + create new expressions and modify existing ones. If the + expression exists, the new configuration replaces the old one. + For more information, see `Configuring Expressions`_ in the + Amazon CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type expression: dict + :param expression: A named expression that can be evaluated at search + time. Can be used to sort the search results, define other + expressions, or return computed information in the search results. + + """ + params = {'DomainName': domain_name, } + self.build_complex_param(params, 'Expression', + expression) + return self._make_request( + action='DefineExpression', + verb='POST', + path='/', params=params) + + def define_index_field(self, domain_name, index_field): + """ + Configures an `IndexField` for the search domain. Used to + create new fields and modify existing ones. You must specify + the name of the domain you are configuring and an index field + configuration. The index field configuration specifies a + unique name, the index field type, and the options you want to + configure for the field. The options you can specify depend on + the `IndexFieldType`. If the field exists, the new + configuration replaces the old one. For more information, see + `Configuring Index Fields`_ in the Amazon CloudSearch + Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type index_field: dict + :param index_field: The index field and field options you want to + configure. 
+ + """ + params = {'DomainName': domain_name, } + self.build_complex_param(params, 'IndexField', + index_field) + return self._make_request( + action='DefineIndexField', + verb='POST', + path='/', params=params) + + def define_suggester(self, domain_name, suggester): + """ + Configures a suggester for a domain. A suggester enables you + to display possible matches before users finish typing their + queries. When you configure a suggester, you must specify the + name of the text field you want to search for possible matches + and a unique name for the suggester. For more information, see + `Getting Search Suggestions`_ in the Amazon CloudSearch + Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type suggester: dict + :param suggester: Configuration information for a search suggester. + Each suggester has a unique name and specifies the text field you + want to use for suggestions. The following options can be + configured for a suggester: `FuzzyMatching`, `SortExpression`. + + """ + params = {'DomainName': domain_name, } + self.build_complex_param(params, 'Suggester', + suggester) + return self._make_request( + action='DefineSuggester', + verb='POST', + path='/', params=params) + + def delete_analysis_scheme(self, domain_name, analysis_scheme_name): + """ + Deletes an analysis scheme. For more information, see + `Configuring Analysis Schemes`_ in the Amazon CloudSearch + Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type analysis_scheme_name: string + :param analysis_scheme_name: The name of the analysis scheme you want + to delete. + + """ + params = { + 'DomainName': domain_name, + 'AnalysisSchemeName': analysis_scheme_name, + } + return self._make_request( + action='DeleteAnalysisScheme', + verb='POST', + path='/', params=params) + + def delete_domain(self, domain_name): + """ + Permanently deletes a search domain and all of its data. Once + a domain has been deleted, it cannot be recovered. For more + information, see `Deleting a Search Domain`_ in the Amazon + CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: The name of the domain you want to permanently + delete. + + """ + params = {'DomainName': domain_name, } + return self._make_request( + action='DeleteDomain', + verb='POST', + path='/', params=params) + + def delete_expression(self, domain_name, expression_name): + """ + Removes an `Expression` from the search domain. For more + information, see `Configuring Expressions`_ in the Amazon + CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type expression_name: string + :param expression_name: The name of the `Expression` to delete. 
+ + """ + params = { + 'DomainName': domain_name, + 'ExpressionName': expression_name, + } + return self._make_request( + action='DeleteExpression', + verb='POST', + path='/', params=params) + + def delete_index_field(self, domain_name, index_field_name): + """ + Removes an `IndexField` from the search domain. For more + information, see `Configuring Index Fields`_ in the Amazon + CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type index_field_name: string + :param index_field_name: The name of the index field your want to + remove from the domain's indexing options. + + """ + params = { + 'DomainName': domain_name, + 'IndexFieldName': index_field_name, + } + return self._make_request( + action='DeleteIndexField', + verb='POST', + path='/', params=params) + + def delete_suggester(self, domain_name, suggester_name): + """ + Deletes a suggester. For more information, see `Getting Search + Suggestions`_ in the Amazon CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type suggester_name: string + :param suggester_name: Specifies the name of the suggester you want to + delete. + + """ + params = { + 'DomainName': domain_name, + 'SuggesterName': suggester_name, + } + return self._make_request( + action='DeleteSuggester', + verb='POST', + path='/', params=params) + + def describe_analysis_schemes(self, domain_name, + analysis_scheme_names=None, deployed=None): + """ + Gets the analysis schemes configured for a domain. An analysis + scheme defines language-specific text processing options for a + `text` field. Can be limited to specific analysis schemes by + name. By default, shows all analysis schemes and includes any + pending changes to the configuration. Set the `Deployed` + option to `True` to show the active configuration and exclude + pending changes. For more information, see `Configuring + Analysis Schemes`_ in the Amazon CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: The name of the domain you want to describe. + + :type analysis_scheme_names: list + :param analysis_scheme_names: The analysis schemes you want to + describe. + + :type deployed: boolean + :param deployed: Whether to display the deployed configuration ( + `True`) or include any pending changes ( `False`). Defaults to + `False`. + + """ + params = {'DomainName': domain_name, } + if analysis_scheme_names is not None: + self.build_list_params(params, + analysis_scheme_names, + 'AnalysisSchemeNames.member') + if deployed is not None: + params['Deployed'] = str( + deployed).lower() + return self._make_request( + action='DescribeAnalysisSchemes', + verb='POST', + path='/', params=params) + + def describe_availability_options(self, domain_name, deployed=None): + """ + Gets the availability options configured for a domain. By + default, shows the configuration with any pending changes. Set + the `Deployed` option to `True` to show the active + configuration and exclude pending changes. 
For more + information, see `Configuring Availability Options`_ in the + Amazon CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: The name of the domain you want to describe. + + :type deployed: boolean + :param deployed: Whether to display the deployed configuration ( + `True`) or include any pending changes ( `False`). Defaults to + `False`. + + """ + params = {'DomainName': domain_name, } + if deployed is not None: + params['Deployed'] = str( + deployed).lower() + return self._make_request( + action='DescribeAvailabilityOptions', + verb='POST', + path='/', params=params) + + def describe_domains(self, domain_names=None): + """ + Gets information about the search domains owned by this + account. Can be limited to specific domains. Shows all domains + by default. To get the number of searchable documents in a + domain, use the console or submit a `matchall` request to your + domain's search endpoint: + `q=matchall&q.parser=structured&size=0`. For more information, + see `Getting Information about a Search Domain`_ in the Amazon + CloudSearch Developer Guide . + + :type domain_names: list + :param domain_names: The names of the domains you want to include in + the response. + + """ + params = {} + if domain_names is not None: + self.build_list_params(params, + domain_names, + 'DomainNames.member') + return self._make_request( + action='DescribeDomains', + verb='POST', + path='/', params=params) + + def describe_expressions(self, domain_name, expression_names=None, + deployed=None): + """ + Gets the expressions configured for the search domain. Can be + limited to specific expressions by name. By default, shows all + expressions and includes any pending changes to the + configuration. Set the `Deployed` option to `True` to show the + active configuration and exclude pending changes. For more + information, see `Configuring Expressions`_ in the Amazon + CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: The name of the domain you want to describe. + + :type expression_names: list + :param expression_names: Limits the `DescribeExpressions` response to + the specified expressions. If not specified, all expressions are + shown. + + :type deployed: boolean + :param deployed: Whether to display the deployed configuration ( + `True`) or include any pending changes ( `False`). Defaults to + `False`. + + """ + params = {'DomainName': domain_name, } + if expression_names is not None: + self.build_list_params(params, + expression_names, + 'ExpressionNames.member') + if deployed is not None: + params['Deployed'] = str( + deployed).lower() + return self._make_request( + action='DescribeExpressions', + verb='POST', + path='/', params=params) + + def describe_index_fields(self, domain_name, field_names=None, + deployed=None): + """ + Gets information about the index fields configured for the + search domain. Can be limited to specific fields by name. By + default, shows all fields and includes any pending changes to + the configuration. Set the `Deployed` option to `True` to show + the active configuration and exclude pending changes. For more + information, see `Getting Domain Information`_ in the Amazon + CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: The name of the domain you want to describe. + + :type field_names: list + :param field_names: A list of the index fields you want to describe. If + not specified, information is returned for all configured index + fields. 
+ + :type deployed: boolean + :param deployed: Whether to display the deployed configuration ( + `True`) or include any pending changes ( `False`). Defaults to + `False`. + + """ + params = {'DomainName': domain_name, } + if field_names is not None: + self.build_list_params(params, + field_names, + 'FieldNames.member') + if deployed is not None: + params['Deployed'] = str( + deployed).lower() + return self._make_request( + action='DescribeIndexFields', + verb='POST', + path='/', params=params) + + def describe_scaling_parameters(self, domain_name): + """ + Gets the scaling parameters configured for a domain. A + domain's scaling parameters specify the desired search + instance type and replication count. For more information, see + `Configuring Scaling Options`_ in the Amazon CloudSearch + Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + """ + params = {'DomainName': domain_name, } + return self._make_request( + action='DescribeScalingParameters', + verb='POST', + path='/', params=params) + + def describe_service_access_policies(self, domain_name, deployed=None): + """ + Gets information about the access policies that control access + to the domain's document and search endpoints. By default, + shows the configuration with any pending changes. Set the + `Deployed` option to `True` to show the active configuration + and exclude pending changes. For more information, see + `Configuring Access for a Search Domain`_ in the Amazon + CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: The name of the domain you want to describe. + + :type deployed: boolean + :param deployed: Whether to display the deployed configuration ( + `True`) or include any pending changes ( `False`). Defaults to + `False`. + + """ + params = {'DomainName': domain_name, } + if deployed is not None: + params['Deployed'] = str( + deployed).lower() + return self._make_request( + action='DescribeServiceAccessPolicies', + verb='POST', + path='/', params=params) + + def describe_suggesters(self, domain_name, suggester_names=None, + deployed=None): + """ + Gets the suggesters configured for a domain. A suggester + enables you to display possible matches before users finish + typing their queries. Can be limited to specific suggesters by + name. By default, shows all suggesters and includes any + pending changes to the configuration. Set the `Deployed` + option to `True` to show the active configuration and exclude + pending changes. For more information, see `Getting Search + Suggestions`_ in the Amazon CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: The name of the domain you want to describe. + + :type suggester_names: list + :param suggester_names: The suggesters you want to describe. + + :type deployed: boolean + :param deployed: Whether to display the deployed configuration ( + `True`) or include any pending changes ( `False`). Defaults to + `False`. 
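+        For example, to inspect the active configuration of a single
+        suggester (the connection and the names are hypothetical)::
+
+            conn.describe_suggesters('mydomain',
+                                     suggester_names=['title_suggester'],
+                                     deployed=True)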
+ + """ + params = {'DomainName': domain_name, } + if suggester_names is not None: + self.build_list_params(params, + suggester_names, + 'SuggesterNames.member') + if deployed is not None: + params['Deployed'] = str( + deployed).lower() + return self._make_request( + action='DescribeSuggesters', + verb='POST', + path='/', params=params) + + def index_documents(self, domain_name): + """ + Tells the search domain to start indexing its documents using + the latest indexing options. This operation must be invoked to + activate options whose OptionStatus is + `RequiresIndexDocuments`. + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + """ + params = {'DomainName': domain_name, } + return self._make_request( + action='IndexDocuments', + verb='POST', + path='/', params=params) + + def list_domain_names(self): + """ + Lists all search domains owned by an account. + """ + params = {} + return self._make_request( + action='ListDomainNames', + verb='POST', + path='/', params=params) + + def update_availability_options(self, domain_name, multi_az): + """ + Configures the availability options for a domain. Enabling the + Multi-AZ option expands an Amazon CloudSearch domain to an + additional Availability Zone in the same Region to increase + fault tolerance in the event of a service disruption. Changes + to the Multi-AZ option can take about half an hour to become + active. For more information, see `Configuring Availability + Options`_ in the Amazon CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type multi_az: boolean + :param multi_az: You expand an existing search domain to a second + Availability Zone by setting the Multi-AZ option to true. + Similarly, you can turn off the Multi-AZ option to downgrade the + domain to a single Availability Zone by setting the Multi-AZ option + to `False`. + + """ + params = {'DomainName': domain_name, 'MultiAZ': multi_az, } + return self._make_request( + action='UpdateAvailabilityOptions', + verb='POST', + path='/', params=params) + + def update_scaling_parameters(self, domain_name, scaling_parameters): + """ + Configures scaling parameters for a domain. A domain's scaling + parameters specify the desired search instance type and + replication count. Amazon CloudSearch will still automatically + scale your domain based on the volume of data and traffic, but + not below the desired instance type and replication count. If + the Multi-AZ option is enabled, these values control the + resources used per Availability Zone. For more information, + see `Configuring Scaling Options`_ in the Amazon CloudSearch + Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). 
+ + :type scaling_parameters: dict + :param scaling_parameters: The desired instance type and desired number + of replicas of each index partition. + + """ + params = {'DomainName': domain_name, } + self.build_complex_param(params, 'ScalingParameters', + scaling_parameters) + return self._make_request( + action='UpdateScalingParameters', + verb='POST', + path='/', params=params) + + def update_service_access_policies(self, domain_name, access_policies): + """ + Configures the access rules that control access to the + domain's document and search endpoints. For more information, + see ` Configuring Access for an Amazon CloudSearch Domain`_. + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type access_policies: string + :param access_policies: The access rules you want to configure. These + rules replace any existing rules. + + """ + params = { + 'DomainName': domain_name, + 'AccessPolicies': access_policies, + } + return self._make_request( + action='UpdateServiceAccessPolicies', + verb='POST', + path='/', params=params) + + def build_complex_param(self, params, label, value): + """Serialize a structure. + + For example:: + + param_type = 'structure' + label = 'IndexField' + value = {'IndexFieldName': 'a', 'IntOptions': {'DefaultValue': 5}} + + would result in the params dict being updated with these params:: + + IndexField.IndexFieldName = a + IndexField.IntOptions.DefaultValue = 5 + + :type params: dict + :param params: The params dict. The serialized complex params + will be added to this dict. + + :type label: str + :param label: String label for param key + + :type value: any + :param value: The value to serialize + """ + for k, v in value.items(): + if isinstance(v, dict): + # Recurse to flatten nested structures into dotted keys. + self.build_complex_param(params, label + '.' + k, v) + elif isinstance(v, bool): + params['%s.%s' % (label, k)] = 'true' if v else 'false' + else: + params['%s.%s' % (label, k)] = v + + def _make_request(self, action, verb, path, params): + params['ContentType'] = 'JSON' + response = self.make_request(action=action, verb=verb, + path=path, params=params) + body = response.read().decode('utf-8') + boto.log.debug(body) + if response.status == 200: + return json.loads(body) + else: + json_body = json.loads(body) + fault_name = json_body.get('Error', {}).get('Code', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/layer2.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/layer2.py new file mode 100644 index 0000000000000000000000000000000000000000..28fdc74c63afc52480c72f0b0ece09d0cea6da25 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/layer2.py @@ -0,0 +1,94 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.cloudsearch2.layer1 import CloudSearchConnection +from boto.cloudsearch2.domain import Domain +from boto.compat import six + + +class Layer2(object): + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + host=None, debug=0, session_token=None, region=None, + validate_certs=True, sign_request=False): + + if isinstance(region, six.string_types): + import boto.cloudsearch2 + for region_info in boto.cloudsearch2.regions(): + if region_info.name == region: + region = region_info + break + + self.layer1 = CloudSearchConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + is_secure=is_secure, + port=port, + proxy=proxy, + proxy_port=proxy_port, + host=host, + debug=debug, + security_token=session_token, + region=region, + validate_certs=validate_certs, + sign_request=sign_request) + + def list_domains(self, domain_names=None): + """ + Return a list of objects for each domain defined in the + current account. + :rtype: list of :class:`boto.cloudsearch2.domain.Domain` + """ + domain_data = self.layer1.describe_domains(domain_names) + + domain_data = (domain_data['DescribeDomainsResponse'] + ['DescribeDomainsResult'] + ['DomainStatusList']) + + return [Domain(self.layer1, data) for data in domain_data] + + def create_domain(self, domain_name): + """ + Create a new CloudSearch domain and return the corresponding object. 
+ :return: The Domain object for the newly created domain + :rtype: :class:`boto.cloudsearch2.domain.Domain` + """ + data = self.layer1.create_domain(domain_name) + return Domain(self.layer1, data['CreateDomainResponse'] + ['CreateDomainResult'] + ['DomainStatus']) + + def lookup(self, domain_name): + """ + Look up a single domain. + :param domain_name: The name of the domain to look up + :type domain_name: str + + :return: Domain object, or None if the domain isn't found + :rtype: :class:`boto.cloudsearch2.domain.Domain` + """ + domains = self.list_domains(domain_names=[domain_name]) + if len(domains) > 0: + return domains[0] + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/optionstatus.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/optionstatus.py new file mode 100644 index 0000000000000000000000000000000000000000..0a45bea4f03cf13310386cbbf0f64a4387bef530 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/optionstatus.py @@ -0,0 +1,233 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.compat import json + + +class OptionStatus(dict): + """ + Presents a combination of status fields (defined below), which are + accessed as attributes, and option values, which are stored in the + native Python dictionary. In this class, the option values are + merged from a JSON object that is stored as the Option part of + the object. + + :ivar domain_name: The name of the domain this option is associated with. + :ivar creation_date: A timestamp for when this option was created. + :ivar status: The state of processing a change to an option. + Possible values: + + * RequiresIndexDocuments: the option's latest value will not + be visible in searches until IndexDocuments has been called + and indexing is complete. + * Processing: the option's latest value is not yet visible in + all searches but is in the process of being activated. + * Active: the option's latest value is completely visible. + + :ivar update_date: A timestamp for when this option was updated. + :ivar update_version: A unique integer that indicates when this + option was last updated.
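+    An illustrative ``Status`` payload, with the shape inferred from
+    ``_update_status`` below (the values shown are made up)::
+
+        {'CreationDate': '2014-06-26T17:35:38Z',
+         'State': 'Active',
+         'UpdateDate': '2014-06-26T17:35:38Z',
+         'UpdateVersion': '12'}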
+ """ + + def __init__(self, domain, data=None, refresh_fn=None, refresh_key=None, + save_fn=None): + self.domain = domain + self.refresh_fn = refresh_fn + self.refresh_key = refresh_key + self.save_fn = save_fn + self.refresh(data) + + def _update_status(self, status): + self.creation_date = status['CreationDate'] + self.status = status['State'] + self.update_date = status['UpdateDate'] + self.update_version = int(status['UpdateVersion']) + + def _update_options(self, options): + if options: + self.update(options) + + def refresh(self, data=None): + """ + Refresh the local state of the object. You can either pass + new state data in as the parameter ``data`` or, if that parameter + is omitted, the state data will be retrieved from CloudSearch. + """ + if not data: + if self.refresh_fn: + data = self.refresh_fn(self.domain.name) + + if data and self.refresh_key: + # Attempt to pull out the right nested bag of data + for key in self.refresh_key: + data = data[key] + if data: + self._update_status(data['Status']) + self._update_options(data['Options']) + + def to_json(self): + """ + Return the JSON representation of the options as a string. + """ + return json.dumps(self) + + def save(self): + """ + Write the current state of the local object back to the + CloudSearch service. + """ + if self.save_fn: + data = self.save_fn(self.domain.name, self.to_json()) + self.refresh(data) + + +class IndexFieldStatus(OptionStatus): + def save(self): + pass + + +class AvailabilityOptionsStatus(OptionStatus): + def save(self): + pass + + +class ScalingParametersStatus(IndexFieldStatus): + pass + + +class ExpressionStatus(IndexFieldStatus): + pass + + +class ServicePoliciesStatus(OptionStatus): + + def new_statement(self, arn, ip): + """ + Returns a new policy statement that will allow + access to the service described by ``arn`` by the + ip specified in ``ip``. + + :type arn: string + :param arn: The Amazon Resource Notation identifier for the + service you wish to provide access to. This would be + either the search service or the document service. + + :type ip: string + :param ip: An IP address or CIDR block you wish to grant access + to. + """ + return { + "Effect": "Allow", + "Action": "*", # Docs say use GET, but denies unless * + "Resource": arn, + "Condition": { + "IpAddress": { + "aws:SourceIp": [ip] + } + } + } + + def _allow_ip(self, arn, ip): + if 'Statement' not in self: + s = self.new_statement(arn, ip) + self['Statement'] = [s] + self.save() + else: + add_statement = True + for statement in self['Statement']: + if statement['Resource'] == arn: + for condition_name in statement['Condition']: + if condition_name == 'IpAddress': + add_statement = False + condition = statement['Condition'][condition_name] + if ip not in condition['aws:SourceIp']: + condition['aws:SourceIp'].append(ip) + + if add_statement: + s = self.new_statement(arn, ip) + self['Statement'].append(s) + self.save() + + def allow_search_ip(self, ip): + """ + Add the provided ip address or CIDR block to the list of + allowable address for the search service. + + :type ip: string + :param ip: An IP address or CIDR block you wish to grant access + to. + """ + arn = self.domain.service_arn + self._allow_ip(arn, ip) + + def allow_doc_ip(self, ip): + """ + Add the provided ip address or CIDR block to the list of + allowable address for the document service. + + :type ip: string + :param ip: An IP address or CIDR block you wish to grant access + to. 
+ """ + arn = self.domain.service_arn + self._allow_ip(arn, ip) + + def _disallow_ip(self, arn, ip): + if 'Statement' not in self: + return + need_update = False + for statement in self['Statement']: + if statement['Resource'] == arn: + for condition_name in statement['Condition']: + if condition_name == 'IpAddress': + condition = statement['Condition'][condition_name] + if ip in condition['aws:SourceIp']: + condition['aws:SourceIp'].remove(ip) + need_update = True + if need_update: + self.save() + + def disallow_search_ip(self, ip): + """ + Remove the provided ip address or CIDR block from the list of + allowable address for the search service. + + :type ip: string + :param ip: An IP address or CIDR block you wish to grant access + to. + """ + arn = self.domain.service_arn + self._disallow_ip(arn, ip) + + def disallow_doc_ip(self, ip): + """ + Remove the provided ip address or CIDR block from the list of + allowable address for the document service. + + :type ip: string + :param ip: An IP address or CIDR block you wish to grant access + to. + """ + arn = self.domain.service_arn + self._disallow_ip(arn, ip) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/search.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/search.py new file mode 100644 index 0000000000000000000000000000000000000000..3db3a472e1bf9bf9c635296793062756f0457cac --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/search.py @@ -0,0 +1,452 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# +from math import ceil +from boto.compat import json, six +import requests +from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection + +SIMPLE = 'simple' +STRUCTURED = 'structured' +LUCENE = 'lucene' +DISMAX = 'dismax' + + +class SearchServiceException(Exception): + pass + + +class SearchResults(object): + def __init__(self, **attrs): + self.rid = attrs['status']['rid'] + self.time_ms = attrs['status']['time-ms'] + self.hits = attrs['hits']['found'] + self.docs = attrs['hits']['hit'] + self.start = attrs['hits']['start'] + self.query = attrs['query'] + self.search_service = attrs['search_service'] + + self.facets = {} + if 'facets' in attrs: + for (facet, values) in attrs['facets'].items(): + if 'buckets' in values: + self.facets[facet] = dict((b['value'], b['count']) for b in values.get('buckets', [])) + + # Use float division so ceil rounds up under Python 2 as well. + self.num_pages_needed = ceil(float(self.hits) / self.query.real_size) + + def __len__(self): + return len(self.docs) + + def __iter__(self): + return iter(self.docs) + + def next_page(self): + """Call Cloudsearch to get the next page of search results + + :rtype: :class:`boto.cloudsearch2.search.SearchResults` + :return: the following page of search results + """ + if self.query.page <= self.num_pages_needed: + self.query.start += self.query.real_size + self.query.page += 1 + return self.search_service(self.query) + else: + raise StopIteration + + +class Query(object): + + RESULTS_PER_PAGE = 500 + + def __init__(self, q=None, parser=None, fq=None, expr=None, + return_fields=None, size=10, start=0, sort=None, + facet=None, highlight=None, partial=None, options=None): + + self.q = q + self.parser = parser + self.fq = fq + self.expr = expr or {} + self.sort = sort or [] + self.return_fields = return_fields or [] + self.start = start + self.facet = facet or {} + self.highlight = highlight or {} + self.partial = partial + self.options = options + self.page = 0 + self.update_size(size) + + def update_size(self, new_size): + self.size = new_size + self.real_size = Query.RESULTS_PER_PAGE if (self.size > + Query.RESULTS_PER_PAGE or self.size == 0) else self.size + + def to_params(self): + """Transform search parameters from instance properties to a dictionary + + :rtype: dict + :return: search parameters + """ + params = {'start': self.start, 'size': self.real_size} + + if self.q: + params['q'] = self.q + + if self.parser: + params['q.parser'] = self.parser + + if self.fq: + params['fq'] = self.fq + + if self.expr: + for k, v in six.iteritems(self.expr): + params['expr.%s' % k] = v + + if self.facet: + for k, v in six.iteritems(self.facet): + if not isinstance(v, six.string_types): + v = json.dumps(v) + params['facet.%s' % k] = v + + if self.highlight: + for k, v in six.iteritems(self.highlight): + params['highlight.%s' % k] = v + + if self.options: + params['q.options'] = self.options + + if self.return_fields: + params['return'] = ','.join(self.return_fields) + + if self.partial is not None: + params['partial'] = self.partial + + if self.sort: + params['sort'] = ','.join(self.sort) + + return params + + def to_domain_connection_params(self): + """ + Transform search parameters from instance properties to a dictionary + that CloudSearchDomainConnection can accept + + :rtype: dict + :return: search parameters + """ + params = {'start': self.start, 'size': self.real_size} + + if self.q: + params['q'] = self.q + + if self.parser: + params['query_parser'] = self.parser + + if self.fq: + params['filter_query'] = self.fq + + if self.expr: + expr = {} + for k, v in six.iteritems(self.expr):
+ expr['expr.%s' % k] = v + + params['expr'] = expr + + if self.facet: + facet = {} + for k, v in six.iteritems(self.facet): + if not isinstance(v, six.string_types): + v = json.dumps(v) + facet['facet.%s' % k] = v + + params['facet'] = facet + + if self.highlight: + highlight = {} + for k, v in six.iteritems(self.highlight): + highlight['highlight.%s' % k] = v + + params['highlight'] = highlight + + if self.options: + params['query_options'] = self.options + + if self.return_fields: + params['ret'] = ','.join(self.return_fields) + + if self.partial is not None: + params['partial'] = self.partial + + if self.sort: + params['sort'] = ','.join(self.sort) + + return params + + +class SearchConnection(object): + + def __init__(self, domain=None, endpoint=None): + self.domain = domain + self.endpoint = endpoint + self.session = requests.Session() + + # Endpoint needs to be set before initializing CloudSearchDomainConnection + if not endpoint: + self.endpoint = domain.search_service_endpoint + + # Copy proxy settings from connection and check if request should be signed + self.sign_request = False + if self.domain and self.domain.layer1: + if self.domain.layer1.use_proxy: + self.session.proxies['http'] = self.domain.layer1.get_proxy_url_with_auth() + + self.sign_request = getattr(self.domain.layer1, 'sign_request', False) + + if self.sign_request: + layer1 = self.domain.layer1 + self.domain_connection = CloudSearchDomainConnection( + host=self.endpoint, + aws_access_key_id=layer1.aws_access_key_id, + aws_secret_access_key=layer1.aws_secret_access_key, + region=layer1.region, + provider=layer1.provider + ) + + def build_query(self, q=None, parser=None, fq=None, rank=None, return_fields=None, + size=10, start=0, facet=None, highlight=None, sort=None, + partial=None, options=None): + return Query(q=q, parser=parser, fq=fq, expr=rank, return_fields=return_fields, + size=size, start=start, facet=facet, highlight=highlight, + sort=sort, partial=partial, options=options) + + def search(self, q=None, parser=None, fq=None, rank=None, return_fields=None, + size=10, start=0, facet=None, highlight=None, sort=None, partial=None, + options=None): + """ + Send a query to CloudSearch + + Each search query should use at least the q argument to specify + the search terms. The other options are used to specify the + criteria of the search. + + :type q: string + :param q: A string to search the default search fields for. + + :type parser: string + :param parser: The parser to use. 'simple', 'structured', 'lucene', 'dismax' + + :type fq: string + :param fq: The filter query to use. + + :type sort: List of strings + :param sort: A list of fields or rank expressions used to order the + search results. Order is handled by adding 'desc' or 'asc' after the field name. + ``['year desc', 'author asc']`` + + :type return_fields: List of strings + :param return_fields: A list of fields which should be returned by the + search. If this field is not specified, only IDs will be returned.
+ + ``['headline']`` + + :type size: int + :param size: Number of search results to return + + :type start: int + :param start: Offset of the first search result to return (can be used + for paging) + + :type facet: dict + :param facet: Dictionary of fields for which facets should be returned. + The facet value is a string of JSON options + ``{'year': '{sort:"bucket", size:3}', 'genres': '{buckets:["Action","Adventure","Sci-Fi"]}'}`` + + :type highlight: dict + :param highlight: Dictionary of fields for which highlights should be returned. + The highlight value is a string of JSON options + ``{'genres': '{format:'text',max_phrases:2,pre_tag:'<b>',post_tag:'</b>'}'}`` + + :type partial: bool + :param partial: Should partial results from a partitioned service be returned if + one or more index partitions are unreachable. + + :type options: str + :param options: Options for the query parser specified in *parser*. + Specified as a string in JSON format. + ``{fields: ['title^5', 'description']}`` + + :rtype: :class:`boto.cloudsearch2.search.SearchResults` + :return: Returns the results of this search + + The following examples all assume we have indexed a set of documents + with fields: *author*, *date*, *headline* + + A simple search will look for documents whose default text search + fields will contain the search word exactly: + + >>> search(q='Tim') # Return documents with the word Tim in them (but not Timothy) + + A simple search with more keywords will return documents whose default + text search fields contain the search strings together or separately. + + >>> search(q='Tim apple') # Will match "tim" and "apple" + + More complex searches require the structured query parser. + + Wildcard searches can be used to search for any words that start with + the search string. + + >>> search(q="'Tim*'") # Return documents with words like Tim or Timothy + + Search terms can also be combined. Allowed operators are "and", "or", + "not", "field", "optional", "token", "phrase", or "filter" + + >>> search(q="(and 'Tim' (field author 'John Smith'))", parser='structured') + + Facets allow you to show classification information about the search + results.
For example, you can retrieve the authors who have written + about Tim with a max of 3 + + >>> search(q='Tim', facet={'Author': '{sort:"bucket", size:3}'}) + """ + + query = self.build_query(q=q, parser=parser, fq=fq, rank=rank, + return_fields=return_fields, + size=size, start=start, facet=facet, + highlight=highlight, sort=sort, + partial=partial, options=options) + return self(query) + + def _search_with_auth(self, params): + return self.domain_connection.search(params.pop("q", ""), **params) + + def _search_without_auth(self, params, api_version): + url = "http://%s/%s/search" % (self.endpoint, api_version) + resp = self.session.get(url, params=params) + + return {'body': resp.content.decode('utf-8'), 'status_code': resp.status_code} + + def __call__(self, query): + """Make a call to CloudSearch + + :type query: :class:`boto.cloudsearch2.search.Query` + :param query: A group of search criteria + + :rtype: :class:`boto.cloudsearch2.search.SearchResults` + :return: search results + """ + api_version = '2013-01-01' + if self.domain and self.domain.layer1: + api_version = self.domain.layer1.APIVersion + + if self.sign_request: + data = self._search_with_auth(query.to_domain_connection_params()) + else: + r = self._search_without_auth(query.to_params(), api_version) + + _body = r['body'] + _status_code = r['status_code'] + + try: + data = json.loads(_body) + except ValueError: + if _status_code == 403: + msg = '' + import re + g = re.search('
<html><body><h1>403 Forbidden</h1>
([^<]+)<', _body) + try: + msg = ': %s' % (g.groups()[0].strip()) + except AttributeError: + pass + raise SearchServiceException('Authentication error from Amazon%s' % msg) + raise SearchServiceException("Got non-json response from Amazon. %s" % _body, query) + + if 'messages' in data and 'error' in data: + for m in data['messages']: + if m['severity'] == 'fatal': + raise SearchServiceException("Error processing search %s " + "=> %s" % (query.to_params(), m['message']), query) + elif 'error' in data: + raise SearchServiceException("Unknown error processing search %s" + % json.dumps(data), query) + + data['query'] = query + data['search_service'] = self + + return SearchResults(**data) + + def get_all_paged(self, query, per_page): + """Get a generator to iterate over all pages of search results + + :type query: :class:`boto.cloudsearch2.search.Query` + :param query: A group of search criteria + + :type per_page: int + :param per_page: Number of docs in each :class:`boto.cloudsearch2.search.SearchResults` object. + + :rtype: generator + :return: Generator containing :class:`boto.cloudsearch2.search.SearchResults` + """ + query.update_size(per_page) + page = 0 + num_pages_needed = 0 + while page <= num_pages_needed: + results = self(query) + num_pages_needed = results.num_pages_needed + yield results + query.start += query.real_size + page += 1 + + def get_all_hits(self, query): + """Get a generator to iterate over all search results + + Transparently handles the results paging from Cloudsearch + search results so even if you have many thousands of results + you can iterate over all results in a reasonably efficient + manner. + + :type query: :class:`boto.cloudsearch2.search.Query` + :param query: A group of search criteria + + :rtype: generator + :return: All docs matching query + """ + page = 0 + num_pages_needed = 0 + while page <= num_pages_needed: + results = self(query) + num_pages_needed = results.num_pages_needed + for doc in results: + yield doc + query.start += query.real_size + page += 1 + + def get_num_hits(self, query): + """Return the total number of hits for query + + :type query: :class:`boto.cloudsearch2.search.Query` + :param query: a group of search criteria + + :rtype: int + :return: Total number of hits for query + """ + query.update_size(1) + return self(query).hits diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudsearchdomain/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearchdomain/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1b307a0fd04eef43625d6bf0ea7914f33b881733 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearchdomain/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software.
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the Amazon CloudSearch Domain service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection + return get_regions('cloudsearchdomain', + connection_cls=CloudSearchDomainConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudsearchdomain/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearchdomain/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..0f9961532d6964814af49fd66b018a496989fa25 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearchdomain/exceptions.py @@ -0,0 +1,30 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import BotoServerError + + +class SearchException(BotoServerError): + pass + + +class DocumentServiceException(BotoServerError): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudsearchdomain/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearchdomain/layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..7a68bbed2282510575e0e4cb6d6bc04a91c67255 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudsearchdomain/layer1.py @@ -0,0 +1,540 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.compat import json +from boto.exception import JSONResponseError +from boto.connection import AWSAuthConnection +from boto.regioninfo import RegionInfo +from boto.cloudsearchdomain import exceptions + + +class CloudSearchDomainConnection(AWSAuthConnection): + """ + You use the AmazonCloudSearch2013 API to upload documents to a + search domain and search those documents. + + The endpoints for submitting `UploadDocuments`, `Search`, and + `Suggest` requests are domain-specific. To get the endpoints for + your domain, use the Amazon CloudSearch configuration service + `DescribeDomains` action. The domain endpoints are also displayed + on the domain dashboard in the Amazon CloudSearch console. You + submit suggest requests to the search endpoint. + + For more information, see the `Amazon CloudSearch Developer + Guide`_. + """ + APIVersion = "2013-01-01" + AuthServiceName = 'cloudsearch' + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "cloudsearch.us-east-1.amazonaws.com" + ResponseError = JSONResponseError + + _faults = { + "SearchException": exceptions.SearchException, + "DocumentServiceException": exceptions.DocumentServiceException, + } + + def __init__(self, **kwargs): + region = kwargs.get('region') + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + else: + del kwargs['region'] + if kwargs.get('host', None) is None: + raise ValueError( + 'The argument, host, must be provided when creating a ' + 'CloudSearchDomainConnection because its methods require the ' + 'specific domain\'s endpoint in order to successfully make ' + 'requests to that CloudSearch Domain.' + ) + super(CloudSearchDomainConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def search(self, query, cursor=None, expr=None, facet=None, + filter_query=None, highlight=None, partial=None, + query_options=None, query_parser=None, ret=None, size=None, + sort=None, start=None): + """ + Retrieves a list of documents that match the specified search + criteria. How you specify the search criteria depends on which + query parser you use. Amazon CloudSearch supports four query + parsers: + + + + `simple`: search all `text` and `text-array` fields for the + specified string. Search for phrases, individual terms, and + prefixes. 
+ + `structured`: search specific fields, construct compound + queries using Boolean operators, and use advanced features + such as term boosting and proximity searching. + + `lucene`: specify search criteria using the Apache Lucene + query parser syntax. + + `dismax`: specify search criteria using the simplified + subset of the Apache Lucene query parser syntax defined by the + DisMax query parser. + + + For more information, see `Searching Your Data`_ in the Amazon + CloudSearch Developer Guide . + + The endpoint for submitting `Search` requests is domain- + specific. You submit search requests to a domain's search + endpoint. To get the search endpoint for your domain, use the + Amazon CloudSearch configuration service `DescribeDomains` + action. A domain's endpoints are also displayed on the domain + dashboard in the Amazon CloudSearch console. + + :type cursor: string + :param cursor: Retrieves a cursor value you can use to page through + large result sets. Use the `size` parameter to control the number + of hits to include in each response. You can specify either the + `cursor` or `start` parameter in a request; they are mutually + exclusive. To get the first cursor, set the cursor value to + `initial`. In subsequent requests, specify the cursor value + returned in the hits section of the response. + For more information, see `Paginating Results`_ in the Amazon + CloudSearch Developer Guide . + + :type expr: string + :param expr: Defines one or more numeric expressions that can be used + to sort results or specify search or filter criteria. You can also + specify expressions as return fields. + For more information about defining and using expressions, see + `Configuring Expressions`_ in the Amazon CloudSearch Developer + Guide . + + :type facet: string + :param facet: Specifies one or more fields for which to get facet + information, and options that control how the facet information is + returned. Each specified field must be facet-enabled in the domain + configuration. The fields and options are specified in JSON using + the form `{"FIELD":{"OPTION":VALUE,"OPTION":"STRING"},"FIELD":{"OPTION":VALUE,"OPTION":"STRING"}}`. + You can specify the following faceting options: + + + + `buckets` specifies an array of the facet values or ranges to count. + Ranges are specified using the same syntax that you use to search + for a range of values. For more information, see ` Searching for a + Range of Values`_ in the Amazon CloudSearch Developer Guide . + Buckets are returned in the order they are specified in the + request. The `sort` and `size` options are not valid if you specify + `buckets`. + + `size` specifies the maximum number of facets to include in the + results. By default, Amazon CloudSearch returns counts for the top + 10. The `size` parameter is only valid when you specify the `sort` + option; it cannot be used in conjunction with `buckets`. + + `sort` specifies how you want to sort the facets in the results: + `bucket` or `count`. Specify `bucket` to sort alphabetically or + numerically by facet value (in ascending order). Specify `count` to + sort by the facet counts computed for each facet value (in + descending order). To retrieve facet counts for particular values + or ranges of values, use the `buckets` option instead of `sort`. + + + If no facet options are specified, facet counts are computed for all + field values, the facets are sorted by facet count, and the top 10 + facets are returned in the results.
+ + For more information, see `Getting and Using Facet Information`_ in the + Amazon CloudSearch Developer Guide . + + :type filter_query: string + :param filter_query: Specifies a structured query that filters the + results of a search without affecting how the results are scored + and sorted. You use `filterQuery` in conjunction with the `query` + parameter to filter the documents that match the constraints + specified in the `query` parameter. Specifying a filter controls + only which matching documents are included in the results; it has + no effect on how they are scored and sorted. The `filterQuery` + parameter supports the full structured query syntax. + For more information about using filters, see `Filtering Matching + Documents`_ in the Amazon CloudSearch Developer Guide . + + :type highlight: string + :param highlight: Retrieves highlights for matches in the specified + `text` or `text-array` fields. Each specified field must be + highlight enabled in the domain configuration. The fields and + options are specified in JSON using the form `{"FIELD":{"OPTION":VALUE,"OPTION":"STRING"},"FIELD":{"OPTION":VALUE,"OPTION":"STRING"}}`. + You can specify the following highlight options: + + + + `format`: specifies the format of the data in the text field: `text` + or `html`. When data is returned as HTML, all non-alphanumeric + characters are encoded. The default is `html`. + + `max_phrases`: specifies the maximum number of occurrences of the + search term(s) you want to highlight. By default, the first + occurrence is highlighted. + + `pre_tag`: specifies the string to prepend to an occurrence of a + search term. The default for HTML highlights is `<em>`. The + default for text highlights is `*`. + + `post_tag`: specifies the string to append to an occurrence of a + search term. The default for HTML highlights is `</em>`. The + default for text highlights is `*`. + + + If no highlight options are specified for a field, the returned field + text is treated as HTML and the first match is highlighted with + emphasis tags: `<em>search-term</em>`. + + :type partial: boolean + :param partial: Enables partial results to be returned if one or more + index partitions are unavailable. When your search index is + partitioned across multiple search instances, by default Amazon + CloudSearch only returns results if every partition can be queried. + This means that the failure of a single search instance can result + in 5xx (internal server) errors. When you enable partial results, + Amazon CloudSearch returns whatever results are available and + includes the percentage of documents searched in the search results + (percent-searched). This enables you to more gracefully degrade + your users' search experience. For example, rather than displaying + no results, you could display the partial results and a message + indicating that the results might be incomplete due to a temporary + system outage. + + :type query: string + :param query: Specifies the search criteria for the request. How you + specify the search criteria depends on the query parser used for + the request and the parser options specified in the `queryOptions` + parameter. By default, the `simple` query parser is used to process + requests. To use the `structured`, `lucene`, or `dismax` query + parser, you must also specify the `queryParser` parameter. + For more information about specifying search criteria, see `Searching + Your Data`_ in the Amazon CloudSearch Developer Guide .
+ + :type query_options: string + :param query_options: + Configures options for the query parser specified in the `queryParser` + parameter. + + The options you can configure vary according to which parser you use: + + + + `defaultOperator`: The default operator used to combine individual + terms in the search string. For example: `defaultOperator: 'or'`. + For the `dismax` parser, you specify a percentage that represents + the percentage of terms in the search string (rounded down) that + must match, rather than a default operator. A value of `0%` is the + equivalent to OR, and a value of `100%` is equivalent to AND. The + percentage must be specified as a value in the range 0-100 followed + by the percent (%) symbol. For example, `defaultOperator: 50%`. + Valid values: `and`, `or`, a percentage in the range 0%-100% ( + `dismax`). Default: `and` ( `simple`, `structured`, `lucene`) or + `100` ( `dismax`). Valid for: `simple`, `structured`, `lucene`, and + `dismax`. + + `fields`: An array of the fields to search when no fields are + specified in a search. If no fields are specified in a search and + this option is not specified, all text and text-array fields are + searched. You can specify a weight for each field to control the + relative importance of each field when Amazon CloudSearch + calculates relevance scores. To specify a field weight, append a + caret ( `^`) symbol and the weight to the field name. For example, + to boost the importance of the `title` field over the `description` + field you could specify: `"fields":["title^5","description"]`. + Valid values: The name of any configured field and an optional + numeric value greater than zero. Default: All `text` and `text- + array` fields. Valid for: `simple`, `structured`, `lucene`, and + `dismax`. + + `operators`: An array of the operators or special characters you want + to disable for the simple query parser. If you disable the `and`, + `or`, or `not` operators, the corresponding operators ( `+`, `|`, + `-`) have no special meaning and are dropped from the search + string. Similarly, disabling `prefix` disables the wildcard + operator ( `*`) and disabling `phrase` disables the ability to + search for phrases by enclosing phrases in double quotes. Disabling + precedence disables the ability to control order of precedence + using parentheses. Disabling `near` disables the ability to use the + ~ operator to perform a sloppy phrase search. Disabling the `fuzzy` + operator disables the ability to use the ~ operator to perform a + fuzzy search. `escape` disables the ability to use a backslash ( + `\`) to escape special characters within the search string. + Disabling whitespace is an advanced option that prevents the parser + from tokenizing on whitespace, which can be useful for Vietnamese. + (It prevents Vietnamese words from being split incorrectly.) For + example, you could disable all operators other than the phrase + operator to support just simple term and phrase queries: + `"operators":["and","not","or", "prefix"]`. Valid values: `and`, + `escape`, `fuzzy`, `near`, `not`, `or`, `phrase`, `precedence`, + `prefix`, `whitespace`. Default: All operators and special + characters are enabled. Valid for: `simple`. + + `phraseFields`: An array of the `text` or `text-array` fields you + want to use for phrase searches. When the terms in the search + string appear in close proximity within a field, the field scores + higher. You can specify a weight for each field to boost that + score. 
The `phraseSlop` option controls how much the matches can
+ deviate from the search string and still be boosted. To specify a
+ field weight, append a caret ( `^`) symbol and the weight to the
+ field name. For example, to boost phrase matches in the `title`
+ field over the `plot` field, you could specify:
+ `"phraseFields":["title^3", "plot"]`. Valid values: The name of any
+ `text` or `text-array` field and an optional numeric value greater
+ than zero. Default: No fields. If you don't specify any fields with
+ `phraseFields`, proximity scoring is disabled even if `phraseSlop`
+ is specified. Valid for: `dismax`.
+ + `phraseSlop`: An integer value that specifies how much matches can
+ deviate from the search phrase and still be boosted according to
+ the weights specified in the `phraseFields` option; for example,
+ `phraseSlop: 2`. You must also specify `phraseFields` to enable
+ proximity scoring. Valid values: positive integers. Default: 0.
+ Valid for: `dismax`.
+ + `explicitPhraseSlop`: An integer value that specifies how much a
+ match can deviate from the search phrase when the phrase is
+ enclosed in double quotes in the search string. (Phrases that
+ exceed this proximity distance are not considered a match.) For
+ example, to specify a slop of three for dismax phrase queries, you
+ would specify `"explicitPhraseSlop":3`. Valid values: positive
+ integers. Default: 0. Valid for: `dismax`.
+ + `tieBreaker`: When a term in the search string is found in a
+ document's field, a score is calculated for that field based on how
+ common the word is in that field compared to other documents. If
+ the term occurs in multiple fields within a document, by default
+ only the highest scoring field contributes to the document's
+ overall score. You can specify a `tieBreaker` value to enable the
+ matches in lower-scoring fields to contribute to the document's
+ score. That way, if two documents have the same max field score for
+ a particular term, the score for the document that has matches in
+ more fields will be higher. The formula for calculating the score
+ with a tieBreaker is `(max field score) + (tieBreaker) * (sum of
+ the scores for the rest of the matching fields)`. Set `tieBreaker`
+ to 0 to disregard all but the highest scoring field (pure max):
+ `"tieBreaker":0`. Set to 1 to sum the scores from all fields (pure
+ sum): `"tieBreaker":1`. Valid values: 0.0 to 1.0. Default: 0.0.
+ Valid for: `dismax`.
+
+ :type query_parser: string
+ :param query_parser:
+ Specifies which query parser to use to process the request. If
+ `queryParser` is not specified, Amazon CloudSearch uses the
+ `simple` query parser.
+
+ Amazon CloudSearch supports four query parsers:
+
+
+ + `simple`: perform simple searches of `text` and `text-array` fields.
+ By default, the `simple` query parser searches all `text` and
+ `text-array` fields. You can specify which fields to search by with
+ the `queryOptions` parameter. If you prefix a search term with a
+ plus sign (+), documents must contain the term to be considered a
+ match. (This is the default, unless you configure the default
+ operator with the `queryOptions` parameter.) You can use the `-`
+ (NOT), `|` (OR), and `*` (wildcard) operators to exclude particular
+ terms, find results that match any of the specified terms, or
+ search for a prefix. To search for a phrase rather than individual
+ terms, enclose the phrase in double quotes. For more information,
+ see `Searching for Text`_ in the Amazon CloudSearch Developer Guide
+ .
+
+ + `structured`: perform advanced searches by combining multiple
+ expressions to define the search criteria. You can also search
+ within particular fields, search for values and ranges of values,
+ and use advanced options such as term boosting, `matchall`, and
+ `near`. For more information, see `Constructing Compound Queries`_
+ in the Amazon CloudSearch Developer Guide .
+ + `lucene`: search using the Apache Lucene query parser syntax. For
+ more information, see `Apache Lucene Query Parser Syntax`_.
+ + `dismax`: search using the simplified subset of the Apache Lucene
+ query parser syntax defined by the DisMax query parser. For more
+ information, see `DisMax Query Parser Syntax`_.
+
+ :type ret: string
+ :param ret: Specifies the field and expression values to include in
+ the response. Multiple fields or expressions are specified as a
+ comma-separated list. By default, a search response includes all
+ return enabled fields ( `_all_fields`). To return only the document
+ IDs for the matching documents, specify `_no_fields`. To retrieve
+ the relevance score calculated for each document, specify `_score`.
+
+ :type size: long
+ :param size: Specifies the maximum number of search hits to include in
+ the response.
+
+ :type sort: string
+ :param sort: Specifies the fields or custom expressions to use to sort
+ the search results. Multiple fields or expressions are specified as
+ a comma-separated list. You must specify the sort direction ( `asc`
+ or `desc`) for each field; for example, `year desc,title asc`. To
+ use a field to sort results, the field must be sort-enabled in the
+ domain configuration. Array type fields cannot be used for sorting.
+ If no `sort` parameter is specified, results are sorted by their
+ default relevance scores in descending order: `_score desc`. You
+ can also sort by document ID ( `_id asc`) and version ( `_version
+ desc`).
+ For more information, see `Sorting Results`_ in the Amazon CloudSearch
+ Developer Guide .
+
+ :type start: long
+ :param start: Specifies the offset of the first search hit you want to
+ return. Note that the result set is zero-based; the first result is
+ at index 0. You can specify either the `start` or `cursor`
+ parameter in a request; they are mutually exclusive.
+ For more information, see `Paginating Results`_ in the Amazon
+ CloudSearch Developer Guide .
+
+ """
+ uri = '/2013-01-01/search'
+ params = {}
+ headers = {}
+ query_params = {}
+ if cursor is not None:
+ query_params['cursor'] = cursor
+ if expr is not None:
+ query_params['expr'] = expr
+ if facet is not None:
+ query_params['facet'] = facet
+ if filter_query is not None:
+ query_params['fq'] = filter_query
+ if highlight is not None:
+ query_params['highlight'] = highlight
+ if partial is not None:
+ query_params['partial'] = partial
+ if query is not None:
+ query_params['q'] = query
+ if query_options is not None:
+ query_params['q.options'] = query_options
+ if query_parser is not None:
+ query_params['q.parser'] = query_parser
+ if ret is not None:
+ query_params['return'] = ret
+ if size is not None:
+ query_params['size'] = size
+ if sort is not None:
+ query_params['sort'] = sort
+ if start is not None:
+ query_params['start'] = start
+ return self.make_request('POST', uri, expected_status=200,
+ data=json.dumps(params), headers=headers,
+ params=query_params)
+
+ def suggest(self, query, suggester, size=None):
+ """
+ Retrieves autocomplete suggestions for a partial query string.
+ You can use suggestions to display likely matches
+ before users finish typing. In Amazon CloudSearch, suggestions
+ are based on the contents of a particular text field. When you
+ request suggestions, Amazon CloudSearch finds all of the
+ documents whose values in the suggester field start with the
+ specified query string. The beginning of the field must match
+ the query string to be considered a match.
+
+ For more information about configuring suggesters and
+ retrieving suggestions, see `Getting Suggestions`_ in the
+ Amazon CloudSearch Developer Guide .
+
+ The endpoint for submitting `Suggest` requests is domain-
+ specific. You submit suggest requests to a domain's search
+ endpoint. To get the search endpoint for your domain, use the
+ Amazon CloudSearch configuration service `DescribeDomains`
+ action. A domain's endpoints are also displayed on the domain
+ dashboard in the Amazon CloudSearch console.
+
+ :type query: string
+ :param query: Specifies the string for which you want to get
+ suggestions.
+
+ :type suggester: string
+ :param suggester: Specifies the name of the suggester to use to find
+ suggested matches.
+
+ :type size: long
+ :param size: Specifies the maximum number of suggestions to return.
+
+ """
+ uri = '/2013-01-01/suggest'
+ params = {}
+ headers = {}
+ query_params = {}
+ if query is not None:
+ query_params['q'] = query
+ if suggester is not None:
+ query_params['suggester'] = suggester
+ if size is not None:
+ query_params['size'] = size
+ return self.make_request('GET', uri, expected_status=200,
+ data=json.dumps(params), headers=headers,
+ params=query_params)
+
+ def upload_documents(self, documents, content_type):
+ """
+ Posts a batch of documents to a search domain for indexing. A
+ document batch is a collection of add and delete operations
+ that represent the documents you want to add, update, or
+ delete from your domain. Batches can be described in either
+ JSON or XML. Each item that you want Amazon CloudSearch to
+ return as a search result (such as a product) is represented
+ as a document. Every document has a unique ID and one or more
+ fields that contain the data that you want to search and
+ return in results. Individual documents cannot contain more
+ than 1 MB of data. The entire batch cannot exceed 5 MB. To get
+ the best possible upload performance, group add and delete
+ operations in batches that are close to the 5 MB limit.
+ Submitting a large volume of single-document batches can
+ overload a domain's document service.
+
+ The endpoint for submitting `UploadDocuments` requests is
+ domain-specific. To get the document endpoint for your domain,
+ use the Amazon CloudSearch configuration service
+ `DescribeDomains` action. A domain's endpoints are also
+ displayed on the domain dashboard in the Amazon CloudSearch
+ console.
+
+ For more information about formatting your data for Amazon
+ CloudSearch, see `Preparing Your Data`_ in the Amazon
+ CloudSearch Developer Guide . For more information about
+ uploading data for indexing, see `Uploading Data`_ in the
+ Amazon CloudSearch Developer Guide .
+
+ :type documents: blob
+ :param documents: A batch of documents formatted in JSON or XML.
+
+ :type content_type: string
+ :param content_type:
+ The format of the batch you are uploading.
Amazon CloudSearch supports + two document batch formats: + + + + application/json + + application/xml + + """ + uri = '/2013-01-01/documents/batch' + headers = {} + query_params = {} + if content_type is not None: + headers['Content-Type'] = content_type + return self.make_request('POST', uri, expected_status=200, + data=documents, headers=headers, + params=query_params) + + def make_request(self, verb, resource, headers=None, data='', + expected_status=None, params=None): + if headers is None: + headers = {} + response = AWSAuthConnection.make_request( + self, verb, resource, headers=headers, data=data, params=params) + body = json.loads(response.read().decode('utf-8')) + if response.status == expected_status: + return body + else: + raise JSONResponseError(response.status, response.reason, body) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudtrail/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudtrail/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..263caffa6dcbe4d84d2a14ee12cc8e096f5e6131 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudtrail/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the AWS Cloudtrail service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.cloudtrail.layer1 import CloudTrailConnection + return get_regions('cloudtrail', connection_cls=CloudTrailConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudtrail/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudtrail/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..d2c1b735b6edcac56b8380e8f8df3ede308273ff --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudtrail/exceptions.py @@ -0,0 +1,118 @@ +""" +Exceptions that are specific to the cloudtrail module. +""" +from boto.exception import BotoServerError + + +class InvalidSnsTopicNameException(BotoServerError): + """ + Raised when an invalid SNS topic name is passed to Cloudtrail. + """ + pass + + +class InvalidS3BucketNameException(BotoServerError): + """ + Raised when an invalid S3 bucket name is passed to Cloudtrail. 
+ """ + pass + + +class TrailAlreadyExistsException(BotoServerError): + """ + Raised when the given trail name already exists. + """ + pass + + +class InsufficientSnsTopicPolicyException(BotoServerError): + """ + Raised when the SNS topic does not allow Cloudtrail to post + messages. + """ + pass + + +class InvalidTrailNameException(BotoServerError): + """ + Raised when the trail name is invalid. + """ + pass + + +class InternalErrorException(BotoServerError): + """ + Raised when there was an internal Cloudtrail error. + """ + pass + + +class TrailNotFoundException(BotoServerError): + """ + Raised when the given trail name is not found. + """ + pass + + +class S3BucketDoesNotExistException(BotoServerError): + """ + Raised when the given S3 bucket does not exist. + """ + pass + + +class TrailNotProvidedException(BotoServerError): + """ + Raised when no trail name was provided. + """ + pass + + +class InvalidS3PrefixException(BotoServerError): + """ + Raised when an invalid key prefix is given. + """ + pass + + +class MaximumNumberOfTrailsExceededException(BotoServerError): + """ + Raised when no more trails can be created. + """ + pass + + +class InsufficientS3BucketPolicyException(BotoServerError): + """ + Raised when the S3 bucket does not allow Cloudtrail to + write files into the prefix. + """ + pass + + +class InvalidMaxResultsException(BotoServerError): + pass + + +class InvalidTimeRangeException(BotoServerError): + pass + + +class InvalidLookupAttributesException(BotoServerError): + pass + + +class InvalidCloudWatchLogsLogGroupArnException(BotoServerError): + pass + + +class InvalidCloudWatchLogsRoleArnException(BotoServerError): + pass + + +class CloudWatchLogsDeliveryUnavailableException(BotoServerError): + pass + + +class InvalidNextTokenException(BotoServerError): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cloudtrail/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/cloudtrail/layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..f233f321b52fb495c77883996c9f97915dcb1df7 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cloudtrail/layer1.py @@ -0,0 +1,374 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# + +import boto +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.cloudtrail import exceptions +from boto.compat import json + + +class CloudTrailConnection(AWSQueryConnection): + """ + AWS CloudTrail + This is the CloudTrail API Reference. It provides descriptions of + actions, data types, common parameters, and common errors for + CloudTrail. + + CloudTrail is a web service that records AWS API calls for your + AWS account and delivers log files to an Amazon S3 bucket. The + recorded information includes the identity of the user, the start + time of the AWS API call, the source IP address, the request + parameters, and the response elements returned by the service. + + As an alternative to using the API, you can use one of the AWS + SDKs, which consist of libraries and sample code for various + programming languages and platforms (Java, Ruby, .NET, iOS, + Android, etc.). The SDKs provide a convenient way to create + programmatic access to AWSCloudTrail. For example, the SDKs take + care of cryptographically signing requests, managing errors, and + retrying requests automatically. For information about the AWS + SDKs, including how to download and install them, see the `Tools + for Amazon Web Services page`_. + + See the CloudTrail User Guide for information about the data that + is included with each AWS API call listed in the log files. + """ + APIVersion = "2013-11-01" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "cloudtrail.us-east-1.amazonaws.com" + ServiceName = "CloudTrail" + TargetPrefix = "com.amazonaws.cloudtrail.v20131101.CloudTrail_20131101" + ResponseError = JSONResponseError + + _faults = { + "InvalidMaxResultsException": exceptions.InvalidMaxResultsException, + "InvalidSnsTopicNameException": exceptions.InvalidSnsTopicNameException, + "InvalidS3BucketNameException": exceptions.InvalidS3BucketNameException, + "TrailAlreadyExistsException": exceptions.TrailAlreadyExistsException, + "InvalidTimeRangeException": exceptions.InvalidTimeRangeException, + "InvalidLookupAttributesException": exceptions.InvalidLookupAttributesException, + "InsufficientSnsTopicPolicyException": exceptions.InsufficientSnsTopicPolicyException, + "InvalidCloudWatchLogsLogGroupArnException": exceptions.InvalidCloudWatchLogsLogGroupArnException, + "InvalidCloudWatchLogsRoleArnException": exceptions.InvalidCloudWatchLogsRoleArnException, + "InvalidTrailNameException": exceptions.InvalidTrailNameException, + "CloudWatchLogsDeliveryUnavailableException": exceptions.CloudWatchLogsDeliveryUnavailableException, + "TrailNotFoundException": exceptions.TrailNotFoundException, + "S3BucketDoesNotExistException": exceptions.S3BucketDoesNotExistException, + "InvalidNextTokenException": exceptions.InvalidNextTokenException, + "InvalidS3PrefixException": exceptions.InvalidS3PrefixException, + "MaximumNumberOfTrailsExceededException": exceptions.MaximumNumberOfTrailsExceededException, + "InsufficientS3BucketPolicyException": exceptions.InsufficientS3BucketPolicyException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(CloudTrailConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def create_trail(self, name, 
s3_bucket_name, s3_key_prefix=None,
+ sns_topic_name=None, include_global_service_events=None,
+ cloud_watch_logs_log_group_arn=None,
+ cloud_watch_logs_role_arn=None):
+ """
+ From the command line, use `create-subscription`.
+
+ Creates a trail that specifies the settings for delivery of
+ log data to an Amazon S3 bucket.
+
+ :type name: string
+ :param name: Specifies the name of the trail.
+
+ :type s3_bucket_name: string
+ :param s3_bucket_name: Specifies the name of the Amazon S3 bucket
+ designated for publishing log files.
+
+ :type s3_key_prefix: string
+ :param s3_key_prefix: Specifies the Amazon S3 key prefix that precedes
+ the name of the bucket you have designated for log file delivery.
+
+ :type sns_topic_name: string
+ :param sns_topic_name: Specifies the name of the Amazon SNS topic
+ defined for notification of log file delivery.
+
+ :type include_global_service_events: boolean
+ :param include_global_service_events: Specifies whether the trail is
+ publishing events from global services such as IAM to the log
+ files.
+
+ :type cloud_watch_logs_log_group_arn: string
+ :param cloud_watch_logs_log_group_arn: Specifies a log group name using
+ an Amazon Resource Name (ARN), a unique identifier that represents
+ the log group to which CloudTrail logs will be delivered. Not
+ required unless you specify CloudWatchLogsRoleArn.
+
+ :type cloud_watch_logs_role_arn: string
+ :param cloud_watch_logs_role_arn: Specifies the role for the CloudWatch
+ Logs endpoint to assume to write to a user's log group.
+
+ """
+ params = {'Name': name, 'S3BucketName': s3_bucket_name, }
+ if s3_key_prefix is not None:
+ params['S3KeyPrefix'] = s3_key_prefix
+ if sns_topic_name is not None:
+ params['SnsTopicName'] = sns_topic_name
+ if include_global_service_events is not None:
+ params['IncludeGlobalServiceEvents'] = include_global_service_events
+ if cloud_watch_logs_log_group_arn is not None:
+ params['CloudWatchLogsLogGroupArn'] = cloud_watch_logs_log_group_arn
+ if cloud_watch_logs_role_arn is not None:
+ params['CloudWatchLogsRoleArn'] = cloud_watch_logs_role_arn
+ return self.make_request(action='CreateTrail',
+ body=json.dumps(params))
+
+ def delete_trail(self, name):
+ """
+ Deletes a trail.
+
+ :type name: string
+ :param name: The name of a trail to be deleted.
+
+ """
+ params = {'Name': name, }
+ return self.make_request(action='DeleteTrail',
+ body=json.dumps(params))
+
+ def describe_trails(self, trail_name_list=None):
+ """
+ Retrieves settings for the trail associated with the current
+ region for your account.
+
+ :type trail_name_list: list
+ :param trail_name_list: The list of trail names to retrieve settings
+ for.
+
+ """
+ params = {}
+ if trail_name_list is not None:
+ params['trailNameList'] = trail_name_list
+ return self.make_request(action='DescribeTrails',
+ body=json.dumps(params))
+
+ def get_trail_status(self, name):
+ """
+ Returns a JSON-formatted list of information about the
+ specified trail. Fields include information on delivery
+ errors, Amazon SNS and Amazon S3 errors, and start and stop
+ logging times for each trail.
+
+ :type name: string
+ :param name: The name of the trail for which you are requesting the
+ current status.
+
+ """
+ params = {'Name': name, }
+ return self.make_request(action='GetTrailStatus',
+ body=json.dumps(params))
+
+ def lookup_events(self, lookup_attributes=None, start_time=None,
+ end_time=None, max_results=None, next_token=None):
+ """
+ Looks up API activity events captured by CloudTrail that
+ create, update, or delete resources in your account.
Events
+ for a region can be looked up for the times in which you had
+ CloudTrail turned on in that region during the last seven
+ days. Lookup supports five different attributes: time range
+ (defined by a start time and end time), user name, event name,
+ resource type, and resource name. All attributes are optional.
+ The maximum number of attributes that can be specified in any
+ one lookup request is a time range and one other attribute. The
+ default number of results returned is 10, with a maximum of 50
+ possible. The response includes a token that you can use to
+ get the next page of results.
+ The rate of lookup requests is limited to one per second per
+ account. If this limit is exceeded, a throttling error occurs.
+ Events that occurred during the selected time range will not
+ be available for lookup if CloudTrail logging was not enabled
+ when the events occurred.
+
+ :type lookup_attributes: list
+ :param lookup_attributes: Contains a list of lookup attributes.
+ Currently the list can contain only one item.
+
+ :type start_time: timestamp
+ :param start_time: Specifies that only events that occur after or at
+ the specified time are returned. If the specified start time is
+ after the specified end time, an error is returned.
+
+ :type end_time: timestamp
+ :param end_time: Specifies that only events that occur before or at the
+ specified time are returned. If the specified end time is before
+ the specified start time, an error is returned.
+
+ :type max_results: integer
+ :param max_results: The number of events to return. Possible values are
+ 1 through 50. The default is 10.
+
+ :type next_token: string
+ :param next_token: The token to use to get the next page of results
+ after a previous API call. This token must be passed in with the
+ same parameters that were specified in the original call. For
+ example, if the original call specified an AttributeKey of
+ 'Username' with a value of 'root', the call with NextToken should
+ include those same parameters.
+
+ """
+ params = {}
+ if lookup_attributes is not None:
+ params['LookupAttributes'] = lookup_attributes
+ if start_time is not None:
+ params['StartTime'] = start_time
+ if end_time is not None:
+ params['EndTime'] = end_time
+ if max_results is not None:
+ params['MaxResults'] = max_results
+ if next_token is not None:
+ params['NextToken'] = next_token
+ return self.make_request(action='LookupEvents',
+ body=json.dumps(params))
+
+ def start_logging(self, name):
+ """
+ Starts the recording of AWS API calls and log file delivery
+ for a trail.
+
+ :type name: string
+ :param name: The name of the trail for which CloudTrail logs AWS API
+ calls.
+
+ """
+ params = {'Name': name, }
+ return self.make_request(action='StartLogging',
+ body=json.dumps(params))
+
+ def stop_logging(self, name):
+ """
+ Suspends the recording of AWS API calls and log file delivery
+ for the specified trail. Under most circumstances, there is no
+ need to use this action. You can update a trail without
+ stopping it first. This action is the only way to stop
+ recording.
+
+ :type name: string
+ :param name: Communicates to CloudTrail the name of the trail for which
+ to stop logging AWS API calls.
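+
+ Example: a minimal sketch using the module-level
+ `boto.cloudtrail.connect_to_region` helper; the trail name is
+ illustrative::
+
+     import boto.cloudtrail
+     conn = boto.cloudtrail.connect_to_region('us-east-1')
+     conn.stop_logging('Default')
+     # ...later, resume log delivery for the same trail:
+     conn.start_logging('Default')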
+ + """ + params = {'Name': name, } + return self.make_request(action='StopLogging', + body=json.dumps(params)) + + def update_trail(self, name, s3_bucket_name=None, s3_key_prefix=None, + sns_topic_name=None, include_global_service_events=None, + cloud_watch_logs_log_group_arn=None, + cloud_watch_logs_role_arn=None): + """ + From the command line, use `update-subscription`. + + Updates the settings that specify delivery of log files. + Changes to a trail do not require stopping the CloudTrail + service. Use this action to designate an existing bucket for + log delivery. If the existing bucket has previously been a + target for CloudTrail log files, an IAM policy exists for the + bucket. + + :type name: string + :param name: Specifies the name of the trail. + + :type s3_bucket_name: string + :param s3_bucket_name: Specifies the name of the Amazon S3 bucket + designated for publishing log files. + + :type s3_key_prefix: string + :param s3_key_prefix: Specifies the Amazon S3 key prefix that precedes + the name of the bucket you have designated for log file delivery. + + :type sns_topic_name: string + :param sns_topic_name: Specifies the name of the Amazon SNS topic + defined for notification of log file delivery. + + :type include_global_service_events: boolean + :param include_global_service_events: Specifies whether the trail is + publishing events from global services such as IAM to the log + files. + + :type cloud_watch_logs_log_group_arn: string + :param cloud_watch_logs_log_group_arn: Specifies a log group name using + an Amazon Resource Name (ARN), a unique identifier that represents + the log group to which CloudTrail logs will be delivered. Not + required unless you specify CloudWatchLogsRoleArn. + + :type cloud_watch_logs_role_arn: string + :param cloud_watch_logs_role_arn: Specifies the role for the CloudWatch + Logs endpoint to assume to write to a users log group. 
+ + """ + params = {'Name': name, } + if s3_bucket_name is not None: + params['S3BucketName'] = s3_bucket_name + if s3_key_prefix is not None: + params['S3KeyPrefix'] = s3_key_prefix + if sns_topic_name is not None: + params['SnsTopicName'] = sns_topic_name + if include_global_service_events is not None: + params['IncludeGlobalServiceEvents'] = include_global_service_events + if cloud_watch_logs_log_group_arn is not None: + params['CloudWatchLogsLogGroupArn'] = cloud_watch_logs_log_group_arn + if cloud_watch_logs_role_arn is not None: + params['CloudWatchLogsRoleArn'] = cloud_watch_logs_role_arn + return self.make_request(action='UpdateTrail', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/codedeploy/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/codedeploy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..24e1b505d0fcd8bdb17b6b6318913183eb3cc89b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/codedeploy/__init__.py @@ -0,0 +1,40 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the AWS CodeDeploy service. 
+ + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.codedeploy.layer1 import CodeDeployConnection + return get_regions('codedeploy', connection_cls=CodeDeployConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/codedeploy/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/codedeploy/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..f23db8f04136e068ee297e2ba90b1012f83b5726 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/codedeploy/exceptions.py @@ -0,0 +1,199 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# + +from boto.exception import BotoServerError + + +class InvalidDeploymentIdException(BotoServerError): + pass + + +class InvalidDeploymentGroupNameException(BotoServerError): + pass + + +class DeploymentConfigAlreadyExistsException(BotoServerError): + pass + + +class InvalidRoleException(BotoServerError): + pass + + +class RoleRequiredException(BotoServerError): + pass + + +class DeploymentGroupAlreadyExistsException(BotoServerError): + pass + + +class DeploymentConfigLimitExceededException(BotoServerError): + pass + + +class InvalidNextTokenException(BotoServerError): + pass + + +class InvalidDeploymentConfigNameException(BotoServerError): + pass + + +class InvalidSortByException(BotoServerError): + pass + + +class InstanceDoesNotExistException(BotoServerError): + pass + + +class InvalidMinimumHealthyHostValueException(BotoServerError): + pass + + +class ApplicationLimitExceededException(BotoServerError): + pass + + +class ApplicationNameRequiredException(BotoServerError): + pass + + +class InvalidEC2TagException(BotoServerError): + pass + + +class DeploymentDoesNotExistException(BotoServerError): + pass + + +class DeploymentLimitExceededException(BotoServerError): + pass + + +class InvalidInstanceStatusException(BotoServerError): + pass + + +class RevisionRequiredException(BotoServerError): + pass + + +class InvalidBucketNameFilterException(BotoServerError): + pass + + +class DeploymentGroupLimitExceededException(BotoServerError): + pass + + +class DeploymentGroupDoesNotExistException(BotoServerError): + pass + + +class DeploymentConfigNameRequiredException(BotoServerError): + pass + + +class DeploymentAlreadyCompletedException(BotoServerError): + pass + + +class RevisionDoesNotExistException(BotoServerError): + pass + + +class DeploymentGroupNameRequiredException(BotoServerError): + pass + + +class DeploymentIdRequiredException(BotoServerError): + pass + + +class DeploymentConfigDoesNotExistException(BotoServerError): + pass + + +class BucketNameFilterRequiredException(BotoServerError): + pass + + +class InvalidTimeRangeException(BotoServerError): + pass + + +class ApplicationDoesNotExistException(BotoServerError): + pass + + +class InvalidRevisionException(BotoServerError): + pass + + +class InvalidSortOrderException(BotoServerError): + pass + + +class InvalidOperationException(BotoServerError): + pass + + +class InvalidAutoScalingGroupException(BotoServerError): + pass + + +class InvalidApplicationNameException(BotoServerError): + pass + + +class DescriptionTooLongException(BotoServerError): + pass + + +class ApplicationAlreadyExistsException(BotoServerError): + pass + + +class InvalidDeployedStateFilterException(BotoServerError): + pass + + +class DeploymentNotStartedException(BotoServerError): + pass + + +class DeploymentConfigInUseException(BotoServerError): + pass + + +class InstanceIdRequiredException(BotoServerError): + pass + + +class InvalidKeyPrefixFilterException(BotoServerError): + pass + + +class InvalidDeploymentStatusException(BotoServerError): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/codedeploy/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/codedeploy/layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..6c61a08342962bfcfda3538b78b0337f33c3cea7 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/codedeploy/layer1.py @@ -0,0 +1,899 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.codedeploy import exceptions + + +class CodeDeployConnection(AWSQueryConnection): + """ + AWS CodeDeploy **Overview** + This is the AWS CodeDeploy API Reference. This guide provides + descriptions of the AWS CodeDeploy APIs. For additional + information, see the `AWS CodeDeploy User Guide`_. + **Using the APIs** + You can use the AWS CodeDeploy APIs to work with the following + items: + + + + Applications , which are unique identifiers that AWS CodeDeploy + uses to ensure that the correct combinations of revisions, + deployment configurations, and deployment groups are being + referenced during deployments. You can work with applications by + calling CreateApplication, DeleteApplication, GetApplication, + ListApplications, BatchGetApplications, and UpdateApplication to + create, delete, and get information about applications, and to + change information about an application, respectively. + + Deployment configurations , which are sets of deployment rules + and deployment success and failure conditions that AWS CodeDeploy + uses during deployments. You can work with deployment + configurations by calling CreateDeploymentConfig, + DeleteDeploymentConfig, GetDeploymentConfig, and + ListDeploymentConfigs to create, delete, and get information about + deployment configurations, respectively. + + Deployment groups , which represent groups of Amazon EC2 + instances to which application revisions can be deployed. You can + work with deployment groups by calling CreateDeploymentGroup, + DeleteDeploymentGroup, GetDeploymentGroup, ListDeploymentGroups, + and UpdateDeploymentGroup to create, delete, and get information + about single and multiple deployment groups, and to change + information about a deployment group, respectively. + + Deployment instances (also known simply as instances ), which + represent Amazon EC2 instances to which application revisions are + deployed. Deployment instances are identified by their Amazon EC2 + tags or Auto Scaling group names. Deployment instances belong to + deployment groups. You can work with deployment instances by + calling GetDeploymentInstance and ListDeploymentInstances to get + information about single and multiple deployment instances, + respectively. 
+ + Deployments , which represent the process of deploying revisions + to deployment groups. You can work with deployments by calling + CreateDeployment, GetDeployment, ListDeployments, + BatchGetDeployments, and StopDeployment to create and get + information about deployments, and to stop a deployment, + respectively. + + Application revisions (also known simply as revisions ), which + are archive files that are stored in Amazon S3 buckets or GitHub + repositories. These revisions contain source content (such as + source code, web pages, executable files, any deployment scripts, + and similar) along with an Application Specification file (AppSpec + file). (The AppSpec file is unique to AWS CodeDeploy; it defines a + series of deployment actions that you want AWS CodeDeploy to + execute.) An application revision is uniquely identified by its + Amazon S3 object key and its ETag, version, or both. Application + revisions are deployed to deployment groups. You can work with + application revisions by calling GetApplicationRevision, + ListApplicationRevisions, and RegisterApplicationRevision to get + information about application revisions and to inform AWS + CodeDeploy about an application revision, respectively. + """ + APIVersion = "2014-10-06" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "codedeploy.us-east-1.amazonaws.com" + ServiceName = "codedeploy" + TargetPrefix = "CodeDeploy_20141006" + ResponseError = JSONResponseError + + _faults = { + "InvalidDeploymentIdException": exceptions.InvalidDeploymentIdException, + "InvalidDeploymentGroupNameException": exceptions.InvalidDeploymentGroupNameException, + "DeploymentConfigAlreadyExistsException": exceptions.DeploymentConfigAlreadyExistsException, + "InvalidRoleException": exceptions.InvalidRoleException, + "RoleRequiredException": exceptions.RoleRequiredException, + "DeploymentGroupAlreadyExistsException": exceptions.DeploymentGroupAlreadyExistsException, + "DeploymentConfigLimitExceededException": exceptions.DeploymentConfigLimitExceededException, + "InvalidNextTokenException": exceptions.InvalidNextTokenException, + "InvalidDeploymentConfigNameException": exceptions.InvalidDeploymentConfigNameException, + "InvalidSortByException": exceptions.InvalidSortByException, + "InstanceDoesNotExistException": exceptions.InstanceDoesNotExistException, + "InvalidMinimumHealthyHostValueException": exceptions.InvalidMinimumHealthyHostValueException, + "ApplicationLimitExceededException": exceptions.ApplicationLimitExceededException, + "ApplicationNameRequiredException": exceptions.ApplicationNameRequiredException, + "InvalidEC2TagException": exceptions.InvalidEC2TagException, + "DeploymentDoesNotExistException": exceptions.DeploymentDoesNotExistException, + "DeploymentLimitExceededException": exceptions.DeploymentLimitExceededException, + "InvalidInstanceStatusException": exceptions.InvalidInstanceStatusException, + "RevisionRequiredException": exceptions.RevisionRequiredException, + "InvalidBucketNameFilterException": exceptions.InvalidBucketNameFilterException, + "DeploymentGroupLimitExceededException": exceptions.DeploymentGroupLimitExceededException, + "DeploymentGroupDoesNotExistException": exceptions.DeploymentGroupDoesNotExistException, + "DeploymentConfigNameRequiredException": exceptions.DeploymentConfigNameRequiredException, + "DeploymentAlreadyCompletedException": exceptions.DeploymentAlreadyCompletedException, + "RevisionDoesNotExistException": exceptions.RevisionDoesNotExistException, + 
"DeploymentGroupNameRequiredException": exceptions.DeploymentGroupNameRequiredException, + "DeploymentIdRequiredException": exceptions.DeploymentIdRequiredException, + "DeploymentConfigDoesNotExistException": exceptions.DeploymentConfigDoesNotExistException, + "BucketNameFilterRequiredException": exceptions.BucketNameFilterRequiredException, + "InvalidTimeRangeException": exceptions.InvalidTimeRangeException, + "ApplicationDoesNotExistException": exceptions.ApplicationDoesNotExistException, + "InvalidRevisionException": exceptions.InvalidRevisionException, + "InvalidSortOrderException": exceptions.InvalidSortOrderException, + "InvalidOperationException": exceptions.InvalidOperationException, + "InvalidAutoScalingGroupException": exceptions.InvalidAutoScalingGroupException, + "InvalidApplicationNameException": exceptions.InvalidApplicationNameException, + "DescriptionTooLongException": exceptions.DescriptionTooLongException, + "ApplicationAlreadyExistsException": exceptions.ApplicationAlreadyExistsException, + "InvalidDeployedStateFilterException": exceptions.InvalidDeployedStateFilterException, + "DeploymentNotStartedException": exceptions.DeploymentNotStartedException, + "DeploymentConfigInUseException": exceptions.DeploymentConfigInUseException, + "InstanceIdRequiredException": exceptions.InstanceIdRequiredException, + "InvalidKeyPrefixFilterException": exceptions.InvalidKeyPrefixFilterException, + "InvalidDeploymentStatusException": exceptions.InvalidDeploymentStatusException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(CodeDeployConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def batch_get_applications(self, application_names=None): + """ + Gets information about one or more applications. + + :type application_names: list + :param application_names: A list of application names, with multiple + application names separated by spaces. + + """ + params = {} + if application_names is not None: + params['applicationNames'] = application_names + return self.make_request(action='BatchGetApplications', + body=json.dumps(params)) + + def batch_get_deployments(self, deployment_ids=None): + """ + Gets information about one or more deployments. + + :type deployment_ids: list + :param deployment_ids: A list of deployment IDs, with multiple + deployment IDs separated by spaces. + + """ + params = {} + if deployment_ids is not None: + params['deploymentIds'] = deployment_ids + return self.make_request(action='BatchGetDeployments', + body=json.dumps(params)) + + def create_application(self, application_name): + """ + Creates a new application. + + :type application_name: string + :param application_name: The name of the application. This name must be + unique within the AWS user account. + + """ + params = {'applicationName': application_name, } + return self.make_request(action='CreateApplication', + body=json.dumps(params)) + + def create_deployment(self, application_name, deployment_group_name=None, + revision=None, deployment_config_name=None, + description=None, + ignore_application_stop_failures=None): + """ + Deploys an application revision to the specified deployment + group. 
+ + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + :type deployment_group_name: string + :param deployment_group_name: The deployment group's name. + + :type revision: dict + :param revision: The type of revision to deploy, along with information + about the revision's location. + + :type deployment_config_name: string + :param deployment_config_name: The name of an existing deployment + configuration within the AWS user account. + If not specified, the value configured in the deployment group will be + used as the default. If the deployment group does not have a + deployment configuration associated with it, then + CodeDeployDefault.OneAtATime will be used by default. + + :type description: string + :param description: A comment about the deployment. + + :type ignore_application_stop_failures: boolean + :param ignore_application_stop_failures: If set to true, then if the + deployment causes the ApplicationStop deployment lifecycle event to + fail to a specific instance, the deployment will not be considered + to have failed to that instance at that point and will continue on + to the BeforeInstall deployment lifecycle event. + If set to false or not specified, then if the deployment causes the + ApplicationStop deployment lifecycle event to fail to a specific + instance, the deployment will stop to that instance, and the + deployment to that instance will be considered to have failed. + + """ + params = {'applicationName': application_name, } + if deployment_group_name is not None: + params['deploymentGroupName'] = deployment_group_name + if revision is not None: + params['revision'] = revision + if deployment_config_name is not None: + params['deploymentConfigName'] = deployment_config_name + if description is not None: + params['description'] = description + if ignore_application_stop_failures is not None: + params['ignoreApplicationStopFailures'] = ignore_application_stop_failures + return self.make_request(action='CreateDeployment', + body=json.dumps(params)) + + def create_deployment_config(self, deployment_config_name, + minimum_healthy_hosts=None): + """ + Creates a new deployment configuration. + + :type deployment_config_name: string + :param deployment_config_name: The name of the deployment configuration + to create. + + :type minimum_healthy_hosts: dict + :param minimum_healthy_hosts: The minimum number of healthy instances + that should be available at any time during the deployment. There + are two parameters expected in the input: type and value. + The type parameter takes either of the following values: + + + + HOST_COUNT: The value parameter represents the minimum number of + healthy instances, as an absolute value. + + FLEET_PERCENT: The value parameter represents the minimum number of + healthy instances, as a percentage of the total number of instances + in the deployment. If you specify FLEET_PERCENT, then at the start + of the deployment AWS CodeDeploy converts the percentage to the + equivalent number of instances and rounds fractional instances up. + + + The value parameter takes an integer. + + For example, to set a minimum of 95% healthy instances, specify a type + of FLEET_PERCENT and a value of 95. 
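+
+ For example, a sketch that encodes the 95% case described above;
+ the configuration name is illustrative and `conn` is an existing
+ CodeDeployConnection::
+
+     conn.create_deployment_config(
+         'NinetyFivePercentHealthy',
+         minimum_healthy_hosts={'type': 'FLEET_PERCENT', 'value': 95})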
+ + """ + params = {'deploymentConfigName': deployment_config_name, } + if minimum_healthy_hosts is not None: + params['minimumHealthyHosts'] = minimum_healthy_hosts + return self.make_request(action='CreateDeploymentConfig', + body=json.dumps(params)) + + def create_deployment_group(self, application_name, + deployment_group_name, + deployment_config_name=None, + ec_2_tag_filters=None, + auto_scaling_groups=None, + service_role_arn=None): + """ + Creates a new deployment group for application revisions to be + deployed to. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + :type deployment_group_name: string + :param deployment_group_name: The name of an existing deployment group + for the specified application. + + :type deployment_config_name: string + :param deployment_config_name: If specified, the deployment + configuration name must be one of the predefined values, or it can + be a custom deployment configuration: + + + CodeDeployDefault.AllAtOnce deploys an application revision to up to + all of the Amazon EC2 instances at once. The overall deployment + succeeds if the application revision deploys to at least one of the + instances. The overall deployment fails after the application + revision fails to deploy to all of the instances. For example, for + 9 instances, deploy to up to all 9 instances at once. The overall + deployment succeeds if any of the 9 instances is successfully + deployed to, and it fails if all 9 instances fail to be deployed + to. + + CodeDeployDefault.HalfAtATime deploys to up to half of the instances + at a time (with fractions rounded down). The overall deployment + succeeds if the application revision deploys to at least half of + the instances (with fractions rounded up); otherwise, the + deployment fails. For example, for 9 instances, deploy to up to 4 + instances at a time. The overall deployment succeeds if 5 or more + instances are successfully deployed to; otherwise, the deployment + fails. Note that the deployment may successfully deploy to some + instances, even if the overall deployment fails. + + CodeDeployDefault.OneAtATime deploys the application revision to only + one of the instances at a time. The overall deployment succeeds if + the application revision deploys to all of the instances. The + overall deployment fails after the application revision first fails + to deploy to any one instance. For example, for 9 instances, deploy + to one instance at a time. The overall deployment succeeds if all 9 + instances are successfully deployed to, and it fails if any of one + of the 9 instances fail to be deployed to. Note that the deployment + may successfully deploy to some instances, even if the overall + deployment fails. This is the default deployment configuration if a + configuration isn't specified for either the deployment or the + deployment group. + + + To create a custom deployment configuration, call the create deployment + configuration operation. + + :type ec_2_tag_filters: list + :param ec_2_tag_filters: The Amazon EC2 tags to filter on. + + :type auto_scaling_groups: list + :param auto_scaling_groups: A list of associated Auto Scaling groups. + + :type service_role_arn: string + :param service_role_arn: A service role ARN that allows AWS CodeDeploy + to act on the user's behalf when interacting with AWS services. 
+ + """ + params = { + 'applicationName': application_name, + 'deploymentGroupName': deployment_group_name, + } + if deployment_config_name is not None: + params['deploymentConfigName'] = deployment_config_name + if ec_2_tag_filters is not None: + params['ec2TagFilters'] = ec_2_tag_filters + if auto_scaling_groups is not None: + params['autoScalingGroups'] = auto_scaling_groups + if service_role_arn is not None: + params['serviceRoleArn'] = service_role_arn + return self.make_request(action='CreateDeploymentGroup', + body=json.dumps(params)) + + def delete_application(self, application_name): + """ + Deletes an application. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + """ + params = {'applicationName': application_name, } + return self.make_request(action='DeleteApplication', + body=json.dumps(params)) + + def delete_deployment_config(self, deployment_config_name): + """ + Deletes a deployment configuration. + + A deployment configuration cannot be deleted if it is + currently in use. Also, predefined configurations cannot be + deleted. + + :type deployment_config_name: string + :param deployment_config_name: The name of an existing deployment + configuration within the AWS user account. + + """ + params = {'deploymentConfigName': deployment_config_name, } + return self.make_request(action='DeleteDeploymentConfig', + body=json.dumps(params)) + + def delete_deployment_group(self, application_name, + deployment_group_name): + """ + Deletes a deployment group. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + :type deployment_group_name: string + :param deployment_group_name: The name of an existing deployment group + for the specified application. + + """ + params = { + 'applicationName': application_name, + 'deploymentGroupName': deployment_group_name, + } + return self.make_request(action='DeleteDeploymentGroup', + body=json.dumps(params)) + + def get_application(self, application_name): + """ + Gets information about an application. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + """ + params = {'applicationName': application_name, } + return self.make_request(action='GetApplication', + body=json.dumps(params)) + + def get_application_revision(self, application_name, revision): + """ + Gets information about an application revision. + + :type application_name: string + :param application_name: The name of the application that corresponds + to the revision. + + :type revision: dict + :param revision: Information about the application revision to get, + including the revision's type and its location. + + """ + params = { + 'applicationName': application_name, + 'revision': revision, + } + return self.make_request(action='GetApplicationRevision', + body=json.dumps(params)) + + def get_deployment(self, deployment_id): + """ + Gets information about a deployment. + + :type deployment_id: string + :param deployment_id: An existing deployment ID within the AWS user + account. + + """ + params = {'deploymentId': deployment_id, } + return self.make_request(action='GetDeployment', + body=json.dumps(params)) + + def get_deployment_config(self, deployment_config_name): + """ + Gets information about a deployment configuration. 
+ + :type deployment_config_name: string + :param deployment_config_name: The name of an existing deployment + configuration within the AWS user account. + + """ + params = {'deploymentConfigName': deployment_config_name, } + return self.make_request(action='GetDeploymentConfig', + body=json.dumps(params)) + + def get_deployment_group(self, application_name, deployment_group_name): + """ + Gets information about a deployment group. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + :type deployment_group_name: string + :param deployment_group_name: The name of an existing deployment group + for the specified application. + + """ + params = { + 'applicationName': application_name, + 'deploymentGroupName': deployment_group_name, + } + return self.make_request(action='GetDeploymentGroup', + body=json.dumps(params)) + + def get_deployment_instance(self, deployment_id, instance_id): + """ + Gets information about an Amazon EC2 instance as part of a + deployment. + + :type deployment_id: string + :param deployment_id: The unique ID of a deployment. + + :type instance_id: string + :param instance_id: The unique ID of an Amazon EC2 instance in the + deployment's deployment group. + + """ + params = { + 'deploymentId': deployment_id, + 'instanceId': instance_id, + } + return self.make_request(action='GetDeploymentInstance', + body=json.dumps(params)) + + def list_application_revisions(self, application_name, sort_by=None, + sort_order=None, s_3_bucket=None, + s_3_key_prefix=None, deployed=None, + next_token=None): + """ + Lists information about revisions for an application. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + :type sort_by: string + :param sort_by: The column name to sort the list results by: + + + registerTime: Sort the list results by when the revisions were + registered with AWS CodeDeploy. + firstUsedTime: Sort the list results by when the revisions were first + used in a deployment. + lastUsedTime: Sort the list results by when the revisions were last + used in a deployment. + + + If not specified or set to null, the results will be returned in an + arbitrary order. + + :type sort_order: string + :param sort_order: The order to sort the list results by: + + + ascending: Sort the list results in ascending order. + descending: Sort the list results in descending order. + + + If not specified, the results will be sorted in ascending order. + + If set to null, the results will be sorted in an arbitrary order. + + :type s_3_bucket: string + :param s_3_bucket: A specific Amazon S3 bucket name to limit the search + for revisions. + If set to null, then all of the user's buckets will be searched. + + :type s_3_key_prefix: string + :param s_3_key_prefix: A specific key prefix for the set of Amazon S3 + objects to limit the search for revisions. + + :type deployed: string + :param deployed: + Whether to list revisions based on whether the revision is the target + revision of a deployment group: + + + + include: List revisions that are target revisions of a deployment + group. + exclude: Do not list revisions that are target revisions of a + deployment group. + ignore: List all revisions, regardless of whether they are target + revisions of a deployment group.
+ + :type next_token: string + :param next_token: An identifier that was returned from the previous + list application revisions call, which can be used to return the + next set of applications in the list. + + """ + params = {'applicationName': application_name, } + if sort_by is not None: + params['sortBy'] = sort_by + if sort_order is not None: + params['sortOrder'] = sort_order + if s_3_bucket is not None: + params['s3Bucket'] = s_3_bucket + if s_3_key_prefix is not None: + params['s3KeyPrefix'] = s_3_key_prefix + if deployed is not None: + params['deployed'] = deployed + if next_token is not None: + params['nextToken'] = next_token + return self.make_request(action='ListApplicationRevisions', + body=json.dumps(params)) + + def list_applications(self, next_token=None): + """ + Lists the applications registered within the AWS user account. + + :type next_token: string + :param next_token: An identifier that was returned from the previous + list applications call, which can be used to return the next set of + applications in the list. + + """ + params = {} + if next_token is not None: + params['nextToken'] = next_token + return self.make_request(action='ListApplications', + body=json.dumps(params)) + + def list_deployment_configs(self, next_token=None): + """ + Lists the deployment configurations within the AWS user + account. + + :type next_token: string + :param next_token: An identifier that was returned from the previous + list deployment configurations call, which can be used to return + the next set of deployment configurations in the list. + + """ + params = {} + if next_token is not None: + params['nextToken'] = next_token + return self.make_request(action='ListDeploymentConfigs', + body=json.dumps(params)) + + def list_deployment_groups(self, application_name, next_token=None): + """ + Lists the deployment groups for an application registered + within the AWS user account. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + :type next_token: string + :param next_token: An identifier that was returned from the previous + list deployment groups call, which can be used to return the next + set of deployment groups in the list. + + """ + params = {'applicationName': application_name, } + if next_token is not None: + params['nextToken'] = next_token + return self.make_request(action='ListDeploymentGroups', + body=json.dumps(params)) + + def list_deployment_instances(self, deployment_id, next_token=None, + instance_status_filter=None): + """ + Lists the Amazon EC2 instances for a deployment within the AWS + user account. + + :type deployment_id: string + :param deployment_id: The unique ID of a deployment. + + :type next_token: string + :param next_token: An identifier that was returned from the previous + list deployment instances call, which can be used to return the + next set of deployment instances in the list. + + :type instance_status_filter: list + :param instance_status_filter: + A subset of instances to list, by status: + + + + Pending: Include in the resulting list those instances with pending + deployments. + + InProgress: Include in the resulting list those instances with in- + progress deployments. + + Succeeded: Include in the resulting list those instances with + succeeded deployments. + + Failed: Include in the resulting list those instances with failed + deployments. + + Skipped: Include in the resulting list those instances with skipped + deployments. 
+ + Unknown: Include in the resulting list those instances with + deployments in an unknown state. + + """ + params = {'deploymentId': deployment_id, } + if next_token is not None: + params['nextToken'] = next_token + if instance_status_filter is not None: + params['instanceStatusFilter'] = instance_status_filter + return self.make_request(action='ListDeploymentInstances', + body=json.dumps(params)) + + def list_deployments(self, application_name=None, + deployment_group_name=None, + include_only_statuses=None, create_time_range=None, + next_token=None): + """ + Lists the deployments under a deployment group for an + application registered within the AWS user account. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + :type deployment_group_name: string + :param deployment_group_name: The name of an existing deployment group + for the specified application. + + :type include_only_statuses: list + :param include_only_statuses: A subset of deployments to list, by + status: + + + Created: Include in the resulting list created deployments. + + Queued: Include in the resulting list queued deployments. + + In Progress: Include in the resulting list in-progress deployments. + + Succeeded: Include in the resulting list succeeded deployments. + + Failed: Include in the resulting list failed deployments. + + Aborted: Include in the resulting list aborted deployments. + + :type create_time_range: dict + :param create_time_range: A deployment creation start- and end-time + range for returning a subset of the list of deployments. + + :type next_token: string + :param next_token: An identifier that was returned from the previous + list deployments call, which can be used to return the next set of + deployments in the list. + + """ + params = {} + if application_name is not None: + params['applicationName'] = application_name + if deployment_group_name is not None: + params['deploymentGroupName'] = deployment_group_name + if include_only_statuses is not None: + params['includeOnlyStatuses'] = include_only_statuses + if create_time_range is not None: + params['createTimeRange'] = create_time_range + if next_token is not None: + params['nextToken'] = next_token + return self.make_request(action='ListDeployments', + body=json.dumps(params)) + + def register_application_revision(self, application_name, revision, + description=None): + """ + Registers with AWS CodeDeploy a revision for the specified + application. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + :type description: string + :param description: A comment about the revision. + + :type revision: dict + :param revision: Information about the application revision to + register, including the revision's type and its location. + + """ + params = { + 'applicationName': application_name, + 'revision': revision, + } + if description is not None: + params['description'] = description + return self.make_request(action='RegisterApplicationRevision', + body=json.dumps(params)) + + def stop_deployment(self, deployment_id): + """ + Attempts to stop an ongoing deployment. + + :type deployment_id: string + :param deployment_id: The unique ID of a deployment. 
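+
+        A minimal usage sketch (the deployment ID is a placeholder; the
+        import assumes this module's ``CodeDeployConnection`` class)::
+
+            from boto.codedeploy.layer1 import CodeDeployConnection
+
+            conn = CodeDeployConnection()
+            result = conn.stop_deployment('d-EXAMPLE123')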
+ + """ + params = {'deploymentId': deployment_id, } + return self.make_request(action='StopDeployment', + body=json.dumps(params)) + + def update_application(self, application_name=None, + new_application_name=None): + """ + Changes an existing application's name. + + :type application_name: string + :param application_name: The current name of the application that you + want to change. + + :type new_application_name: string + :param new_application_name: The new name that you want to change the + application to. + + """ + params = {} + if application_name is not None: + params['applicationName'] = application_name + if new_application_name is not None: + params['newApplicationName'] = new_application_name + return self.make_request(action='UpdateApplication', + body=json.dumps(params)) + + def update_deployment_group(self, application_name, + current_deployment_group_name, + new_deployment_group_name=None, + deployment_config_name=None, + ec_2_tag_filters=None, + auto_scaling_groups=None, + service_role_arn=None): + """ + Changes information about an existing deployment group. + + :type application_name: string + :param application_name: The application name corresponding to the + deployment group to update. + + :type current_deployment_group_name: string + :param current_deployment_group_name: The current name of the existing + deployment group. + + :type new_deployment_group_name: string + :param new_deployment_group_name: The new name of the deployment group, + if you want to change it. + + :type deployment_config_name: string + :param deployment_config_name: The replacement deployment configuration + name to use, if you want to change it. + + :type ec_2_tag_filters: list + :param ec_2_tag_filters: The replacement set of Amazon EC2 tags to + filter on, if you want to change them. + + :type auto_scaling_groups: list + :param auto_scaling_groups: The replacement list of Auto Scaling groups + to be included in the deployment group, if you want to change them. + + :type service_role_arn: string + :param service_role_arn: A replacement service role's ARN, if you want + to change it. 
+ + """ + params = { + 'applicationName': application_name, + 'currentDeploymentGroupName': current_deployment_group_name, + } + if new_deployment_group_name is not None: + params['newDeploymentGroupName'] = new_deployment_group_name + if deployment_config_name is not None: + params['deploymentConfigName'] = deployment_config_name + if ec_2_tag_filters is not None: + params['ec2TagFilters'] = ec_2_tag_filters + if auto_scaling_groups is not None: + params['autoScalingGroups'] = auto_scaling_groups + if service_role_arn is not None: + params['serviceRoleArn'] = service_role_arn + return self.make_request(action='UpdateDeploymentGroup', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cognito/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/cognito/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..70cc23febffdfb7a2de035d163e75a400a9c82ee --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cognito/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cognito/identity/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/cognito/identity/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e58b480b3f4e5395b0202d1beec2dfabcad12709 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cognito/identity/__init__.py @@ -0,0 +1,42 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. 
+# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the Amazon Cognito Identity service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.cognito.identity.layer1 import CognitoIdentityConnection + return get_regions('cognito-identity', + connection_cls=CognitoIdentityConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cognito/identity/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/cognito/identity/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..b5c1236d7e3cd633420d8139b638d02e706e5c42 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cognito/identity/exceptions.py @@ -0,0 +1,44 @@ +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import BotoServerError + + +class LimitExceededException(BotoServerError): + pass + + +class ResourceConflictException(BotoServerError): + pass + + +class DeveloperUserAlreadyRegisteredException(BotoServerError): + pass + + +class TooManyRequestsException(BotoServerError): + pass + + +class InvalidParameterException(BotoServerError): + pass + + +class ResourceNotFoundException(BotoServerError): + pass + + +class InternalErrorException(BotoServerError): + pass + + +class NotAuthorizedException(BotoServerError): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cognito/identity/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/cognito/identity/layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..a7363d5b5c623fa2a7daecadcecdde9639ed53ad --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cognito/identity/layer1.py @@ -0,0 +1,549 @@ +# Copyright (c) 2014 Amazon.com, Inc. 
or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.cognito.identity import exceptions + + +class CognitoIdentityConnection(AWSQueryConnection): + """ + Amazon Cognito + Amazon Cognito is a web service that delivers scoped temporary + credentials to mobile devices and other untrusted environments. + Amazon Cognito uniquely identifies a device and supplies the user + with a consistent identity over the lifetime of an application. + + Using Amazon Cognito, you can enable authentication with one or + more third-party identity providers (Facebook, Google, or Login + with Amazon), and you can also choose to support unauthenticated + access from your app. Cognito delivers a unique identifier for + each user and acts as an OpenID token provider trusted by AWS + Security Token Service (STS) to access temporary, limited- + privilege AWS credentials. + + To provide end-user credentials, first make an unsigned call to + GetId. If the end user is authenticated with one of the supported + identity providers, set the `Logins` map with the identity + provider token. `GetId` returns a unique identifier for the user. + + Next, make an unsigned call to GetOpenIdToken, which returns the + OpenID token necessary to call STS and retrieve AWS credentials. + This call expects the same `Logins` map as the `GetId` call, as + well as the `IdentityID` originally returned by `GetId`. The token + returned by `GetOpenIdToken` can be passed to the STS operation + `AssumeRoleWithWebIdentity`_ to retrieve AWS credentials. 
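+
+    A minimal sketch of that flow (the account ID, pool ID, and provider
+    token are placeholders; credentials for the unsigned calls are assumed
+    to come from the boto config or environment)::
+
+        from boto.cognito.identity.layer1 import CognitoIdentityConnection
+
+        conn = CognitoIdentityConnection()
+        identity = conn.get_id(
+            account_id='123456789012',
+            identity_pool_id='us-east-1:EXAMPLE-POOL-ID',
+            logins={'graph.facebook.com': 'FB_TOKEN'})
+        token = conn.get_open_id_token(
+            identity['IdentityId'],
+            logins={'graph.facebook.com': 'FB_TOKEN'})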
+ """ + APIVersion = "2014-06-30" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "cognito-identity.us-east-1.amazonaws.com" + ServiceName = "CognitoIdentity" + TargetPrefix = "AWSCognitoIdentityService" + ResponseError = JSONResponseError + + _faults = { + "LimitExceededException": exceptions.LimitExceededException, + "ResourceConflictException": exceptions.ResourceConflictException, + "DeveloperUserAlreadyRegisteredException": exceptions.DeveloperUserAlreadyRegisteredException, + "TooManyRequestsException": exceptions.TooManyRequestsException, + "InvalidParameterException": exceptions.InvalidParameterException, + "ResourceNotFoundException": exceptions.ResourceNotFoundException, + "InternalErrorException": exceptions.InternalErrorException, + "NotAuthorizedException": exceptions.NotAuthorizedException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(CognitoIdentityConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def create_identity_pool(self, identity_pool_name, + allow_unauthenticated_identities, + supported_login_providers=None, + developer_provider_name=None, + open_id_connect_provider_ar_ns=None): + """ + Creates a new identity pool. The identity pool is a store of + user identity information that is specific to your AWS + account. The limit on identity pools is 60 per account. + + :type identity_pool_name: string + :param identity_pool_name: A string that you provide. + + :type allow_unauthenticated_identities: boolean + :param allow_unauthenticated_identities: TRUE if the identity pool + supports unauthenticated logins. + + :type supported_login_providers: map + :param supported_login_providers: Optional key:value pairs mapping + provider names to provider app IDs. + + :type developer_provider_name: string + :param developer_provider_name: The "domain" by which Cognito will + refer to your users. This name acts as a placeholder that allows + your backend and the Cognito service to communicate about the + developer provider. For the `DeveloperProviderName`, you can use + letters as well as period ( `.`), underscore ( `_`), and dash ( + `-`). + Once you have set a developer provider name, you cannot change it. + Please take care in setting this parameter. + + :type open_id_connect_provider_ar_ns: list + :param open_id_connect_provider_ar_ns: + + """ + params = { + 'IdentityPoolName': identity_pool_name, + 'AllowUnauthenticatedIdentities': allow_unauthenticated_identities, + } + if supported_login_providers is not None: + params['SupportedLoginProviders'] = supported_login_providers + if developer_provider_name is not None: + params['DeveloperProviderName'] = developer_provider_name + if open_id_connect_provider_ar_ns is not None: + params['OpenIdConnectProviderARNs'] = open_id_connect_provider_ar_ns + return self.make_request(action='CreateIdentityPool', + body=json.dumps(params)) + + def delete_identity_pool(self, identity_pool_id): + """ + Deletes a user pool. Once a pool is deleted, users will not be + able to authenticate with the pool. + + :type identity_pool_id: string + :param identity_pool_id: An identity pool ID in the format REGION:GUID. 
+ + """ + params = {'IdentityPoolId': identity_pool_id, } + return self.make_request(action='DeleteIdentityPool', + body=json.dumps(params)) + + def describe_identity_pool(self, identity_pool_id): + """ + Gets details about a particular identity pool, including the + pool name, ID description, creation date, and current number + of users. + + :type identity_pool_id: string + :param identity_pool_id: An identity pool ID in the format REGION:GUID. + + """ + params = {'IdentityPoolId': identity_pool_id, } + return self.make_request(action='DescribeIdentityPool', + body=json.dumps(params)) + + def get_id(self, account_id, identity_pool_id, logins=None): + """ + Generates (or retrieves) a Cognito ID. Supplying multiple + logins will create an implicit linked account. + + :type account_id: string + :param account_id: A standard AWS account ID (9+ digits). + + :type identity_pool_id: string + :param identity_pool_id: An identity pool ID in the format REGION:GUID. + + :type logins: map + :param logins: A set of optional name-value pairs that map provider + names to provider tokens. + The available provider names for `Logins` are as follows: + + + Facebook: `graph.facebook.com` + + Google: `accounts.google.com` + + Amazon: `www.amazon.com` + + """ + params = { + 'AccountId': account_id, + 'IdentityPoolId': identity_pool_id, + } + if logins is not None: + params['Logins'] = logins + return self.make_request(action='GetId', + body=json.dumps(params)) + + def get_open_id_token(self, identity_id, logins=None): + """ + Gets an OpenID token, using a known Cognito ID. This known + Cognito ID is returned by GetId. You can optionally add + additional logins for the identity. Supplying multiple logins + creates an implicit link. + + The OpenId token is valid for 15 minutes. + + :type identity_id: string + :param identity_id: A unique identifier in the format REGION:GUID. + + :type logins: map + :param logins: A set of optional name-value pairs that map provider + names to provider tokens. + + """ + params = {'IdentityId': identity_id, } + if logins is not None: + params['Logins'] = logins + return self.make_request(action='GetOpenIdToken', + body=json.dumps(params)) + + def get_open_id_token_for_developer_identity(self, identity_pool_id, + logins, identity_id=None, + token_duration=None): + """ + Registers (or retrieves) a Cognito `IdentityId` and an OpenID + Connect token for a user authenticated by your backend + authentication process. Supplying multiple logins will create + an implicit linked account. You can only specify one developer + provider as part of the `Logins` map, which is linked to the + identity pool. The developer provider is the "domain" by which + Cognito will refer to your users. + + You can use `GetOpenIdTokenForDeveloperIdentity` to create a + new identity and to link new logins (that is, user credentials + issued by a public provider or developer provider) to an + existing identity. When you want to create a new identity, the + `IdentityId` should be null. When you want to associate a new + login with an existing authenticated/unauthenticated identity, + you can do so by providing the existing `IdentityId`. This API + will create the identity in the specified `IdentityPoolId`. + + :type identity_pool_id: string + :param identity_pool_id: An identity pool ID in the format REGION:GUID. + + :type identity_id: string + :param identity_id: A unique identifier in the format REGION:GUID. 
+ + :type logins: map + :param logins: A set of optional name-value pairs that map provider + names to provider tokens. Each name-value pair represents a user + from a public provider or developer provider. If the user is from a + developer provider, the name-value pair will follow the syntax + `"developer_provider_name": "developer_user_identifier"`. The + developer provider is the "domain" by which Cognito will refer to + your users; you provided this domain while creating/updating the + identity pool. The developer user identifier is an identifier from + your backend that uniquely identifies a user. When you create an + identity pool, you can specify the supported logins. + + :type token_duration: long + :param token_duration: The expiration time of the token, in seconds. + You can specify a custom expiration time for the token so that you + can cache it. If you don't provide an expiration time, the token is + valid for 15 minutes. You can exchange the token with Amazon STS + for temporary AWS credentials, which are valid for a maximum of one + hour. The maximum token duration you can set is 24 hours. You + should take care in setting the expiration time for a token, as + there are significant security implications: an attacker could use + a leaked token to access your AWS resources for the token's + duration. + + """ + params = { + 'IdentityPoolId': identity_pool_id, + 'Logins': logins, + } + if identity_id is not None: + params['IdentityId'] = identity_id + if token_duration is not None: + params['TokenDuration'] = token_duration + return self.make_request(action='GetOpenIdTokenForDeveloperIdentity', + body=json.dumps(params)) + + def list_identities(self, identity_pool_id, max_results, next_token=None): + """ + Lists the identities in a pool. + + :type identity_pool_id: string + :param identity_pool_id: An identity pool ID in the format REGION:GUID. + + :type max_results: integer + :param max_results: The maximum number of identities to return. + + :type next_token: string + :param next_token: A pagination token. + + """ + params = { + 'IdentityPoolId': identity_pool_id, + 'MaxResults': max_results, + } + if next_token is not None: + params['NextToken'] = next_token + return self.make_request(action='ListIdentities', + body=json.dumps(params)) + + def list_identity_pools(self, max_results, next_token=None): + """ + Lists all of the Cognito identity pools registered for your + account. + + :type max_results: integer + :param max_results: The maximum number of identities to return. + + :type next_token: string + :param next_token: A pagination token. + + """ + params = {'MaxResults': max_results, } + if next_token is not None: + params['NextToken'] = next_token + return self.make_request(action='ListIdentityPools', + body=json.dumps(params)) + + def lookup_developer_identity(self, identity_pool_id, identity_id=None, + developer_user_identifier=None, + max_results=None, next_token=None): + """ + Retrieves the `IdentityID` associated with a + `DeveloperUserIdentifier` or the list of + `DeveloperUserIdentifier`s associated with an `IdentityId` for + an existing identity. Either `IdentityID` or + `DeveloperUserIdentifier` must not be null. If you supply only + one of these values, the other value will be searched in the + database and returned as a part of the response. If you supply + both, `DeveloperUserIdentifier` will be matched against + `IdentityID`. If the values are verified against the database, + the response returns both values and is the same as the + request. 
Otherwise a `ResourceConflictException` is thrown. + + :type identity_pool_id: string + :param identity_pool_id: An identity pool ID in the format REGION:GUID. + + :type identity_id: string + :param identity_id: A unique identifier in the format REGION:GUID. + + :type developer_user_identifier: string + :param developer_user_identifier: A unique ID used by your backend + authentication process to identify a user. Typically, a developer + identity provider would issue many developer user identifiers, in + keeping with the number of users. + + :type max_results: integer + :param max_results: The maximum number of identities to return. + + :type next_token: string + :param next_token: A pagination token. The first call you make will + have `NextToken` set to null. After that the service will return + `NextToken` values as needed. For example, let's say you make a + request with `MaxResults` set to 10, and there are 20 matches in + the database. The service will return a pagination token as a part + of the response. This token can be used to call the API again and + get results starting from the 11th match. + + """ + params = {'IdentityPoolId': identity_pool_id, } + if identity_id is not None: + params['IdentityId'] = identity_id + if developer_user_identifier is not None: + params['DeveloperUserIdentifier'] = developer_user_identifier + if max_results is not None: + params['MaxResults'] = max_results + if next_token is not None: + params['NextToken'] = next_token + return self.make_request(action='LookupDeveloperIdentity', + body=json.dumps(params)) + + def merge_developer_identities(self, source_user_identifier, + destination_user_identifier, + developer_provider_name, identity_pool_id): + """ + Merges two users having different `IdentityId`s, existing in + the same identity pool, and identified by the same developer + provider. You can use this action to request that discrete + users be merged and identified as a single user in the Cognito + environment. Cognito associates the given source user ( + `SourceUserIdentifier`) with the `IdentityId` of the + `DestinationUserIdentifier`. Only developer-authenticated + users can be merged. If the users to be merged are associated + with the same public provider, but as two different users, an + exception will be thrown. + + :type source_user_identifier: string + :param source_user_identifier: User identifier for the source user. The + value should be a `DeveloperUserIdentifier`. + + :type destination_user_identifier: string + :param destination_user_identifier: User identifier for the destination + user. The value should be a `DeveloperUserIdentifier`. + + :type developer_provider_name: string + :param developer_provider_name: The "domain" by which Cognito will + refer to your users. This is a (pseudo) domain name that you + provide while creating an identity pool. This name acts as a + placeholder that allows your backend and the Cognito service to + communicate about the developer provider. For the + `DeveloperProviderName`, you can use letters as well as period (.), + underscore (_), and dash (-). + + :type identity_pool_id: string + :param identity_pool_id: An identity pool ID in the format REGION:GUID. 
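+
+        A minimal usage sketch (the user identifiers, provider name, and
+        pool ID are placeholders)::
+
+            from boto.cognito.identity.layer1 import CognitoIdentityConnection
+
+            conn = CognitoIdentityConnection()
+            conn.merge_developer_identities(
+                source_user_identifier='user-123',
+                destination_user_identifier='user-456',
+                developer_provider_name='login.mycompany.myapp',
+                identity_pool_id='us-east-1:EXAMPLE-POOL-ID')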
+ + """ + params = { + 'SourceUserIdentifier': source_user_identifier, + 'DestinationUserIdentifier': destination_user_identifier, + 'DeveloperProviderName': developer_provider_name, + 'IdentityPoolId': identity_pool_id, + } + return self.make_request(action='MergeDeveloperIdentities', + body=json.dumps(params)) + + def unlink_developer_identity(self, identity_id, identity_pool_id, + developer_provider_name, + developer_user_identifier): + """ + Unlinks a `DeveloperUserIdentifier` from an existing identity. + Unlinked developer users will be considered new identities + next time they are seen. If, for a given Cognito identity, you + remove all federated identities as well as the developer user + identifier, the Cognito identity becomes inaccessible. + + :type identity_id: string + :param identity_id: A unique identifier in the format REGION:GUID. + + :type identity_pool_id: string + :param identity_pool_id: An identity pool ID in the format REGION:GUID. + + :type developer_provider_name: string + :param developer_provider_name: The "domain" by which Cognito will + refer to your users. + + :type developer_user_identifier: string + :param developer_user_identifier: A unique ID used by your backend + authentication process to identify a user. + + """ + params = { + 'IdentityId': identity_id, + 'IdentityPoolId': identity_pool_id, + 'DeveloperProviderName': developer_provider_name, + 'DeveloperUserIdentifier': developer_user_identifier, + } + return self.make_request(action='UnlinkDeveloperIdentity', + body=json.dumps(params)) + + def unlink_identity(self, identity_id, logins, logins_to_remove): + """ + Unlinks a federated identity from an existing account. + Unlinked logins will be considered new identities next time + they are seen. Removing the last linked login will make this + identity inaccessible. + + :type identity_id: string + :param identity_id: A unique identifier in the format REGION:GUID. + + :type logins: map + :param logins: A set of optional name-value pairs that map provider + names to provider tokens. + + :type logins_to_remove: list + :param logins_to_remove: Provider names to unlink from this identity. + + """ + params = { + 'IdentityId': identity_id, + 'Logins': logins, + 'LoginsToRemove': logins_to_remove, + } + return self.make_request(action='UnlinkIdentity', + body=json.dumps(params)) + + def update_identity_pool(self, identity_pool_id, identity_pool_name, + allow_unauthenticated_identities, + supported_login_providers=None, + developer_provider_name=None, + open_id_connect_provider_ar_ns=None): + """ + Updates a user pool. + + :type identity_pool_id: string + :param identity_pool_id: An identity pool ID in the format REGION:GUID. + + :type identity_pool_name: string + :param identity_pool_name: A string that you provide. + + :type allow_unauthenticated_identities: boolean + :param allow_unauthenticated_identities: TRUE if the identity pool + supports unauthenticated logins. + + :type supported_login_providers: map + :param supported_login_providers: Optional key:value pairs mapping + provider names to provider app IDs. + + :type developer_provider_name: string + :param developer_provider_name: The "domain" by which Cognito will + refer to your users. 
+ + :type open_id_connect_provider_ar_ns: list + :param open_id_connect_provider_ar_ns: + + """ + params = { + 'IdentityPoolId': identity_pool_id, + 'IdentityPoolName': identity_pool_name, + 'AllowUnauthenticatedIdentities': allow_unauthenticated_identities, + } + if supported_login_providers is not None: + params['SupportedLoginProviders'] = supported_login_providers + if developer_provider_name is not None: + params['DeveloperProviderName'] = developer_provider_name + if open_id_connect_provider_ar_ns is not None: + params['OpenIdConnectProviderARNs'] = open_id_connect_provider_ar_ns + return self.make_request(action='UpdateIdentityPool', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cognito/sync/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/cognito/sync/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3d48bd18ee71904850ad8cd0ce3aacd0008dbb92 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cognito/sync/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the Amazon Cognito Sync service. 
+ + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.cognito.sync.layer1 import CognitoSyncConnection + return get_regions('cognito-sync', connection_cls=CognitoSyncConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cognito/sync/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/cognito/sync/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..3e83c3ca8f17e28ab666cefeab1186d2171f229a --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cognito/sync/exceptions.py @@ -0,0 +1,54 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import BotoServerError + + +class LimitExceededException(BotoServerError): + pass + + +class ResourceConflictException(BotoServerError): + pass + + +class InvalidConfigurationException(BotoServerError): + pass + + +class TooManyRequestsException(BotoServerError): + pass + + +class InvalidParameterException(BotoServerError): + pass + + +class ResourceNotFoundException(BotoServerError): + pass + + +class InternalErrorException(BotoServerError): + pass + + +class NotAuthorizedException(BotoServerError): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/cognito/sync/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/cognito/sync/layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..59e9d953cd6589704c55aaaf847b81ef6fff746c --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/cognito/sync/layer1.py @@ -0,0 +1,494 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.compat import json +from boto.exception import JSONResponseError +from boto.connection import AWSAuthConnection +from boto.regioninfo import RegionInfo +from boto.cognito.sync import exceptions + + +class CognitoSyncConnection(AWSAuthConnection): + """ + Amazon Cognito Sync + Amazon Cognito Sync provides an AWS service and client library + that enable cross-device syncing of application-related user data. + High-level client libraries are available for both iOS and + Android. You can use these libraries to persist data locally so + that it's available even if the device is offline. Developer + credentials don't need to be stored on the mobile device to access + the service. You can use Amazon Cognito to obtain a normalized + user ID and credentials. User data is persisted in a dataset that + can store up to 1 MB of key-value pairs, and you can have up to 20 + datasets per user identity. + + With Amazon Cognito Sync, the data stored for each identity is + accessible only to credentials assigned to that identity. In order + to use the Cognito Sync service, you need to make API calls using + credentials retrieved with `Amazon Cognito Identity service`_. + """ + APIVersion = "2014-06-30" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "cognito-sync.us-east-1.amazonaws.com" + ResponseError = JSONResponseError + + _faults = { + "LimitExceededException": exceptions.LimitExceededException, + "ResourceConflictException": exceptions.ResourceConflictException, + "InvalidConfigurationException": exceptions.InvalidConfigurationException, + "TooManyRequestsException": exceptions.TooManyRequestsException, + "InvalidParameterException": exceptions.InvalidParameterException, + "ResourceNotFoundException": exceptions.ResourceNotFoundException, + "InternalErrorException": exceptions.InternalErrorException, + "NotAuthorizedException": exceptions.NotAuthorizedException, + } + + + def __init__(self, **kwargs): + region = kwargs.get('region') + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + else: + del kwargs['region'] + kwargs['host'] = region.endpoint + super(CognitoSyncConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def delete_dataset(self, identity_pool_id, identity_id, dataset_name): + """ + Deletes the specific dataset. The dataset will be deleted + permanently, and the action can't be undone. Datasets that + this dataset was merged with will no longer report the merge. + Any subsequent operation on this dataset will result in a + ResourceNotFoundException. + + :type identity_pool_id: string + :param identity_pool_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. GUID generation is unique within a region. + + :type identity_id: string + :param identity_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. GUID generation is unique within a region.
+ + :type dataset_name: string + :param dataset_name: A string of up to 128 characters. Allowed + characters are a-z, A-Z, 0-9, '_' (underscore), '-' (dash), and '.' + (dot). + + """ + + uri = '/identitypools/{0}/identities/{1}/datasets/{2}'.format( + identity_pool_id, identity_id, dataset_name) + return self.make_request('DELETE', uri, expected_status=200) + + def describe_dataset(self, identity_pool_id, identity_id, dataset_name): + """ + Gets metadata about a dataset by identity and dataset name. + The credentials used to make this API call need to have access + to the identity data. With Amazon Cognito Sync, each identity + has access only to its own data. You should use Amazon Cognito + Identity service to retrieve the credentials necessary to make + this API call. + + :type identity_pool_id: string + :param identity_pool_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. GUID generation is unique within a region. + + :type identity_id: string + :param identity_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. GUID generation is unique within a region. + + :type dataset_name: string + :param dataset_name: A string of up to 128 characters. Allowed + characters are a-z, A-Z, 0-9, '_' (underscore), '-' (dash), and '.' + (dot). + + """ + + uri = '/identitypools/{0}/identities/{1}/datasets/{2}'.format( + identity_pool_id, identity_id, dataset_name) + return self.make_request('GET', uri, expected_status=200) + + def describe_identity_pool_usage(self, identity_pool_id): + """ + Gets usage details (for example, data storage) about a + particular identity pool. + + :type identity_pool_id: string + :param identity_pool_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. GUID generation is unique within a region. + + """ + + uri = '/identitypools/{0}'.format(identity_pool_id) + return self.make_request('GET', uri, expected_status=200) + + def describe_identity_usage(self, identity_pool_id, identity_id): + """ + Gets usage information for an identity, including number of + datasets and data usage. + + :type identity_pool_id: string + :param identity_pool_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. GUID generation is unique within a region. + + :type identity_id: string + :param identity_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. GUID generation is unique within a region. + + """ + + uri = '/identitypools/{0}/identities/{1}'.format( + identity_pool_id, identity_id) + return self.make_request('GET', uri, expected_status=200) + + def get_identity_pool_configuration(self, identity_pool_id): + """ + Gets the configuration settings of an identity pool. + + :type identity_pool_id: string + :param identity_pool_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. This is the ID of the pool for which to return a + configuration. + + """ + + uri = '/identitypools/{0}/configuration'.format(identity_pool_id) + return self.make_request('GET', uri, expected_status=200) + + def list_datasets(self, identity_pool_id, identity_id, next_token=None, + max_results=None): + """ + Lists datasets for an identity. The credentials used to make + this API call need to have access to the identity data. 
With + Amazon Cognito Sync, each identity has access only to its own + data. You should use Amazon Cognito Identity service to + retrieve the credentials necessary to make this API call. + + :type identity_pool_id: string + :param identity_pool_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. GUID generation is unique within a region. + + :type identity_id: string + :param identity_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. GUID generation is unique within a region. + + :type next_token: string + :param next_token: A pagination token for obtaining the next page of + results. + + :type max_results: integer + :param max_results: The maximum number of results to be returned. + + """ + + uri = '/identitypools/{0}/identities/{1}/datasets'.format( + identity_pool_id, identity_id) + params = {} + headers = {} + query_params = {} + if next_token is not None: + query_params['nextToken'] = next_token + if max_results is not None: + query_params['maxResults'] = max_results + return self.make_request('GET', uri, expected_status=200, + data=json.dumps(params), headers=headers, + params=query_params) + + def list_identity_pool_usage(self, next_token=None, max_results=None): + """ + Gets a list of identity pools registered with Cognito. + + :type next_token: string + :param next_token: A pagination token for obtaining the next page of + results. + + :type max_results: integer + :param max_results: The maximum number of results to be returned. + + """ + + uri = '/identitypools' + params = {} + headers = {} + query_params = {} + if next_token is not None: + query_params['nextToken'] = next_token + if max_results is not None: + query_params['maxResults'] = max_results + return self.make_request('GET', uri, expected_status=200, + data=json.dumps(params), headers=headers, + params=query_params) + + def list_records(self, identity_pool_id, identity_id, dataset_name, + last_sync_count=None, next_token=None, max_results=None, + sync_session_token=None): + """ + Gets paginated records, optionally changed after a particular + sync count for a dataset and identity. The credentials used to + make this API call need to have access to the identity data. + With Amazon Cognito Sync, each identity has access only to its + own data. You should use Amazon Cognito Identity service to + retrieve the credentials necessary to make this API call. + + :type identity_pool_id: string + :param identity_pool_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. GUID generation is unique within a region. + + :type identity_id: string + :param identity_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. GUID generation is unique within a region. + + :type dataset_name: string + :param dataset_name: A string of up to 128 characters. Allowed + characters are a-z, A-Z, 0-9, '_' (underscore), '-' (dash), and '.' + (dot). + + :type last_sync_count: long + :param last_sync_count: The last server sync count for this record. + + :type next_token: string + :param next_token: A pagination token for obtaining the next page of + results. + + :type max_results: integer + :param max_results: The maximum number of results to be returned. + + :type sync_session_token: string + :param sync_session_token: A token containing a session ID, identity + ID, and expiration. 
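+
+        A minimal usage sketch (the pool ID, identity ID, and dataset name
+        are placeholders); pass the returned ``NextToken`` back in as
+        ``next_token`` to fetch the next page::
+
+            from boto.cognito.sync.layer1 import CognitoSyncConnection
+
+            conn = CognitoSyncConnection()
+            page = conn.list_records(
+                'us-east-1:EXAMPLE-POOL-ID',
+                'us-east-1:EXAMPLE-IDENTITY-ID',
+                'myDataset', last_sync_count=0)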
+ + """ + + uri = '/identitypools/{0}/identities/{1}/datasets/{2}/records'.format( + identity_pool_id, identity_id, dataset_name) + params = {} + headers = {} + query_params = {} + if last_sync_count is not None: + query_params['lastSyncCount'] = last_sync_count + if next_token is not None: + query_params['nextToken'] = next_token + if max_results is not None: + query_params['maxResults'] = max_results + if sync_session_token is not None: + query_params['syncSessionToken'] = sync_session_token + return self.make_request('GET', uri, expected_status=200, + data=json.dumps(params), headers=headers, + params=query_params) + + def register_device(self, identity_pool_id, identity_id, platform, token): + """ + Registers a device to receive push sync notifications. + + :type identity_pool_id: string + :param identity_pool_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. Here, the ID of the pool that the identity belongs to. + + :type identity_id: string + :param identity_id: The unique ID for this identity. + + :type platform: string + :param platform: The SNS platform type (e.g. GCM, SDM, APNS, + APNS_SANDBOX). + + :type token: string + :param token: The push token. + + """ + + uri = '/identitypools/{0}/identity/{1}/device'.format( + identity_pool_id, identity_id) + params = {'Platform': platform, 'Token': token, } + headers = {} + query_params = {} + return self.make_request('POST', uri, expected_status=200, + data=json.dumps(params), headers=headers, + params=query_params) + + def set_identity_pool_configuration(self, identity_pool_id, + push_sync=None): + """ + Sets the necessary configuration for push sync. + + :type identity_pool_id: string + :param identity_pool_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. This is the ID of the pool to modify. + + :type push_sync: dict + :param push_sync: Configuration options to be applied to the identity + pool. + + """ + + uri = '/identitypools/{0}/configuration'.format(identity_pool_id) + params = {} + headers = {} + query_params = {} + if push_sync is not None: + params['PushSync'] = push_sync + return self.make_request('POST', uri, expected_status=200, + data=json.dumps(params), headers=headers, + params=query_params) + + def subscribe_to_dataset(self, identity_pool_id, identity_id, + dataset_name, device_id): + """ + Subscribes to receive notifications when a dataset is modified + by another device. + + :type identity_pool_id: string + :param identity_pool_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. The ID of the pool to which the identity belongs. + + :type identity_id: string + :param identity_id: Unique ID for this identity. + + :type dataset_name: string + :param dataset_name: The name of the dataset to subcribe to. + + :type device_id: string + :param device_id: The unique ID generated for this device by Cognito. + + """ + + uri = '/identitypools/{0}/identities/{1}/datasets/{2}/subscriptions/{3}'.format( + identity_pool_id, identity_id, dataset_name, device_id) + return self.make_request('POST', uri, expected_status=200) + + def unsubscribe_from_dataset(self, identity_pool_id, identity_id, + dataset_name, device_id): + """ + Unsubscribe from receiving notifications when a dataset is + modified by another device. 
+
+        :type identity_pool_id: string
+        :param identity_pool_id: A name-spaced GUID (for example, us-
+            east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
+            Cognito. The ID of the pool to which this identity belongs.
+
+        :type identity_id: string
+        :param identity_id: Unique ID for this identity.
+
+        :type dataset_name: string
+        :param dataset_name: The name of the dataset from which to
+            unsubscribe.
+
+        :type device_id: string
+        :param device_id: The unique ID generated for this device by Cognito.
+
+        """
+
+        uri = '/identitypools/{0}/identities/{1}/datasets/{2}/subscriptions/{3}'.format(
+            identity_pool_id, identity_id, dataset_name, device_id)
+        return self.make_request('DELETE', uri, expected_status=200)
+
+    def update_records(self, identity_pool_id, identity_id, dataset_name,
+                       sync_session_token, device_id=None,
+                       record_patches=None, client_context=None):
+        """
+        Posts updates to records and adds and deletes records for a
+        dataset and user. The credentials used to make this API call
+        need to have access to the identity data. With Amazon Cognito
+        Sync, each identity has access only to its own data. You
+        should use Amazon Cognito Identity service to retrieve the
+        credentials necessary to make this API call.
+
+        :type identity_pool_id: string
+        :param identity_pool_id: A name-spaced GUID (for example, us-
+            east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
+            Cognito. GUID generation is unique within a region.
+
+        :type identity_id: string
+        :param identity_id: A name-spaced GUID (for example, us-
+            east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
+            Cognito. GUID generation is unique within a region.
+
+        :type dataset_name: string
+        :param dataset_name: A string of up to 128 characters. Allowed
+            characters are a-z, A-Z, 0-9, '_' (underscore), '-' (dash), and '.'
+            (dot).
+
+        :type device_id: string
+        :param device_id: The unique ID generated for this device by Cognito.
+
+        :type record_patches: list
+        :param record_patches: A list of patch operations.
+
+        :type sync_session_token: string
+        :param sync_session_token: The SyncSessionToken returned by a previous
+            call to ListRecords for this dataset and identity.
+
+        :type client_context: string
+        :param client_context: Intended to supply a device ID that will
+            populate the `lastModifiedBy` field referenced in other methods.
+            The `ClientContext` field is not yet implemented.
+ + """ + + uri = '/identitypools/{0}/identities/{1}/datasets/{2}'.format( + identity_pool_id, identity_id, dataset_name) + params = {'SyncSessionToken': sync_session_token, } + headers = {} + query_params = {} + if device_id is not None: + params['DeviceId'] = device_id + if record_patches is not None: + params['RecordPatches'] = record_patches + if client_context is not None: + headers['x-amz-Client-Context'] = client_context + if client_context is not None: + headers['x-amz-Client-Context'] = client_context + return self.make_request('POST', uri, expected_status=200, + data=json.dumps(params), headers=headers, + params=query_params) + + def make_request(self, verb, resource, headers=None, data='', + expected_status=None, params=None): + if headers is None: + headers = {} + response = AWSAuthConnection.make_request( + self, verb, resource, headers=headers, data=data, params=params) + body = json.loads(response.read().decode('utf-8')) + if response.status == expected_status: + return body + else: + error_type = response.getheader('x-amzn-ErrorType').split(':')[0] + error_class = self._faults.get(error_type, self.ResponseError) + raise error_class(response.status, response.reason, body) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/compat.py b/desktop/core/ext-py/boto-2.38.0/boto/compat.py new file mode 100644 index 0000000000000000000000000000000000000000..a7503f013b4780b2ae1ee46b5bcfaac631f1b6a1 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/compat.py @@ -0,0 +1,67 @@ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import os + +# This allows boto modules to say "from boto.compat import json". This is +# preferred so that all modules don't have to repeat this idiom. +try: + import simplejson as json +except ImportError: + import json + + +# Switch to use encodebytes, which deprecates encodestring in Python 3 +try: + from base64 import encodebytes +except ImportError: + from base64 import encodestring as encodebytes + + +# If running in Google App Engine there is no "user" and +# os.path.expanduser() will fail. Attempt to detect this case and use a +# no-op expanduser function in this case. +try: + os.path.expanduser('~') + expanduser = os.path.expanduser +except (AttributeError, ImportError): + # This is probably running on App Engine. 
+ expanduser = (lambda x: x) + +from boto.vendored import six + +from boto.vendored.six import BytesIO, StringIO +from boto.vendored.six.moves import filter, http_client, map, _thread, \ + urllib, zip +from boto.vendored.six.moves.queue import Queue +from boto.vendored.six.moves.urllib.parse import parse_qs, quote, unquote, \ + urlparse, urlsplit +from boto.vendored.six.moves.urllib.request import urlopen + +if six.PY3: + # StandardError was removed, so use the base exception type instead + StandardError = Exception + long_type = int + from configparser import ConfigParser +else: + StandardError = StandardError + long_type = long + from ConfigParser import SafeConfigParser as ConfigParser diff --git a/desktop/core/ext-py/boto-2.38.0/boto/configservice/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/configservice/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d18f8f8e71a82a1989ff1c2754e4c2db7a2b7704 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/configservice/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the AWS Config service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.configservice.layer1 import ConfigServiceConnection + return get_regions('configservice', connection_cls=ConfigServiceConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/configservice/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/configservice/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..58aa550f9cd9b165a42667b8d7135033fda20174 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/configservice/exceptions.py @@ -0,0 +1,103 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. 
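The compat shim above exists so the rest of boto imports its portability
helpers from one place instead of branching on the Python version everywhere.
An illustrative sketch under that assumption, combined with the
connect_to_region helper from boto/configservice/__init__.py; the region name
is a placeholder.

    from boto.compat import json, ConfigParser, long_type
    import boto.configservice

    payload = json.dumps({'answer': long_type(42)})  # long on py2, int on py3

    conn = boto.configservice.connect_to_region('us-east-1')
    if conn is None:
        raise ValueError('no such region')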
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.exception import BotoServerError + + +class InvalidLimitException(BotoServerError): + pass + + +class NoSuchBucketException(BotoServerError): + pass + + +class InvalidSNSTopicARNException(BotoServerError): + pass + + +class ResourceNotDiscoveredException(BotoServerError): + pass + + +class MaxNumberOfDeliveryChannelsExceededException(BotoServerError): + pass + + +class LastDeliveryChannelDeleteFailedException(BotoServerError): + pass + + +class InsufficientDeliveryPolicyException(BotoServerError): + pass + + +class InvalidRoleException(BotoServerError): + pass + + +class InvalidTimeRangeException(BotoServerError): + pass + + +class NoSuchDeliveryChannelException(BotoServerError): + pass + + +class NoSuchConfigurationRecorderException(BotoServerError): + pass + + +class InvalidS3KeyPrefixException(BotoServerError): + pass + + +class InvalidDeliveryChannelNameException(BotoServerError): + pass + + +class NoRunningConfigurationRecorderException(BotoServerError): + pass + + +class ValidationException(BotoServerError): + pass + + +class NoAvailableConfigurationRecorderException(BotoServerError): + pass + + +class InvalidNextTokenException(BotoServerError): + pass + + +class InvalidConfigurationRecorderNameException(BotoServerError): + pass + + +class NoAvailableDeliveryChannelException(BotoServerError): + pass + + +class MaxNumberOfConfigurationRecordersExceededException(BotoServerError): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/configservice/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/configservice/layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..fe598d98807b23ce6cd2ea02d1ed316da258ada2 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/configservice/layer1.py @@ -0,0 +1,381 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.configservice import exceptions + + +class ConfigServiceConnection(AWSQueryConnection): + """ + AWS Config + AWS Config provides a way to keep track of the configurations of + all the AWS resources associated with your AWS account. You can + use AWS Config to get the current and historical configurations of + each AWS resource and also to get information about the + relationship between the resources. An AWS resource can be an + Amazon Compute Cloud (Amazon EC2) instance, an Elastic Block Store + (EBS) volume, an Elastic network Interface (ENI), or a security + group. For a complete list of resources currently supported by AWS + Config, see `Supported AWS Resources`_. + + You can access and manage AWS Config through the AWS Management + Console, the AWS Command Line Interface (AWS CLI), the AWS Config + API, or the AWS SDKs for AWS Config + + This reference guide contains documentation for the AWS Config API + and the AWS CLI commands that you can use to manage AWS Config. + + The AWS Config API uses the Signature Version 4 protocol for + signing requests. For more information about how to sign a request + with this protocol, see `Signature Version 4 Signing Process`_. + + For detailed information about AWS Config features and their + associated actions or commands, as well as how to work with AWS + Management Console, see `What Is AWS Config?`_ in the AWS Config + Developer Guide . 
+ """ + APIVersion = "2014-11-12" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "config.us-east-1.amazonaws.com" + ServiceName = "ConfigService" + TargetPrefix = "StarlingDoveService" + ResponseError = JSONResponseError + + _faults = { + "InvalidLimitException": exceptions.InvalidLimitException, + "NoSuchBucketException": exceptions.NoSuchBucketException, + "InvalidSNSTopicARNException": exceptions.InvalidSNSTopicARNException, + "ResourceNotDiscoveredException": exceptions.ResourceNotDiscoveredException, + "MaxNumberOfDeliveryChannelsExceededException": exceptions.MaxNumberOfDeliveryChannelsExceededException, + "LastDeliveryChannelDeleteFailedException": exceptions.LastDeliveryChannelDeleteFailedException, + "InsufficientDeliveryPolicyException": exceptions.InsufficientDeliveryPolicyException, + "InvalidRoleException": exceptions.InvalidRoleException, + "InvalidTimeRangeException": exceptions.InvalidTimeRangeException, + "NoSuchDeliveryChannelException": exceptions.NoSuchDeliveryChannelException, + "NoSuchConfigurationRecorderException": exceptions.NoSuchConfigurationRecorderException, + "InvalidS3KeyPrefixException": exceptions.InvalidS3KeyPrefixException, + "InvalidDeliveryChannelNameException": exceptions.InvalidDeliveryChannelNameException, + "NoRunningConfigurationRecorderException": exceptions.NoRunningConfigurationRecorderException, + "ValidationException": exceptions.ValidationException, + "NoAvailableConfigurationRecorderException": exceptions.NoAvailableConfigurationRecorderException, + "InvalidNextTokenException": exceptions.InvalidNextTokenException, + "InvalidConfigurationRecorderNameException": exceptions.InvalidConfigurationRecorderNameException, + "NoAvailableDeliveryChannelException": exceptions.NoAvailableDeliveryChannelException, + "MaxNumberOfConfigurationRecordersExceededException": exceptions.MaxNumberOfConfigurationRecordersExceededException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(ConfigServiceConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def delete_delivery_channel(self, delivery_channel_name): + """ + Deletes the specified delivery channel. + + The delivery channel cannot be deleted if it is the only + delivery channel and the configuration recorder is still + running. To delete the delivery channel, stop the running + configuration recorder using the StopConfigurationRecorder + action. + + :type delivery_channel_name: string + :param delivery_channel_name: The name of the delivery channel to + delete. + + """ + params = {'DeliveryChannelName': delivery_channel_name, } + return self.make_request(action='DeleteDeliveryChannel', + body=json.dumps(params)) + + def deliver_config_snapshot(self, delivery_channel_name): + """ + Schedules delivery of a configuration snapshot to the Amazon + S3 bucket in the specified delivery channel. After the + delivery has started, AWS Config sends following notifications + using an Amazon SNS topic that you have specified. + + + + Notification of starting the delivery. + + Notification of delivery completed, if the delivery was + successfully completed. + + Notification of delivery failure, if the delivery failed to + complete. 
+
+        :type delivery_channel_name: string
+        :param delivery_channel_name: The name of the delivery channel through
+            which the snapshot is delivered.
+
+        """
+        params = {'deliveryChannelName': delivery_channel_name, }
+        return self.make_request(action='DeliverConfigSnapshot',
+                                 body=json.dumps(params))
+
+    def describe_configuration_recorder_status(self,
+                                               configuration_recorder_names=None):
+        """
+        Returns the current status of the specified configuration
+        recorder. If a configuration recorder is not specified, this
+        action returns the status of all configuration recorders
+        associated with the account.
+
+        :type configuration_recorder_names: list
+        :param configuration_recorder_names: The name(s) of the configuration
+            recorder. If the name is not specified, the action returns the
+            current status of all the configuration recorders associated with
+            the account.
+
+        """
+        params = {}
+        if configuration_recorder_names is not None:
+            params['ConfigurationRecorderNames'] = configuration_recorder_names
+        return self.make_request(action='DescribeConfigurationRecorderStatus',
+                                 body=json.dumps(params))
+
+    def describe_configuration_recorders(self,
+                                         configuration_recorder_names=None):
+        """
+        Returns the names of one or more specified configuration
+        recorders. If the recorder name is not specified, this action
+        returns the names of all the configuration recorders
+        associated with the account.
+
+        :type configuration_recorder_names: list
+        :param configuration_recorder_names: A list of configuration recorder
+            names.
+
+        """
+        params = {}
+        if configuration_recorder_names is not None:
+            params['ConfigurationRecorderNames'] = configuration_recorder_names
+        return self.make_request(action='DescribeConfigurationRecorders',
+                                 body=json.dumps(params))
+
+    def describe_delivery_channel_status(self, delivery_channel_names=None):
+        """
+        Returns the current status of the specified delivery channel.
+        If a delivery channel is not specified, this action returns
+        the current status of all delivery channels associated with
+        the account.
+
+        :type delivery_channel_names: list
+        :param delivery_channel_names: A list of delivery channel names.
+
+        """
+        params = {}
+        if delivery_channel_names is not None:
+            params['DeliveryChannelNames'] = delivery_channel_names
+        return self.make_request(action='DescribeDeliveryChannelStatus',
+                                 body=json.dumps(params))
+
+    def describe_delivery_channels(self, delivery_channel_names=None):
+        """
+        Returns details about the specified delivery channel. If a
+        delivery channel is not specified, this action returns the
+        details of all delivery channels associated with the account.
+
+        :type delivery_channel_names: list
+        :param delivery_channel_names: A list of delivery channel names.
+
+        """
+        params = {}
+        if delivery_channel_names is not None:
+            params['DeliveryChannelNames'] = delivery_channel_names
+        return self.make_request(action='DescribeDeliveryChannels',
+                                 body=json.dumps(params))
+
+    def get_resource_config_history(self, resource_type, resource_id,
+                                    later_time=None, earlier_time=None,
+                                    chronological_order=None, limit=None,
+                                    next_token=None):
+        """
+        Returns a list of configuration items for the specified
+        resource. The list contains details about each state of the
+        resource during the specified time interval. You can specify a
+        `limit` on the number of results returned on the page. If a
+        limit is specified, a `nextToken` is returned as part of the
+        result that you can use to continue this request.
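A hypothetical call sketch for get_resource_config_history: `conn` is assumed
to be a ConfigServiceConnection, the resource type and ID are placeholders,
and the response keys follow the AWS Config API.

    history = conn.get_resource_config_history(
        resource_type='AWS::EC2::SecurityGroup',  # placeholder type
        resource_id='sg-xxxxxx',                  # placeholder ID
        limit=25)
    for item in history.get('configurationItems', []):
        print(item.get('configurationItemCaptureTime'))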
+
+        :type resource_type: string
+        :param resource_type: The resource type.
+
+        :type resource_id: string
+        :param resource_id: The ID of the resource (for example, `sg-xxxxxx`).
+
+        :type later_time: timestamp
+        :param later_time: The time stamp that indicates a later time. If not
+            specified, the current time is used.
+
+        :type earlier_time: timestamp
+        :param earlier_time: The time stamp that indicates an earlier time. If
+            not specified, the action returns paginated results that contain
+            configuration items that start from when the first configuration
+            item was recorded.
+
+        :type chronological_order: string
+        :param chronological_order: The chronological order for configuration
+            items listed. By default the results are listed in reverse
+            chronological order.
+
+        :type limit: integer
+        :param limit: The maximum number of configuration items returned in
+            each page. The default is 10. You cannot specify a limit greater
+            than 100.
+
+        :type next_token: string
+        :param next_token: An optional parameter used for pagination of the
+            results.
+
+        """
+        params = {
+            'resourceType': resource_type,
+            'resourceId': resource_id,
+        }
+        if later_time is not None:
+            params['laterTime'] = later_time
+        if earlier_time is not None:
+            params['earlierTime'] = earlier_time
+        if chronological_order is not None:
+            params['chronologicalOrder'] = chronological_order
+        if limit is not None:
+            params['limit'] = limit
+        if next_token is not None:
+            params['nextToken'] = next_token
+        return self.make_request(action='GetResourceConfigHistory',
+                                 body=json.dumps(params))
+
+    def put_configuration_recorder(self, configuration_recorder):
+        """
+        Creates a new configuration recorder to record the resource
+        configurations.
+
+        You can use this action to change the role (`roleARN`) of an
+        existing recorder. To change the role, call the action on the
+        existing configuration recorder and specify a role.
+
+        :type configuration_recorder: dict
+        :param configuration_recorder: The configuration recorder object that
+            records each configuration change made to the resources.
+
+        """
+        params = {'ConfigurationRecorder': configuration_recorder, }
+        return self.make_request(action='PutConfigurationRecorder',
+                                 body=json.dumps(params))
+
+    def put_delivery_channel(self, delivery_channel):
+        """
+        Creates a new delivery channel object to deliver the
+        configuration information to an Amazon S3 bucket, and to an
+        Amazon SNS topic.
+
+        You can use this action to change the Amazon S3 bucket or an
+        Amazon SNS topic of the existing delivery channel. To change
+        the Amazon S3 bucket or an Amazon SNS topic, call this action
+        and specify the changed values for the S3 bucket and the SNS
+        topic. If you specify a different value for either the S3
+        bucket or the SNS topic, this action will keep the existing
+        value for the parameter that is not changed.
+
+        :type delivery_channel: dict
+        :param delivery_channel: The configuration delivery channel object that
+            delivers the configuration information to an Amazon S3 bucket, and
+            to an Amazon SNS topic.
+
+        """
+        params = {'DeliveryChannel': delivery_channel, }
+        return self.make_request(action='PutDeliveryChannel',
+                                 body=json.dumps(params))
+
+    def start_configuration_recorder(self, configuration_recorder_name):
+        """
+        Starts recording configurations of all the resources
+        associated with the account.
+
+        You must have created at least one delivery channel to
+        successfully start the configuration recorder.
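Taken together, the calls shown here form the usual bootstrap sequence:
register a recorder, give it a delivery channel, then start it. A minimal
sketch, assuming `conn` is a ConfigServiceConnection; the role ARN, bucket,
and topic are placeholders, and the dict shapes follow the AWS Config API.

    conn.put_configuration_recorder({
        'name': 'default',
        'roleARN': 'arn:aws:iam::123456789012:role/config-role'})
    conn.put_delivery_channel({
        'name': 'default',
        's3BucketName': 'my-config-bucket',
        'snsTopicARN': 'arn:aws:sns:us-east-1:123456789012:config-topic'})
    conn.start_configuration_recorder('default')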
+ + :type configuration_recorder_name: string + :param configuration_recorder_name: The name of the recorder object + that records each configuration change made to the resources. + + """ + params = { + 'ConfigurationRecorderName': configuration_recorder_name, + } + return self.make_request(action='StartConfigurationRecorder', + body=json.dumps(params)) + + def stop_configuration_recorder(self, configuration_recorder_name): + """ + Stops recording configurations of all the resources associated + with the account. + + :type configuration_recorder_name: string + :param configuration_recorder_name: The name of the recorder object + that records each configuration change made to the resources. + + """ + params = { + 'ConfigurationRecorderName': configuration_recorder_name, + } + return self.make_request(action='StopConfigurationRecorder', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/connection.py b/desktop/core/ext-py/boto-2.38.0/boto/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..28bb320ab4947d4429830486bc26bde2f7fb0103 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/connection.py @@ -0,0 +1,1227 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# Copyright (c) 2010 Google +# Copyright (c) 2008 rPath, Inc. +# Copyright (c) 2009 The Echo Nest Corporation +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# Copyright (c) 2011, Nexenta Systems Inc. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +# +# Parts of this code were copied or derived from sample code supplied by AWS. 
+# The following notice applies to that code.
+#
+# This software code is made available "AS IS" without warranties of any
+# kind. You may copy, display, modify and redistribute the software
+# code either by itself or as incorporated into your code; provided that
+# you do not remove any proprietary notices. Your use of this software
+# code is at your own risk and you waive any claim against Amazon
+# Digital Services, Inc. or its affiliates with respect to your use of
+# this software code. (c) 2006 Amazon Digital Services, Inc. or its
+# affiliates.
+
+"""
+Handles basic connections to AWS
+"""
+from datetime import datetime
+import errno
+import os
+import random
+import re
+import socket
+import sys
+import time
+import xml.sax
+import copy
+
+from boto import auth
+from boto import auth_handler
+import boto
+import boto.utils
+import boto.handler
+import boto.cacerts
+
+from boto import config, UserAgent
+from boto.compat import six, http_client, urlparse, quote, encodebytes
+from boto.exception import AWSConnectionError
+from boto.exception import BotoClientError
+from boto.exception import BotoServerError
+from boto.exception import PleaseRetryException
+from boto.provider import Provider
+from boto.resultset import ResultSet
+
+HAVE_HTTPS_CONNECTION = False
+try:
+    import ssl
+    from boto import https_connection
+    # Google App Engine runs on Python 2.5 so doesn't have ssl.SSLError.
+    if hasattr(ssl, 'SSLError'):
+        HAVE_HTTPS_CONNECTION = True
+except ImportError:
+    pass
+
+try:
+    import threading
+except ImportError:
+    import dummy_threading as threading
+
+ON_APP_ENGINE = all(key in os.environ for key in (
+    'USER_IS_ADMIN', 'CURRENT_VERSION_ID', 'APPLICATION_ID'))
+
+PORTS_BY_SECURITY = {True: 443,
+                     False: 80}
+
+DEFAULT_CA_CERTS_FILE = os.path.join(os.path.dirname(os.path.abspath(boto.cacerts.__file__)), "cacerts.txt")
+
+
+class HostConnectionPool(object):
+
+    """
+    A pool of connections for one remote (host,port,is_secure).
+
+    When connections are added to the pool, they are put into a
+    pending queue. The _mexe method returns connections to the pool
+    before the response body has been read, so the connections aren't
+    ready to send another request yet. They stay in the pending queue
+    until they are ready for another request, at which point they are
+    returned to the pool of ready connections.
+
+    The pool of ready connections is an ordered list of
+    (connection,time) pairs, where the time is the time the connection
+    was returned from _mexe. After a certain period of time,
+    connections are considered stale, and discarded rather than being
+    reused. This saves having to wait for the connection to time out
+    if AWS has decided to close it on the other end because of
+    inactivity.
+
+    Thread Safety:
+
+        This class is used only from ConnectionPool while its mutex
+        is held.
+    """
+
+    def __init__(self):
+        self.queue = []
+
+    def size(self):
+        """
+        Returns the number of connections in the pool for this host.
+        Some of the connections may still be in use, and may not be
+        ready to be returned by get().
+        """
+        return len(self.queue)
+
+    def put(self, conn):
+        """
+        Adds a connection to the pool, along with the time it was
+        added.
+        """
+        self.queue.append((conn, time.time()))
+
+    def get(self):
+        """
+        Returns the next connection in this pool that is ready to be
+        reused. Returns None if there aren't any.
+        """
+        # Discard ready connections that are too old.
+        self.clean()
+
+        # Return the first connection that is ready, and remove it
+        # from the queue.
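Illustrative only, not part of the patch: the staleness rule that clean() and
_pair_stale() (below) apply to each queued (connection, time) pair, restated
standalone.

    import time

    STALE_DURATION = 60.0                    # ConnectionPool's default
    pair = (None, time.time() - 61)          # returned to the pool 61s ago
    stale = pair[1] + STALE_DURATION < time.time()   # True: would be dropped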
+        # Connections that aren't ready are returned
+        # to the end of the queue with an updated time, on the
+        # assumption that somebody is actively reading the response.
+        for _ in range(len(self.queue)):
+            (conn, _) = self.queue.pop(0)
+            if self._conn_ready(conn):
+                return conn
+            else:
+                self.put(conn)
+        return None
+
+    def _conn_ready(self, conn):
+        """
+        There is a nice state diagram at the top of http_client.py. It
+        indicates that once the response headers have been read (which
+        _mexe does before adding the connection to the pool), a
+        response is attached to the connection, and it stays there
+        until it's done reading. This isn't entirely true: even after
+        the client is done reading, the response may be closed, but
+        not removed from the connection yet.
+
+        This is ugly, reading a private instance variable, but the
+        state we care about isn't available in any public methods.
+        """
+        if ON_APP_ENGINE:
+            # Google AppEngine implementation of HTTPConnection doesn't contain
+            # _HTTPConnection__response attribute. Moreover, it's not possible
+            # to determine if given connection is ready. Reusing connections
+            # simply doesn't make sense with App Engine urlfetch service.
+            return False
+        else:
+            response = getattr(conn, '_HTTPConnection__response', None)
+            return (response is None) or response.isclosed()
+
+    def clean(self):
+        """
+        Get rid of stale connections.
+        """
+        # Note that we do not close the connection here -- somebody
+        # may still be reading from it.
+        while len(self.queue) > 0 and self._pair_stale(self.queue[0]):
+            self.queue.pop(0)
+
+    def _pair_stale(self, pair):
+        """
+        Returns true if the (connection,time) pair is too old to be
+        used.
+        """
+        (_conn, return_time) = pair
+        now = time.time()
+        return return_time + ConnectionPool.STALE_DURATION < now
+
+
+class ConnectionPool(object):
+
+    """
+    A connection pool that expires connections after a fixed period of
+    time. This saves time spent waiting for a connection that AWS has
+    timed out on the other end.
+
+    This class is thread-safe.
+    """
+
+    #
+    # The amount of time between calls to clean.
+    #
+
+    CLEAN_INTERVAL = 5.0
+
+    #
+    # How long before a connection becomes "stale" and won't be reused
+    # again. The intention is that this time is less than the timeout
+    # period that AWS uses, so we'll never try to reuse a connection
+    # and find that AWS is timing it out.
+    #
+    # Experimentation in July 2011 shows that AWS starts timing things
+    # out after three minutes. The 60 seconds here is conservative so
+    # we should never hit that 3-minute timeout.
+    #
+
+    STALE_DURATION = 60.0
+
+    def __init__(self):
+        # Mapping from (host,port,is_secure) to HostConnectionPool.
+        # If a pool becomes empty, it is removed.
+        self.host_to_pool = {}
+        # The last time the pool was cleaned.
+        self.last_clean_time = 0.0
+        self.mutex = threading.Lock()
+        ConnectionPool.STALE_DURATION = \
+            config.getfloat('Boto', 'connection_stale_duration',
+                            ConnectionPool.STALE_DURATION)
+
+    def __getstate__(self):
+        pickled_dict = copy.copy(self.__dict__)
+        pickled_dict['host_to_pool'] = {}
+        del pickled_dict['mutex']
+        return pickled_dict
+
+    def __setstate__(self, dct):
+        self.__init__()
+
+    def size(self):
+        """
+        Returns the number of connections in the pool.
+        """
+        return sum(pool.size() for pool in self.host_to_pool.values())
+
+    def get_http_connection(self, host, port, is_secure):
+        """
+        Gets a connection from the pool for the named host. Returns
+        None if there is no connection that can be reused.
+        It's the caller's
+        responsibility to call close() on the connection when it's no longer
+        needed.
+        """
+        self.clean()
+        with self.mutex:
+            key = (host, port, is_secure)
+            if key not in self.host_to_pool:
+                return None
+            return self.host_to_pool[key].get()
+
+    def put_http_connection(self, host, port, is_secure, conn):
+        """
+        Adds a connection to the pool of connections that can be
+        reused for the named host.
+        """
+        with self.mutex:
+            key = (host, port, is_secure)
+            if key not in self.host_to_pool:
+                self.host_to_pool[key] = HostConnectionPool()
+            self.host_to_pool[key].put(conn)
+
+    def clean(self):
+        """
+        Clean up the stale connections in all of the pools, and then
+        get rid of empty pools. Pools clean themselves every time a
+        connection is fetched; this cleaning takes care of pools that
+        aren't being used any more, so nothing is being gotten from
+        them.
+        """
+        with self.mutex:
+            now = time.time()
+            if self.last_clean_time + self.CLEAN_INTERVAL < now:
+                to_remove = []
+                for (host, pool) in self.host_to_pool.items():
+                    pool.clean()
+                    if pool.size() == 0:
+                        to_remove.append(host)
+                for host in to_remove:
+                    del self.host_to_pool[host]
+                self.last_clean_time = now
+
+
+class HTTPRequest(object):
+
+    def __init__(self, method, protocol, host, port, path, auth_path,
+                 params, headers, body):
+        """Represents an HTTP request.
+
+        :type method: string
+        :param method: The HTTP method name, 'GET', 'POST', 'PUT' etc.
+
+        :type protocol: string
+        :param protocol: The http protocol used, 'http' or 'https'.
+
+        :type host: string
+        :param host: Host to which the request is addressed, e.g. abc.com
+
+        :type port: int
+        :param port: port on which the request is being sent. Zero means unset,
+            in which case default port will be chosen.
+
+        :type path: string
+        :param path: URL path that is being accessed.
+
+        :type auth_path: string
+        :param auth_path: The part of the URL path used when creating the
+            authentication string.
+
+        :type params: dict
+        :param params: HTTP url query parameters, with key as name of
+            the param, and value as value of param.
+
+        :type headers: dict
+        :param headers: HTTP headers, with key as name of the header and value
+            as value of header.
+
+        :type body: string
+        :param body: Body of the HTTP request. If not present, will be None or
+            empty string ('').
+        """
+        self.method = method
+        self.protocol = protocol
+        self.host = host
+        self.port = port
+        self.path = path
+        if auth_path is None:
+            auth_path = path
+        self.auth_path = auth_path
+        self.params = params
+        # chunked Transfer-Encoding should act only on PUT request.
+        if headers and 'Transfer-Encoding' in headers and \
+                headers['Transfer-Encoding'] == 'chunked' and \
+                self.method != 'PUT':
+            self.headers = headers.copy()
+            del self.headers['Transfer-Encoding']
+        else:
+            self.headers = headers
+        self.body = body
+
+    def __str__(self):
+        return (('method:(%s) protocol:(%s) host(%s) port(%s) path(%s) '
+                 'params(%s) headers(%s) body(%s)') % (self.method,
+                 self.protocol, self.host, self.port, self.path, self.params,
+                 self.headers, self.body))
+
+    def authorize(self, connection, **kwargs):
+        if not getattr(self, '_headers_quoted', False):
+            for key in self.headers:
+                val = self.headers[key]
+                if isinstance(val, six.text_type):
+                    safe = '!"#$%&\'()*+,/:;<=>?@[\\]^`{|}~'
+                    self.headers[key] = quote(val.encode('utf-8'), safe)
+            setattr(self, '_headers_quoted', True)
+
+        self.headers['User-Agent'] = UserAgent
+
+        connection._auth_handler.add_auth(self, **kwargs)
+
+        # I'm not sure if this is still needed, now that add_auth is
+        # setting the content-length for POST requests.
+        if 'Content-Length' not in self.headers:
+            if 'Transfer-Encoding' not in self.headers or \
+                    self.headers['Transfer-Encoding'] != 'chunked':
+                self.headers['Content-Length'] = str(len(self.body))
+
+
+class HTTPResponse(http_client.HTTPResponse):
+
+    def __init__(self, *args, **kwargs):
+        http_client.HTTPResponse.__init__(self, *args, **kwargs)
+        self._cached_response = ''
+
+    def read(self, amt=None):
+        """Read the response.
+
+        This method does not have the same behavior as
+        http_client.HTTPResponse.read. Instead, if this method is called with
+        no ``amt`` arg, then the response body will be cached. Subsequent
+        calls to ``read()`` with no args **will return the cached response**.
+
+        """
+        if amt is None:
+            # The reason for doing this is that many places in boto call
+            # response.read() and expect to get the response body that they
+            # can then process. To make sure this always works as they expect
+            # we're caching the response so that multiple calls to read()
+            # will return the full body. Note that this behavior only
+            # happens if the amt arg is not specified.
+            if not self._cached_response:
+                self._cached_response = http_client.HTTPResponse.read(self)
+            return self._cached_response
+        else:
+            return http_client.HTTPResponse.read(self, amt)
+
+
+class AWSAuthConnection(object):
+    def __init__(self, host, aws_access_key_id=None,
+                 aws_secret_access_key=None,
+                 is_secure=True, port=None, proxy=None, proxy_port=None,
+                 proxy_user=None, proxy_pass=None, debug=0,
+                 https_connection_factory=None, path='/',
+                 provider='aws', security_token=None,
+                 suppress_consec_slashes=True,
+                 validate_certs=True, profile_name=None):
+        """
+        :type host: str
+        :param host: The host to make the connection to
+
+        :keyword str aws_access_key_id: Your AWS Access Key ID (provided by
+            Amazon). If none is specified, the value in your
+            ``AWS_ACCESS_KEY_ID`` environmental variable is used.
+        :keyword str aws_secret_access_key: Your AWS Secret Access Key
+            (provided by Amazon). If none is specified, the value in your
+            ``AWS_SECRET_ACCESS_KEY`` environmental variable is used.
+        :keyword str security_token: The security token associated with
+            temporary credentials issued by STS. Optional unless using
+            temporary credentials. If none is specified, the environment
+            variable ``AWS_SECURITY_TOKEN`` is used if defined.
+
+        :type is_secure: boolean
+        :param is_secure: Whether the connection is over SSL
+
+        :type https_connection_factory: list or tuple
+        :param https_connection_factory: A pair of an HTTP connection
+            factory and the exceptions to catch. The factory should have
+            a similar interface to L{http_client.HTTPSConnection}.
+
+        :param str proxy: Address/hostname for a proxy server
+
+        :type proxy_port: int
+        :param proxy_port: The port to use when connecting over a proxy
+
+        :type proxy_user: str
+        :param proxy_user: The username to connect with on the proxy
+
+        :type proxy_pass: str
+        :param proxy_pass: The password to use when connecting over a proxy.
+
+        :type port: int
+        :param port: The port to use to connect
+
+        :type suppress_consec_slashes: bool
+        :param suppress_consec_slashes: If provided, controls whether
+            consecutive slashes will be suppressed in key paths.
+
+        :type validate_certs: bool
+        :param validate_certs: Controls whether SSL certificates
+            will be validated or not. Defaults to True.
+
+        :type profile_name: str
+        :param profile_name: Override usual Credentials section in config
+            file to use a named set of keys instead.
+        """
+        self.suppress_consec_slashes = suppress_consec_slashes
+        self.num_retries = 6
+        # Override passed-in is_secure setting if value was defined in config.
+        if config.has_option('Boto', 'is_secure'):
+            is_secure = config.getboolean('Boto', 'is_secure')
+        self.is_secure = is_secure
+        # Whether or not to validate server certificates.
+        # The default is now to validate certificates. This can be
+        # overridden in the boto config file or by passing an
+        # explicit validate_certs parameter to the class constructor.
+        self.https_validate_certificates = config.getbool(
+            'Boto', 'https_validate_certificates',
+            validate_certs)
+        if self.https_validate_certificates and not HAVE_HTTPS_CONNECTION:
+            raise BotoClientError(
+                "SSL server certificate validation is enabled in boto "
+                "configuration, but Python dependencies required to "
+                "support this feature are not available. Certificate "
+                "validation is only supported when running under Python "
+                "2.6 or later.")
+        certs_file = config.get_value(
+            'Boto', 'ca_certificates_file', DEFAULT_CA_CERTS_FILE)
+        if certs_file == 'system':
+            certs_file = None
+        self.ca_certificates_file = certs_file
+        if port:
+            self.port = port
+        else:
+            self.port = PORTS_BY_SECURITY[is_secure]
+
+        self.handle_proxy(proxy, proxy_port, proxy_user, proxy_pass)
+        # define exceptions from http_client that we want to catch and retry
+        self.http_exceptions = (http_client.HTTPException, socket.error,
+                                socket.gaierror, http_client.BadStatusLine)
+        # define subclasses of the above that are not retryable.
+        self.http_unretryable_exceptions = []
+        if HAVE_HTTPS_CONNECTION:
+            self.http_unretryable_exceptions.append(
+                https_connection.InvalidCertificateException)
+
+        # define values in socket exceptions we don't want to catch
+        self.socket_exception_values = (errno.EINTR,)
+        if https_connection_factory is not None:
+            self.https_connection_factory = https_connection_factory[0]
+            self.http_exceptions += https_connection_factory[1]
+        else:
+            self.https_connection_factory = None
+        if (is_secure):
+            self.protocol = 'https'
+        else:
+            self.protocol = 'http'
+        self.host = host
+        self.path = path
+        # If the value passed in for debug is not an int, fall back to 0.
+        if not isinstance(debug, six.integer_types):
+            debug = 0
+        self.debug = config.getint('Boto', 'debug', debug)
+        self.host_header = None
+
+        # Timeout used to tell http_client how long to wait for socket timeouts.
+ # Default is to leave timeout unchanged, which will in turn result in + # the socket's default global timeout being used. To specify a + # timeout, set http_socket_timeout in Boto config. Regardless, + # timeouts will only be applied if Python is 2.6 or greater. + self.http_connection_kwargs = {} + if (sys.version_info[0], sys.version_info[1]) >= (2, 6): + # If timeout isn't defined in boto config file, use 70 second + # default as recommended by + # http://docs.aws.amazon.com/amazonswf/latest/apireference/API_PollForActivityTask.html + self.http_connection_kwargs['timeout'] = config.getint( + 'Boto', 'http_socket_timeout', 70) + + if isinstance(provider, Provider): + # Allow overriding Provider + self.provider = provider + else: + self._provider_type = provider + self.provider = Provider(self._provider_type, + aws_access_key_id, + aws_secret_access_key, + security_token, + profile_name) + + # Allow config file to override default host, port, and host header. + if self.provider.host: + self.host = self.provider.host + if self.provider.port: + self.port = self.provider.port + if self.provider.host_header: + self.host_header = self.provider.host_header + + self._pool = ConnectionPool() + self._connection = (self.host, self.port, self.is_secure) + self._last_rs = None + self._auth_handler = auth.get_auth_handler( + host, config, self.provider, self._required_auth_capability()) + if getattr(self, 'AuthServiceName', None) is not None: + self.auth_service_name = self.AuthServiceName + self.request_hook = None + + def __repr__(self): + return '%s:%s' % (self.__class__.__name__, self.host) + + def _required_auth_capability(self): + return [] + + def _get_auth_service_name(self): + return getattr(self._auth_handler, 'service_name') + + # For Sigv4, the auth_service_name/auth_region_name properties allow + # the service_name/region_name to be explicitly set instead of being + # derived from the endpoint url. + def _set_auth_service_name(self, value): + self._auth_handler.service_name = value + auth_service_name = property(_get_auth_service_name, _set_auth_service_name) + + def _get_auth_region_name(self): + return getattr(self._auth_handler, 'region_name') + + def _set_auth_region_name(self, value): + self._auth_handler.region_name = value + auth_region_name = property(_get_auth_region_name, _set_auth_region_name) + + def connection(self): + return self.get_http_connection(*self._connection) + connection = property(connection) + + def aws_access_key_id(self): + return self.provider.access_key + aws_access_key_id = property(aws_access_key_id) + gs_access_key_id = aws_access_key_id + access_key = aws_access_key_id + + def aws_secret_access_key(self): + return self.provider.secret_key + aws_secret_access_key = property(aws_secret_access_key) + gs_secret_access_key = aws_secret_access_key + secret_key = aws_secret_access_key + + def profile_name(self): + return self.provider.profile_name + profile_name = property(profile_name) + + def get_path(self, path='/'): + # The default behavior is to suppress consecutive slashes for reasons + # discussed at + # https://groups.google.com/forum/#!topic/boto-dev/-ft0XPUy0y8 + # You can override that behavior with the suppress_consec_slashes param. 
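Behavior sketch for get_path (whose body continues below), not part of the
patch: with the default suppress_consec_slashes=True, the connection's base
path and the request path are split into elements, empty elements are
dropped, and a trailing slash is preserved. The same element-join logic
restated standalone (query-string handling omitted):

    def join_suppressing_slashes(base, path):
        trailing = path.endswith('/')
        elements = [p for p in (base.split('/') + path.split('/')) if p]
        joined = '/' + '/'.join(elements)
        return joined + '/' if trailing and not joined.endswith('/') else joined

    assert join_suppressing_slashes('/base/', '//foo//bar/') == '/base/foo/bar/'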
+        if not self.suppress_consec_slashes:
+            return self.path + re.sub('^(/*)/', "\\1", path)
+        pos = path.find('?')
+        if pos >= 0:
+            params = path[pos:]
+            path = path[:pos]
+        else:
+            params = None
+        if path[-1] == '/':
+            need_trailing = True
+        else:
+            need_trailing = False
+        path_elements = self.path.split('/')
+        path_elements.extend(path.split('/'))
+        path_elements = [p for p in path_elements if p]
+        path = '/' + '/'.join(path_elements)
+        if path[-1] != '/' and need_trailing:
+            path += '/'
+        if params:
+            path = path + params
+        return path
+
+    def server_name(self, port=None):
+        if not port:
+            port = self.port
+        if port == 80:
+            signature_host = self.host
+        else:
+            # This unfortunate little hack can be attributed to
+            # a difference in the 2.6 version of http_client. In old
+            # versions, it would append ":443" to the hostname sent
+            # in the Host header and so we needed to make sure we
+            # did the same when calculating the V2 signature. In 2.6
+            # (and higher!)
+            # it no longer does that. Hence, this kludge.
+            if ((ON_APP_ENGINE and sys.version[:3] == '2.5') or
+                    sys.version[:3] in ('2.6', '2.7')) and port == 443:
+                signature_host = self.host
+            else:
+                signature_host = '%s:%d' % (self.host, port)
+        return signature_host
+
+    def handle_proxy(self, proxy, proxy_port, proxy_user, proxy_pass):
+        self.proxy = proxy
+        self.proxy_port = proxy_port
+        self.proxy_user = proxy_user
+        self.proxy_pass = proxy_pass
+        if 'http_proxy' in os.environ and not self.proxy:
+            pattern = re.compile(
+                '(?:http://)?'
+                '(?:(?P<user>[\w\-\.]+):(?P<pass>.*)@)?'
+                '(?P<host>[\w\-\.]+)'
+                '(?::(?P<port>\d+))?'
+            )
+            match = pattern.match(os.environ['http_proxy'])
+            if match:
+                self.proxy = match.group('host')
+                self.proxy_port = match.group('port')
+                self.proxy_user = match.group('user')
+                self.proxy_pass = match.group('pass')
+        else:
+            if not self.proxy:
+                self.proxy = config.get_value('Boto', 'proxy', None)
+            if not self.proxy_port:
+                self.proxy_port = config.get_value('Boto', 'proxy_port', None)
+            if not self.proxy_user:
+                self.proxy_user = config.get_value('Boto', 'proxy_user', None)
+            if not self.proxy_pass:
+                self.proxy_pass = config.get_value('Boto', 'proxy_pass', None)
+
+        if not self.proxy_port and self.proxy:
+            print("http_proxy environment variable does not specify "
+                  "a port, using default")
+            self.proxy_port = self.port
+
+        self.no_proxy = os.environ.get('no_proxy', '') or os.environ.get('NO_PROXY', '')
+        self.use_proxy = (self.proxy is not None)
+
+    def get_http_connection(self, host, port, is_secure):
+        conn = self._pool.get_http_connection(host, port, is_secure)
+        if conn is not None:
+            return conn
+        else:
+            return self.new_http_connection(host, port, is_secure)
+
+    def skip_proxy(self, host):
+        if not self.no_proxy:
+            return False
+
+        if self.no_proxy == "*":
+            return True
+
+        # Strip any port suffix before matching against no_proxy entries.
+        hostonly = host.split(':')[0]
+
+        for name in self.no_proxy.split(','):
+            if name and (hostonly.endswith(name) or host.endswith(name)):
+                return True
+
+        return False
+
+    def new_http_connection(self, host, port, is_secure):
+        if host is None:
+            host = self.server_name()
+
+        # Make sure the host is really just the host, not including
+        # the port number
+        host = host.split(':', 1)[0]
+
+        http_connection_kwargs = self.http_connection_kwargs.copy()
+
+        # Connection factories below expect a port keyword argument
+        http_connection_kwargs['port'] = port
+
+        # Override host with proxy settings if needed
+        if self.use_proxy and not is_secure and \
+                not self.skip_proxy(host):
+            host = self.proxy
+            http_connection_kwargs['port'] =
int(self.proxy_port) + + if is_secure: + boto.log.debug( + 'establishing HTTPS connection: host=%s, kwargs=%s', + host, http_connection_kwargs) + if self.use_proxy and not self.skip_proxy(host): + connection = self.proxy_ssl(host, is_secure and 443 or 80) + elif self.https_connection_factory: + connection = self.https_connection_factory(host) + elif self.https_validate_certificates and HAVE_HTTPS_CONNECTION: + connection = https_connection.CertValidatingHTTPSConnection( + host, ca_certs=self.ca_certificates_file, + **http_connection_kwargs) + else: + connection = http_client.HTTPSConnection( + host, **http_connection_kwargs) + else: + boto.log.debug('establishing HTTP connection: kwargs=%s' % + http_connection_kwargs) + if self.https_connection_factory: + # even though the factory says https, this is too handy + # to not be able to allow overriding for http also. + connection = self.https_connection_factory( + host, **http_connection_kwargs) + else: + connection = http_client.HTTPConnection( + host, **http_connection_kwargs) + if self.debug > 1: + connection.set_debuglevel(self.debug) + # self.connection must be maintained for backwards-compatibility + # however, it must be dynamically pulled from the connection pool + # set a private variable which will enable that + if host.split(':')[0] == self.host and is_secure == self.is_secure: + self._connection = (host, port, is_secure) + # Set the response class of the http connection to use our custom + # class. + connection.response_class = HTTPResponse + return connection + + def put_http_connection(self, host, port, is_secure, connection): + self._pool.put_http_connection(host, port, is_secure, connection) + + def proxy_ssl(self, host=None, port=None): + if host and port: + host = '%s:%d' % (host, port) + else: + host = '%s:%d' % (self.host, self.port) + # Seems properly to use timeout for connect too + timeout = self.http_connection_kwargs.get("timeout") + if timeout is not None: + sock = socket.create_connection((self.proxy, + int(self.proxy_port)), timeout) + else: + sock = socket.create_connection((self.proxy, int(self.proxy_port))) + boto.log.debug("Proxy connection: CONNECT %s HTTP/1.0\r\n", host) + sock.sendall("CONNECT %s HTTP/1.0\r\n" % host) + sock.sendall("User-Agent: %s\r\n" % UserAgent) + if self.proxy_user and self.proxy_pass: + for k, v in self.get_proxy_auth_header().items(): + sock.sendall("%s: %s\r\n" % (k, v)) + # See discussion about this config option at + # https://groups.google.com/forum/?fromgroups#!topic/boto-dev/teenFvOq2Cc + if config.getbool('Boto', 'send_crlf_after_proxy_auth_headers', False): + sock.sendall("\r\n") + else: + sock.sendall("\r\n") + resp = http_client.HTTPResponse(sock, strict=True, debuglevel=self.debug) + resp.begin() + + if resp.status != 200: + # Fake a socket error, use a code that make it obvious it hasn't + # been generated by the socket library + raise socket.error(-71, + "Error talking to HTTP proxy %s:%s: %s (%s)" % + (self.proxy, self.proxy_port, + resp.status, resp.reason)) + + # We can safely close the response, it duped the original socket + resp.close() + + h = http_client.HTTPConnection(host) + + if self.https_validate_certificates and HAVE_HTTPS_CONNECTION: + msg = "wrapping ssl socket for proxied connection; " + if self.ca_certificates_file: + msg += "CA certificate file=%s" % self.ca_certificates_file + else: + msg += "using system provided SSL certs" + boto.log.debug(msg) + key_file = self.http_connection_kwargs.get('key_file', None) + cert_file = 
self.http_connection_kwargs.get('cert_file', None)
+            sslSock = ssl.wrap_socket(sock, keyfile=key_file,
+                                      certfile=cert_file,
+                                      cert_reqs=ssl.CERT_REQUIRED,
+                                      ca_certs=self.ca_certificates_file)
+            cert = sslSock.getpeercert()
+            hostname = self.host.split(':', 1)[0]
+            if not https_connection.ValidateCertificateHostname(cert, hostname):
+                raise https_connection.InvalidCertificateException(
+                    hostname, cert, 'hostname mismatch')
+        else:
+            # Fallback for old Python without ssl.wrap_socket
+            if hasattr(http_client, 'ssl'):
+                sslSock = http_client.ssl.SSLSocket(sock)
+            else:
+                sslSock = socket.ssl(sock, None, None)
+            sslSock = http_client.FakeSocket(sock, sslSock)
+
+        # This is a bit unclean
+        h.sock = sslSock
+        return h
+
+    def prefix_proxy_to_path(self, path, host=None):
+        path = self.protocol + '://' + (host or self.server_name()) + path
+        return path
+
+    def get_proxy_auth_header(self):
+        auth = encodebytes(self.proxy_user + ':' + self.proxy_pass)
+        return {'Proxy-Authorization': 'Basic %s' % auth}
+
+    # For passing proxy information to other connection libraries, e.g. cloudsearch2
+    def get_proxy_url_with_auth(self):
+        if not self.use_proxy:
+            return None
+
+        if self.proxy_user or self.proxy_pass:
+            if self.proxy_pass:
+                login_info = '%s:%s@' % (self.proxy_user, self.proxy_pass)
+            else:
+                login_info = '%s@' % self.proxy_user
+        else:
+            login_info = ''
+
+        return 'http://%s%s:%s' % (login_info, self.proxy, str(self.proxy_port or self.port))
+
+    def set_host_header(self, request):
+        try:
+            request.headers['Host'] = \
+                self._auth_handler.host_header(self.host, request)
+        except AttributeError:
+            request.headers['Host'] = self.host.split(':', 1)[0]
+
+    def set_request_hook(self, hook):
+        self.request_hook = hook
+
+    def _mexe(self, request, sender=None, override_num_retries=None,
+              retry_handler=None):
+        """
+        mexe - Multi-execute inside a loop, retrying multiple times to handle
+        transient Internet errors by simply trying again.
+        Also handles redirects.
+
+        This code was inspired by the S3Utils classes posted to the boto-users
+        Google group by Larry Bates. Thanks!
+
+        """
+        boto.log.debug('Method: %s' % request.method)
+        boto.log.debug('Path: %s' % request.path)
+        boto.log.debug('Data: %s' % request.body)
+        boto.log.debug('Headers: %s' % request.headers)
+        boto.log.debug('Host: %s' % request.host)
+        boto.log.debug('Port: %s' % request.port)
+        boto.log.debug('Params: %s' % request.params)
+        response = None
+        body = None
+        ex = None
+        if override_num_retries is None:
+            num_retries = config.getint('Boto', 'num_retries', self.num_retries)
+        else:
+            num_retries = override_num_retries
+        i = 0
+        connection = self.get_http_connection(request.host, request.port,
+                                              self.is_secure)
+
+        # Convert body to bytes if needed
+        if not isinstance(request.body, bytes) and hasattr(request.body,
+                                                           'encode'):
+            request.body = request.body.encode('utf-8')
+
+        while i <= num_retries:
+            # Use binary exponential backoff to desynchronize client requests.
+            next_sleep = min(random.random() * (2 ** i),
+                             boto.config.get('Boto', 'max_retry_delay', 60))
+            try:
+                # we now re-sign each request before it is retried
+                boto.log.debug('Token: %s' % self.provider.security_token)
+                request.authorize(connection=self)
+                # Only force header for non-s3 connections, because s3 uses
+                # an older signing method + bucket resource URLs that include
+                # the port info. All others should now be up to date and
+                # not include the port.
+ if 's3' not in self._required_auth_capability(): + if not getattr(self, 'anon', False): + if not request.headers.get('Host'): + self.set_host_header(request) + boto.log.debug('Final headers: %s' % request.headers) + request.start_time = datetime.now() + if callable(sender): + response = sender(connection, request.method, request.path, + request.body, request.headers) + else: + connection.request(request.method, request.path, + request.body, request.headers) + response = connection.getresponse() + boto.log.debug('Response headers: %s' % response.getheaders()) + location = response.getheader('location') + # -- gross hack -- + # http_client gets confused with chunked responses to HEAD requests + # so I have to fake it out + if request.method == 'HEAD' and getattr(response, + 'chunked', False): + response.chunked = 0 + if callable(retry_handler): + status = retry_handler(response, i, next_sleep) + if status: + msg, i, next_sleep = status + if msg: + boto.log.debug(msg) + time.sleep(next_sleep) + continue + if response.status in [500, 502, 503, 504]: + msg = 'Received %d response. ' % response.status + msg += 'Retrying in %3.1f seconds' % next_sleep + boto.log.debug(msg) + body = response.read() + if isinstance(body, bytes): + body = body.decode('utf-8') + elif response.status < 300 or response.status >= 400 or \ + not location: + # don't return connection to the pool if response contains + # Connection:close header, because the connection has been + # closed and default reconnect behavior may do something + # different than new_http_connection. Also, it's probably + # less efficient to try to reuse a closed connection. + conn_header_value = response.getheader('connection') + if conn_header_value == 'close': + connection.close() + else: + self.put_http_connection(request.host, request.port, + self.is_secure, connection) + if self.request_hook is not None: + self.request_hook.handle_request_data(request, response) + return response + else: + scheme, request.host, request.path, \ + params, query, fragment = urlparse(location) + if query: + request.path += '?' + query + # urlparse can return both host and port in netloc, so if + # that's the case we need to split them up properly + if ':' in request.host: + request.host, request.port = request.host.split(':', 1) + msg = 'Redirecting: %s' % scheme + '://' + msg += request.host + request.path + boto.log.debug(msg) + connection = self.get_http_connection(request.host, + request.port, + scheme == 'https') + response = None + continue + except PleaseRetryException as e: + boto.log.debug('encountered a retry exception: %s' % e) + connection = self.new_http_connection(request.host, request.port, + self.is_secure) + response = e.response + ex = e + except self.http_exceptions as e: + for unretryable in self.http_unretryable_exceptions: + if isinstance(e, unretryable): + boto.log.debug( + 'encountered unretryable %s exception, re-raising' % + e.__class__.__name__) + raise + boto.log.debug('encountered %s exception, reconnecting' % + e.__class__.__name__) + connection = self.new_http_connection(request.host, request.port, + self.is_secure) + ex = e + time.sleep(next_sleep) + i += 1 + # If we made it here, it's because we have exhausted our retries + # and still haven't succeeded. So, if we have a response object, + # use it to raise an exception. + # Otherwise, raise the exception that must have already happened.
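+ # The request hook, if one was registered, is notified of the + # failed request before any error is raised, mirroring the + # notification on the success path above.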
+ if self.request_hook is not None: + self.request_hook.handle_request_data(request, response, error=True) + if response: + raise BotoServerError(response.status, response.reason, body) + elif ex: + raise ex + else: + msg = 'Please report this exception as a Boto Issue!' + raise BotoClientError(msg) + + def build_base_http_request(self, method, path, auth_path, + params=None, headers=None, data='', host=None): + path = self.get_path(path) + if auth_path is not None: + auth_path = self.get_path(auth_path) + if params is None: + params = {} + else: + params = params.copy() + if headers is None: + headers = {} + else: + headers = headers.copy() + if self.host_header and not boto.utils.find_matching_headers('host', headers): + headers['host'] = self.host_header + host = host or self.host + if self.use_proxy: + if not auth_path: + auth_path = path + path = self.prefix_proxy_to_path(path, host) + if self.proxy_user and self.proxy_pass and not self.is_secure: + # If is_secure, we don't have to set the proxy authentication + # header here, we did that in the CONNECT to the proxy. + headers.update(self.get_proxy_auth_header()) + return HTTPRequest(method, self.protocol, host, self.port, + path, auth_path, params, headers, data) + + def make_request(self, method, path, headers=None, data='', host=None, + auth_path=None, sender=None, override_num_retries=None, + params=None, retry_handler=None): + """Makes a request to the server, with stock multiple-retry logic.""" + if params is None: + params = {} + http_request = self.build_base_http_request(method, path, auth_path, + params, headers, data, host) + return self._mexe(http_request, sender, override_num_retries, + retry_handler=retry_handler) + + def close(self): + """(Optional) Close any open HTTP connections. This is non-destructive, + and making a new request will open a connection again.""" + + boto.log.debug('closing all HTTP connections') + self._connection = None # compat field + + +class AWSQueryConnection(AWSAuthConnection): + + APIVersion = '' + ResponseError = BotoServerError + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, host=None, debug=0, + https_connection_factory=None, path='/', security_token=None, + validate_certs=True, profile_name=None, provider='aws'): + super(AWSQueryConnection, self).__init__( + host, aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, + proxy_port, proxy_user, proxy_pass, + debug, https_connection_factory, path, + security_token=security_token, + validate_certs=validate_certs, + profile_name=profile_name, + provider=provider) + + def _required_auth_capability(self): + return [] + + def get_utf8_value(self, value): + return boto.utils.get_utf8_value(value) + + def make_request(self, action, params=None, path='/', verb='GET'): + http_request = self.build_base_http_request(verb, path, None, + params, {}, '', + self.host) + if action: + http_request.params['Action'] = action + if self.APIVersion: + http_request.params['Version'] = self.APIVersion + return self._mexe(http_request) + + def build_list_params(self, params, items, label): + if isinstance(items, six.string_types): + items = [items] + for i in range(1, len(items) + 1): + params['%s.%d' % (label, i)] = items[i - 1] + + def build_complex_list_params(self, params, items, label, names): + """Serialize a list of structures. 
+ + For example:: + + items = [('foo', 'bar', 'baz'), ('foo2', 'bar2', 'baz2')] + label = 'ParamName.member' + names = ('One', 'Two', 'Three') + self.build_complex_list_params(params, items, label, names) + + would result in the params dict being updated with these params:: + + ParamName.member.1.One = foo + ParamName.member.1.Two = bar + ParamName.member.1.Three = baz + + ParamName.member.2.One = foo2 + ParamName.member.2.Two = bar2 + ParamName.member.2.Three = baz2 + + :type params: dict + :param params: The params dict. The complex list params + will be added to this dict. + + :type items: list of tuples + :param items: The list to serialize. + + :type label: string + :param label: The prefix to apply to the parameter. + + :type names: tuple of strings + :param names: The names associated with each tuple element. + + """ + for i, item in enumerate(items, 1): + current_prefix = '%s.%s' % (label, i) + for key, value in zip(names, item): + full_key = '%s.%s' % (current_prefix, key) + params[full_key] = value + + # generics + + def get_list(self, action, params, markers, path='/', + parent=None, verb='GET'): + if not parent: + parent = self + response = self.make_request(action, params, path, verb) + body = response.read() + boto.log.debug(body) + if not body: + boto.log.error('Null body %s' % body) + raise self.ResponseError(response.status, response.reason, body) + elif response.status == 200: + rs = ResultSet(markers) + h = boto.handler.XmlHandler(rs, parent) + if isinstance(body, six.text_type): + body = body.encode('utf-8') + xml.sax.parseString(body, h) + return rs + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) + + def get_object(self, action, params, cls, path='/', + parent=None, verb='GET'): + if not parent: + parent = self + response = self.make_request(action, params, path, verb) + body = response.read() + boto.log.debug(body) + if not body: + boto.log.error('Null body %s' % body) + raise self.ResponseError(response.status, response.reason, body) + elif response.status == 200: + obj = cls(parent) + h = boto.handler.XmlHandler(obj, parent) + if isinstance(body, six.text_type): + body = body.encode('utf-8') + xml.sax.parseString(body, h) + return obj + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) + + def get_status(self, action, params, path='/', parent=None, verb='GET'): + if not parent: + parent = self + response = self.make_request(action, params, path, verb) + body = response.read() + boto.log.debug(body) + if not body: + boto.log.error('Null body %s' % body) + raise self.ResponseError(response.status, response.reason, body) + elif response.status == 200: + rs = ResultSet() + h = boto.handler.XmlHandler(rs, parent) + xml.sax.parseString(body, h) + return rs.status + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/contrib/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/contrib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a7e571e236dbb9d9253e108f1e2326962811fd60 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/contrib/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby 
granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# diff --git a/desktop/core/ext-py/boto-2.38.0/boto/contrib/ymlmessage.py b/desktop/core/ext-py/boto-2.38.0/boto/contrib/ymlmessage.py new file mode 100644 index 0000000000000000000000000000000000000000..ae6aea488e5114111343c75f6c6ead7dd5beda37 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/contrib/ymlmessage.py @@ -0,0 +1,53 @@ +# Copyright (c) 2006,2007 Chris Moyer +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +This module was contributed by Chris Moyer. It provides a subclass of the +SQS Message class that supports YAML as the body of the message. + +This module requires the yaml module. +""" +from boto.sqs.message import Message +import yaml + + +class YAMLMessage(Message): + """ + The YAMLMessage class provides a YAML-compatible message. Encoding and + decoding are handled automatically.
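+ + A hypothetical round trip (assuming PyYAML is installed; note that + set_body parses with yaml.load, not yaml.safe_load, so the body + must be trusted YAML):: + + m = YAMLMessage(body='[1, 2, 3]') + m.get_body() # '[1, 2, 3]\n', serialized back by yaml.dump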
+ + Access this message data like so: + + m.data = [ 1, 2, 3] + m.data[0] # Returns 1 + + This depends on the PyYAML package + """ + + def __init__(self, queue=None, body='', xml_attrs=None): + self.data = None + super(YAMLMessage, self).__init__(queue, body) + + def set_body(self, body): + self.data = yaml.load(body) + + def get_body(self): + return yaml.dump(self.data) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/datapipeline/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/datapipeline/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1f61ea67e9dd72331f2c1a62f9d1f6477ed47ecd --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/datapipeline/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import get_regions + + +def regions(): + """ + Get all available regions for the AWS Datapipeline service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.datapipeline.layer1 import DataPipelineConnection + return get_regions('datapipeline', connection_cls=DataPipelineConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/datapipeline/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/datapipeline/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..c2761e25c1dbdcd1646f24dfa532d3e49e97b067 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/datapipeline/exceptions.py @@ -0,0 +1,42 @@ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software.
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import JSONResponseError + + +class PipelineDeletedException(JSONResponseError): + pass + + +class InvalidRequestException(JSONResponseError): + pass + + +class TaskNotFoundException(JSONResponseError): + pass + + +class PipelineNotFoundException(JSONResponseError): + pass + + +class InternalServiceError(JSONResponseError): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/datapipeline/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/datapipeline/layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..028fd9d2e31f3865cdd4049828e4fb7a44cadc88 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/datapipeline/layer1.py @@ -0,0 +1,639 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.datapipeline import exceptions + + +class DataPipelineConnection(AWSQueryConnection): + """ + This is the AWS Data Pipeline API Reference. This guide provides + descriptions and samples of the AWS Data Pipeline API. + + AWS Data Pipeline is a web service that configures and manages a + data-driven workflow called a pipeline. AWS Data Pipeline handles + the details of scheduling and ensuring that data dependencies are + met so your application can focus on processing the data. + + The AWS Data Pipeline API implements two main sets of + functionality. The first set of actions configures the pipeline in + the web service. You call these actions to create a pipeline and + define data sources, schedules, dependencies, and the transforms + to be performed on the data. + + The second set of actions is used by a task runner application + that calls the AWS Data Pipeline API to receive the next task + ready for processing.
The logic for performing the task, such as + querying the data, running data analysis, or converting the data + from one format to another, is contained within the task runner. + The task runner performs the task assigned to it by the web + service, reporting progress to the web service as it does so. When + the task is done, the task runner reports the final success or + failure of the task to the web service. + + AWS Data Pipeline provides an open-source implementation of a task + runner called AWS Data Pipeline Task Runner. AWS Data Pipeline + Task Runner provides logic for common data management scenarios, + such as performing database queries and running data analysis + using Amazon Elastic MapReduce (Amazon EMR). You can use AWS Data + Pipeline Task Runner as your task runner, or you can write your + own task runner to provide custom data management. + + The AWS Data Pipeline API uses the Signature Version 4 protocol + for signing requests. For more information about how to sign a + request with this protocol, see `Signature Version 4 Signing + Process`_. In the code examples in this reference, the Signature + Version 4 Request parameters are represented as AuthParams. + """ + APIVersion = "2012-10-29" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "datapipeline.us-east-1.amazonaws.com" + ServiceName = "DataPipeline" + TargetPrefix = "DataPipeline" + ResponseError = JSONResponseError + + _faults = { + "PipelineDeletedException": exceptions.PipelineDeletedException, + "InvalidRequestException": exceptions.InvalidRequestException, + "TaskNotFoundException": exceptions.TaskNotFoundException, + "PipelineNotFoundException": exceptions.PipelineNotFoundException, + "InternalServiceError": exceptions.InternalServiceError, + } + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + kwargs['host'] = region.endpoint + super(DataPipelineConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def activate_pipeline(self, pipeline_id): + """ + Validates a pipeline and initiates processing. If the pipeline + does not pass validation, activation fails. + + Call this action to start processing pipeline tasks of a + pipeline you've created using the CreatePipeline and + PutPipelineDefinition actions. A pipeline cannot be modified + after it has been successfully activated. + + :type pipeline_id: string + :param pipeline_id: The identifier of the pipeline to activate. + + """ + params = {'pipelineId': pipeline_id, } + return self.make_request(action='ActivatePipeline', + body=json.dumps(params)) + + def create_pipeline(self, name, unique_id, description=None): + """ + Creates a new empty pipeline. When this action succeeds, you + can then use the PutPipelineDefinition action to populate the + pipeline. + + :type name: string + :param name: The name of the new pipeline. You can use the same name + for multiple pipelines associated with your AWS account, because + AWS Data Pipeline assigns each new pipeline a unique pipeline + identifier. + + :type unique_id: string + :param unique_id: A unique identifier that you specify. This identifier + is not the same as the pipeline identifier assigned by AWS Data + Pipeline. You are responsible for defining the format and ensuring + the uniqueness of this identifier. You use this parameter to ensure + idempotency during repeated calls to CreatePipeline. 
For example, + if the first call to CreatePipeline does not return a clear + success, you can pass in the same unique identifier and pipeline + name combination on a subsequent call to CreatePipeline. + CreatePipeline ensures that if a pipeline already exists with the + same name and unique identifier, a new pipeline will not be + created. Instead, you'll receive the pipeline identifier from the + previous attempt. The uniqueness of the name and unique identifier + combination is scoped to the AWS account or IAM user credentials. + + :type description: string + :param description: The description of the new pipeline. + + """ + params = {'name': name, 'uniqueId': unique_id, } + if description is not None: + params['description'] = description + return self.make_request(action='CreatePipeline', + body=json.dumps(params)) + + def delete_pipeline(self, pipeline_id): + """ + Permanently deletes a pipeline, its pipeline definition and + its run history. You cannot query or restore a deleted + pipeline. AWS Data Pipeline will attempt to cancel instances + associated with the pipeline that are currently being + processed by task runners. Deleting a pipeline cannot be + undone. + + To temporarily pause a pipeline instead of deleting it, call + SetStatus with the status set to Pause on individual + components. Components that are paused by SetStatus can be + resumed. + + :type pipeline_id: string + :param pipeline_id: The identifier of the pipeline to be deleted. + + """ + params = {'pipelineId': pipeline_id, } + return self.make_request(action='DeletePipeline', + body=json.dumps(params)) + + def describe_objects(self, object_ids, pipeline_id, marker=None, + evaluate_expressions=None): + """ + Returns the object definitions for a set of objects associated + with the pipeline. Object definitions are composed of a set of + fields that define the properties of the object. + + :type pipeline_id: string + :param pipeline_id: Identifier of the pipeline that contains the object + definitions. + + :type object_ids: list + :param object_ids: Identifiers of the pipeline objects that contain the + definitions to be described. You can pass as many as 25 identifiers + in a single call to DescribeObjects. + + :type evaluate_expressions: boolean + :param evaluate_expressions: Indicates whether any expressions in the + object should be evaluated when the object descriptions are + returned. + + :type marker: string + :param marker: The starting point for the results to be returned. The + first time you call DescribeObjects, this value should be empty. As + long as the action returns `HasMoreResults` as `True`, you can call + DescribeObjects again and pass the marker value from the response + to retrieve the next set of results. + + """ + params = { + 'pipelineId': pipeline_id, + 'objectIds': object_ids, + } + if evaluate_expressions is not None: + params['evaluateExpressions'] = evaluate_expressions + if marker is not None: + params['marker'] = marker + return self.make_request(action='DescribeObjects', + body=json.dumps(params)) + + def describe_pipelines(self, pipeline_ids): + """ + Retrieve metadata about one or more pipelines. The information + retrieved includes the name of the pipeline, the pipeline + identifier, its current state, and the user account that owns + the pipeline. Using account credentials, you can retrieve + metadata about pipelines that you or your IAM users have + created. 
If you are using an IAM user account, you can + retrieve metadata about only those pipelines you have read + permission for. + + To retrieve the full pipeline definition instead of metadata + about the pipeline, call the GetPipelineDefinition action. + + :type pipeline_ids: list + :param pipeline_ids: Identifiers of the pipelines to describe. You can + pass as many as 25 identifiers in a single call to + DescribePipelines. You can obtain pipeline identifiers by calling + ListPipelines. + + """ + params = {'pipelineIds': pipeline_ids, } + return self.make_request(action='DescribePipelines', + body=json.dumps(params)) + + def evaluate_expression(self, pipeline_id, expression, object_id): + """ + Evaluates a string in the context of a specified object. A + task runner can use this action to evaluate SQL queries stored + in Amazon S3. + + :type pipeline_id: string + :param pipeline_id: The identifier of the pipeline. + + :type object_id: string + :param object_id: The identifier of the object. + + :type expression: string + :param expression: The expression to evaluate. + + """ + params = { + 'pipelineId': pipeline_id, + 'objectId': object_id, + 'expression': expression, + } + return self.make_request(action='EvaluateExpression', + body=json.dumps(params)) + + def get_pipeline_definition(self, pipeline_id, version=None): + """ + Returns the definition of the specified pipeline. You can call + GetPipelineDefinition to retrieve the pipeline definition you + provided using PutPipelineDefinition. + + :type pipeline_id: string + :param pipeline_id: The identifier of the pipeline. + + :type version: string + :param version: The version of the pipeline definition to retrieve. + This parameter accepts the values `latest` (default) and `active`. + Where `latest` indicates the last definition saved to the pipeline + and `active` indicates the last definition of the pipeline that was + activated. + + """ + params = {'pipelineId': pipeline_id, } + if version is not None: + params['version'] = version + return self.make_request(action='GetPipelineDefinition', + body=json.dumps(params)) + + def list_pipelines(self, marker=None): + """ + Returns a list of pipeline identifiers for all active + pipelines. Identifiers are returned only for pipelines you + have permission to access. + + :type marker: string + :param marker: The starting point for the results to be returned. The + first time you call ListPipelines, this value should be empty. As + long as the action returns `HasMoreResults` as `True`, you can call + ListPipelines again and pass the marker value from the response to + retrieve the next set of results. + + """ + params = {} + if marker is not None: + params['marker'] = marker + return self.make_request(action='ListPipelines', + body=json.dumps(params)) + + def poll_for_task(self, worker_group, hostname=None, + instance_identity=None): + """ + Task runners call this action to receive a task to perform + from AWS Data Pipeline. The task runner specifies which tasks + it can perform by setting a value for the workerGroup + parameter of the PollForTask call. The task returned by + PollForTask may come from any of the pipelines that match the + workerGroup value passed in by the task runner and that was + launched using the IAM user credentials specified by the task + runner. + + If tasks are ready in the work queue, PollForTask returns a + response immediately. 
If no tasks are available in the queue, + PollForTask uses long-polling and holds on to a poll + connection for up to 90 seconds, during which time the first + newly scheduled task is handed to the task runner. To + accommodate this, set the socket timeout in your task runner to + 90 seconds. The task runner should not call PollForTask again + on the same `workerGroup` until it receives a response, and + this may take up to 90 seconds. + + :type worker_group: string + :param worker_group: Indicates the type of task the task runner is + configured to accept and process. The worker group is set as a + field on objects in the pipeline when they are created. You can + only specify a single value for `workerGroup` in the call to + PollForTask. There are no wildcard values permitted in + `workerGroup`; the string must be an exact, case-sensitive match. + + :type hostname: string + :param hostname: The public DNS name of the calling task runner. + + :type instance_identity: dict + :param instance_identity: Identity information for the Amazon EC2 + instance that is hosting the task runner. You can get this value by + calling the URI, `http://169.254.169.254/latest/meta-data/instance- + id`, from the EC2 instance. For more information, go to `Instance + Metadata`_ in the Amazon Elastic Compute Cloud User Guide. Passing + in this value proves that your task runner is running on an EC2 + instance, and ensures the proper AWS Data Pipeline service charges + are applied to your pipeline. + + """ + params = {'workerGroup': worker_group, } + if hostname is not None: + params['hostname'] = hostname + if instance_identity is not None: + params['instanceIdentity'] = instance_identity + return self.make_request(action='PollForTask', + body=json.dumps(params)) + + def put_pipeline_definition(self, pipeline_objects, pipeline_id): + """ + Adds tasks, schedules, and preconditions that control the + behavior of the pipeline. You can use PutPipelineDefinition to + populate a new pipeline or to update an existing pipeline that + has not yet been activated. + + PutPipelineDefinition also validates the configuration as it + adds it to the pipeline. Changes to the pipeline are saved + unless one of the following three validation errors exists in + the pipeline. + + #. An object is missing a name or identifier field. + #. A string or reference field is empty. + #. The number of objects in the pipeline exceeds the maximum + allowed objects. + + + + Pipeline object definitions are passed to the + PutPipelineDefinition action and returned by the + GetPipelineDefinition action. + + :type pipeline_id: string + :param pipeline_id: The identifier of the pipeline to be configured. + + :type pipeline_objects: list + :param pipeline_objects: The objects that define the pipeline. These + will overwrite the existing pipeline definition. + + """ + params = { + 'pipelineId': pipeline_id, + 'pipelineObjects': pipeline_objects, + } + return self.make_request(action='PutPipelineDefinition', + body=json.dumps(params)) + + def query_objects(self, pipeline_id, sphere, marker=None, query=None, + limit=None): + """ + Queries a pipeline for the names of objects that match a + specified set of conditions. + + The objects returned by QueryObjects are paginated and then + filtered by the value you set for query. This means the action + may return an empty result set with a value set for marker.
If + `HasMoreResults` is set to `True`, you should continue to call + QueryObjects, passing in the returned value for marker, until + `HasMoreResults` returns `False`. + + :type pipeline_id: string + :param pipeline_id: Identifier of the pipeline to be queried for object + names. + + :type query: dict + :param query: Query that defines the objects to be returned. The Query + object can contain a maximum of ten selectors. The conditions in + the query are limited to top-level String fields in the object. + These filters can be applied to components, instances, and + attempts. + + :type sphere: string + :param sphere: Specifies whether the query applies to components or + instances. Allowable values: `COMPONENT`, `INSTANCE`, `ATTEMPT`. + + :type marker: string + :param marker: The starting point for the results to be returned. The + first time you call QueryObjects, this value should be empty. As + long as the action returns `HasMoreResults` as `True`, you can call + QueryObjects again and pass the marker value from the response to + retrieve the next set of results. + + :type limit: integer + :param limit: Specifies the maximum number of object names that + QueryObjects will return in a single call. The default value is + 100. + + """ + params = {'pipelineId': pipeline_id, 'sphere': sphere, } + if query is not None: + params['query'] = query + if marker is not None: + params['marker'] = marker + if limit is not None: + params['limit'] = limit + return self.make_request(action='QueryObjects', + body=json.dumps(params)) + + def report_task_progress(self, task_id): + """ + Updates the AWS Data Pipeline service on the progress of the + calling task runner. When the task runner is assigned a task, + it should call ReportTaskProgress to acknowledge that it has + the task within 2 minutes. If the web service does not receive + this acknowledgement within the 2-minute window, it will + assign the task in a subsequent PollForTask call. After this + initial acknowledgement, the task runner only needs to report + progress every 15 minutes to maintain its ownership of the + task. You can change this reporting time from 15 minutes by + specifying a `reportProgressTimeout` field in your pipeline. + If a task runner does not report its status after 5 minutes, + AWS Data Pipeline will assume that the task runner is unable + to process the task and will reassign the task in a subsequent + response to PollForTask. Task runners should call + ReportTaskProgress every 60 seconds. + + :type task_id: string + :param task_id: Identifier of the task assigned to the task runner. + This value is provided in the TaskObject that the service returns + with the response for the PollForTask action. + + """ + params = {'taskId': task_id, } + return self.make_request(action='ReportTaskProgress', + body=json.dumps(params)) + + def report_task_runner_heartbeat(self, taskrunner_id, worker_group=None, + hostname=None): + """ + Task runners call ReportTaskRunnerHeartbeat every 15 minutes + to indicate that they are operational. In the case of AWS Data + Pipeline Task Runner launched on a resource managed by AWS + Data Pipeline, the web service can use this call to detect + when the task runner application has failed and restart a new + instance. + + :type taskrunner_id: string + :param taskrunner_id: The identifier of the task runner. This value + should be unique across your AWS account.
In the case of AWS Data + Pipeline Task Runner launched on a resource managed by AWS Data + Pipeline, the web service provides a unique identifier when it + launches the application. If you have written a custom task runner, + you should assign a unique identifier for the task runner. + + :type worker_group: string + :param worker_group: Indicates the type of task the task runner is + configured to accept and process. The worker group is set as a + field on objects in the pipeline when they are created. You can + only specify a single value for `workerGroup` in the call to + ReportTaskRunnerHeartbeat. There are no wildcard values permitted + in `workerGroup`; the string must be an exact, case-sensitive + match. + + :type hostname: string + :param hostname: The public DNS name of the calling task runner. + + """ + params = {'taskrunnerId': taskrunner_id, } + if worker_group is not None: + params['workerGroup'] = worker_group + if hostname is not None: + params['hostname'] = hostname + return self.make_request(action='ReportTaskRunnerHeartbeat', + body=json.dumps(params)) + + def set_status(self, object_ids, status, pipeline_id): + """ + Requests that the status of an array of physical or logical + pipeline objects be updated in the pipeline. This update may + not occur immediately, but is eventually consistent. The + status that can be set depends on the type of object. + + :type pipeline_id: string + :param pipeline_id: Identifies the pipeline that contains the objects. + + :type object_ids: list + :param object_ids: Identifies an array of objects. The corresponding + objects can be either physical or components, but not a mix of both + types. + + :type status: string + :param status: Specifies the status to be set on all the objects in + `objectIds`. For components, this can be either `PAUSE` or + `RESUME`. For instances, this can be either `CANCEL`, `RERUN`, or + `MARK_FINISHED`. + + """ + params = { + 'pipelineId': pipeline_id, + 'objectIds': object_ids, + 'status': status, + } + return self.make_request(action='SetStatus', + body=json.dumps(params)) + + def set_task_status(self, task_id, task_status, error_id=None, + error_message=None, error_stack_trace=None): + """ + Notifies AWS Data Pipeline that a task is completed and + provides information about the final status. The task runner + calls this action regardless of whether the task was + successful. The task runner does not need to call SetTaskStatus + for tasks that are canceled by the web service during a call + to ReportTaskProgress. + + :type task_id: string + :param task_id: Identifies the task assigned to the task runner. This + value is set in the TaskObject that is returned by the PollForTask + action. + + :type task_status: string + :param task_status: If `FINISHED`, the task successfully completed. If + `FAILED`, the task ended unsuccessfully. The `FALSE` value is used + by preconditions. + + :type error_id: string + :param error_id: If an error occurred during the task, this value + specifies an id value that represents the error. This value is set + on the physical attempt object. It is used to display error + information to the user. It should not start with the string + "Service_", which is reserved by the system. + + :type error_message: string + :param error_message: If an error occurred during the task, this value + specifies a text description of the error. This value is set on the + physical attempt object. It is used to display error information to + the user. The web service does not parse this value.
+ + :type error_stack_trace: string + :param error_stack_trace: If an error occurred during the task, this + value specifies the stack trace associated with the error. This + value is set on the physical attempt object. It is used to display + error information to the user. The web service does not parse this + value. + + """ + params = {'taskId': task_id, 'taskStatus': task_status, } + if error_id is not None: + params['errorId'] = error_id + if error_message is not None: + params['errorMessage'] = error_message + if error_stack_trace is not None: + params['errorStackTrace'] = error_stack_trace + return self.make_request(action='SetTaskStatus', + body=json.dumps(params)) + + def validate_pipeline_definition(self, pipeline_objects, pipeline_id): + """ + Tests the pipeline definition with a set of validation checks + to ensure that it is well formed and can run without error. + + :type pipeline_id: string + :param pipeline_id: Identifies the pipeline whose definition is to be + validated. + + :type pipeline_objects: list + :param pipeline_objects: A list of objects that define the pipeline + changes to validate against the pipeline. + + """ + params = { + 'pipelineId': pipeline_id, + 'pipelineObjects': pipeline_objects, + } + return self.make_request(action='ValidatePipelineDefinition', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/directconnect/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/directconnect/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2603177d62eb1ff48eee79c9f2e6e3b3260a1754 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/directconnect/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the AWS DirectConnect service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.directconnect.layer1 import DirectConnectConnection + return get_regions('directconnect', connection_cls=DirectConnectConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/directconnect/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/directconnect/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..88168d302c69b0a6b014841190753a880e5b0a5e --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/directconnect/exceptions.py @@ -0,0 +1,29 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + +class DirectConnectClientException(Exception): + pass + + +class DirectConnectServerException(Exception): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/directconnect/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/directconnect/layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..a332b31bdd3d00b817ab5d04b0cae597a8ab4364 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/directconnect/layer1.py @@ -0,0 +1,627 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.directconnect import exceptions +from boto.compat import json + + +class DirectConnectConnection(AWSQueryConnection): + """ + AWS Direct Connect makes it easy to establish a dedicated network + connection from your premises to Amazon Web Services (AWS). Using + AWS Direct Connect, you can establish private connectivity between + AWS and your data center, office, or colocation environment, which + in many cases can reduce your network costs, increase bandwidth + throughput, and provide a more consistent network experience than + Internet-based connections. + + The AWS Direct Connect API Reference provides descriptions, + syntax, and usage examples for each of the actions and data types + for AWS Direct Connect. Use the following links to get started + using the AWS Direct Connect API Reference: + + + + `Actions`_: An alphabetical list of all AWS Direct Connect + actions. + + `Data Types`_: An alphabetical list of all AWS Direct Connect + data types. + + `Common Query Parameters`_: Parameters that all Query actions + can use. + + `Common Errors`_: Client and server errors that all actions can + return. + """ + APIVersion = "2012-10-25" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "directconnect.us-east-1.amazonaws.com" + ServiceName = "DirectConnect" + TargetPrefix = "OvertureService" + ResponseError = JSONResponseError + + _faults = { + "DirectConnectClientException": exceptions.DirectConnectClientException, + "DirectConnectServerException": exceptions.DirectConnectServerException, + } + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs: + kwargs['host'] = region.endpoint + + super(DirectConnectConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def allocate_connection_on_interconnect(self, bandwidth, connection_name, + owner_account, interconnect_id, + vlan): + """ + Creates a hosted connection on an interconnect. + + Allocates a VLAN number and a specified amount of bandwidth + for use by a hosted connection on the given interconnect. + + :type bandwidth: string + :param bandwidth: Bandwidth of the connection. + Example: " 500Mbps " + + Default: None + + :type connection_name: string + :param connection_name: Name of the provisioned connection. + Example: " 500M Connection to AWS " + + Default: None + + :type owner_account: string + :param owner_account: Numeric account Id of the customer for whom the + connection will be provisioned. + Example: 123443215678 + + Default: None + + :type interconnect_id: string + :param interconnect_id: ID of the interconnect on which the connection + will be provisioned. + Example: dxcon-456abc78 + + Default: None + + :type vlan: integer + :param vlan: The dedicated VLAN provisioned to the connection.
+ Example: 101 + + Default: None + + """ + params = { + 'bandwidth': bandwidth, + 'connectionName': connection_name, + 'ownerAccount': owner_account, + 'interconnectId': interconnect_id, + 'vlan': vlan, + } + return self.make_request(action='AllocateConnectionOnInterconnect', + body=json.dumps(params)) + + def allocate_private_virtual_interface(self, connection_id, + owner_account, + new_private_virtual_interface_allocation): + """ + Provisions a private virtual interface to be owned by a + different customer. + + The owner of a connection calls this function to provision a + private virtual interface which will be owned by another AWS + customer. + + Virtual interfaces created using this function must be + confirmed by the virtual interface owner by calling + ConfirmPrivateVirtualInterface. Until this step has been + completed, the virtual interface will be in 'Confirming' + state, and will not be available for handling traffic. + + :type connection_id: string + :param connection_id: The connection ID on which the private virtual + interface is provisioned. + Default: None + + :type owner_account: string + :param owner_account: The AWS account that will own the new private + virtual interface. + Default: None + + :type new_private_virtual_interface_allocation: dict + :param new_private_virtual_interface_allocation: Detailed information + for the private virtual interface to be provisioned. + Default: None + + """ + params = { + 'connectionId': connection_id, + 'ownerAccount': owner_account, + 'newPrivateVirtualInterfaceAllocation': new_private_virtual_interface_allocation, + } + return self.make_request(action='AllocatePrivateVirtualInterface', + body=json.dumps(params)) + + def allocate_public_virtual_interface(self, connection_id, owner_account, + new_public_virtual_interface_allocation): + """ + Provisions a public virtual interface to be owned by a + different customer. + + The owner of a connection calls this function to provision a + public virtual interface which will be owned by another AWS + customer. + + Virtual interfaces created using this function must be + confirmed by the virtual interface owner by calling + ConfirmPublicVirtualInterface. Until this step has been + completed, the virtual interface will be in 'Confirming' + state, and will not be available for handling traffic. + + :type connection_id: string + :param connection_id: The connection ID on which the public virtual + interface is provisioned. + Default: None + + :type owner_account: string + :param owner_account: The AWS account that will own the new public + virtual interface. + Default: None + + :type new_public_virtual_interface_allocation: dict + :param new_public_virtual_interface_allocation: Detailed information + for the public virtual interface to be provisioned. + Default: None + + """ + params = { + 'connectionId': connection_id, + 'ownerAccount': owner_account, + 'newPublicVirtualInterfaceAllocation': new_public_virtual_interface_allocation, + } + return self.make_request(action='AllocatePublicVirtualInterface', + body=json.dumps(params)) + + def confirm_connection(self, connection_id): + """ + Confirm the creation of a hosted connection on an + interconnect. + + Upon creation, the hosted connection is initially in the + 'Ordering' state, and will remain in this state until the + owner calls ConfirmConnection to confirm creation of the + hosted connection. + + :type connection_id: string + :param connection_id: ID of the connection. 
+ Example: dxcon-fg5678gh + + Default: None + + """ + params = {'connectionId': connection_id, } + return self.make_request(action='ConfirmConnection', + body=json.dumps(params)) + + def confirm_private_virtual_interface(self, virtual_interface_id, + virtual_gateway_id): + """ + Accept ownership of a private virtual interface created by + another customer. + + After the virtual interface owner calls this function, the + virtual interface will be created and attached to the given + virtual private gateway, and will be available for handling + traffic. + + :type virtual_interface_id: string + :param virtual_interface_id: ID of the virtual interface. + Example: dxvif-123dfg56 + + Default: None + + :type virtual_gateway_id: string + :param virtual_gateway_id: ID of the virtual private gateway that will + be attached to the virtual interface. + A virtual private gateway can be managed via the Amazon Virtual Private + Cloud (VPC) console or the `EC2 CreateVpnGateway`_ action. + + Default: None + + """ + params = { + 'virtualInterfaceId': virtual_interface_id, + 'virtualGatewayId': virtual_gateway_id, + } + return self.make_request(action='ConfirmPrivateVirtualInterface', + body=json.dumps(params)) + + def confirm_public_virtual_interface(self, virtual_interface_id): + """ + Accept ownership of a public virtual interface created by + another customer. + + After the virtual interface owner calls this function, the + specified virtual interface will be created and made available + for handling traffic. + + :type virtual_interface_id: string + :param virtual_interface_id: ID of the virtual interface. + Example: dxvif-123dfg56 + + Default: None + + """ + params = {'virtualInterfaceId': virtual_interface_id, } + return self.make_request(action='ConfirmPublicVirtualInterface', + body=json.dumps(params)) + + def create_connection(self, location, bandwidth, connection_name): + """ + Creates a new connection between the customer network and a + specific AWS Direct Connect location. + + A connection links your internal network to an AWS Direct + Connect location over a standard 1 gigabit or 10 gigabit + Ethernet fiber-optic cable. One end of the cable is connected + to your router, the other to an AWS Direct Connect router. An + AWS Direct Connect location provides access to Amazon Web + Services in the region it is associated with. You can + establish connections with AWS Direct Connect locations in + multiple regions, but a connection in one region does not + provide connectivity to other regions. + + :type location: string + :param location: Where the connection is located. + Example: EqSV5 + + Default: None + + :type bandwidth: string + :param bandwidth: Bandwidth of the connection. + Example: 1Gbps + + Default: None + + :type connection_name: string + :param connection_name: The name of the connection. + Example: " My Connection to AWS " + + Default: None + + """ + params = { + 'location': location, + 'bandwidth': bandwidth, + 'connectionName': connection_name, + } + return self.make_request(action='CreateConnection', + body=json.dumps(params)) + + def create_interconnect(self, interconnect_name, bandwidth, location): + """ + Creates a new interconnect between an AWS Direct Connect + partner's network and a specific AWS Direct Connect location. + + An interconnect is a connection which is capable of hosting + other connections. The AWS Direct Connect partner can use an + interconnect to provide sub-1Gbps AWS Direct Connect service + to tier 2 customers who do not have their own connections.
+ Like a standard connection, an interconnect links the AWS + Direct Connect partner's network to an AWS Direct Connect + location over a standard 1 Gbps or 10 Gbps Ethernet fiber- + optic cable. One end is connected to the partner's router, the + other to an AWS Direct Connect router. + + For each end customer, the AWS Direct Connect partner + provisions a connection on their interconnect by calling + AllocateConnectionOnInterconnect. The end customer can then + connect to AWS resources by creating a virtual interface on + their connection, using the VLAN assigned to them by the AWS + Direct Connect partner. + + :type interconnect_name: string + :param interconnect_name: The name of the interconnect. + Example: " 1G Interconnect to AWS " + + Default: None + + :type bandwidth: string + :param bandwidth: The port bandwidth. + Example: 1Gbps + + Default: None + + Available values: 1Gbps, 10Gbps + + :type location: string + :param location: Where the interconnect is located. + Example: EqSV5 + + Default: None + + """ + params = { + 'interconnectName': interconnect_name, + 'bandwidth': bandwidth, + 'location': location, + } + return self.make_request(action='CreateInterconnect', + body=json.dumps(params)) + + def create_private_virtual_interface(self, connection_id, + new_private_virtual_interface): + """ + Creates a new private virtual interface. A virtual interface + is the VLAN that transports AWS Direct Connect traffic. A + private virtual interface supports sending traffic to a single + virtual private cloud (VPC). + + :type connection_id: string + :param connection_id: ID of the connection. + Example: dxcon-fg5678gh + + Default: None + + :type new_private_virtual_interface: dict + :param new_private_virtual_interface: Detailed information for the + private virtual interface to be created. + Default: None + + """ + params = { + 'connectionId': connection_id, + 'newPrivateVirtualInterface': new_private_virtual_interface, + } + return self.make_request(action='CreatePrivateVirtualInterface', + body=json.dumps(params)) + + def create_public_virtual_interface(self, connection_id, + new_public_virtual_interface): + """ + Creates a new public virtual interface. A virtual interface is + the VLAN that transports AWS Direct Connect traffic. A public + virtual interface supports sending traffic to public services + of AWS such as Amazon Simple Storage Service (Amazon S3). + + :type connection_id: string + :param connection_id: ID of the connection. + Example: dxcon-fg5678gh + + Default: None + + :type new_public_virtual_interface: dict + :param new_public_virtual_interface: Detailed information for the + public virtual interface to be created. + Default: None + + """ + params = { + 'connectionId': connection_id, + 'newPublicVirtualInterface': new_public_virtual_interface, + } + return self.make_request(action='CreatePublicVirtualInterface', + body=json.dumps(params)) + + def delete_connection(self, connection_id): + """ + Deletes the connection. + + Deleting a connection only stops the AWS Direct Connect port + hour and data transfer charges. You must separately cancel + with the providers any services or charges for cross-connects + or network circuits that connect you to the AWS Direct Connect + location. + + :type connection_id: string + :param connection_id: ID of the connection.
+ Example: dxcon-fg5678gh + + Default: None + + """ + params = {'connectionId': connection_id, } + return self.make_request(action='DeleteConnection', + body=json.dumps(params)) + + def delete_interconnect(self, interconnect_id): + """ + Deletes the specified interconnect. + + :type interconnect_id: string + :param interconnect_id: The ID of the interconnect. + Example: dxcon-abc123 + + """ + params = {'interconnectId': interconnect_id, } + return self.make_request(action='DeleteInterconnect', + body=json.dumps(params)) + + def delete_virtual_interface(self, virtual_interface_id): + """ + Deletes a virtual interface. + + :type virtual_interface_id: string + :param virtual_interface_id: ID of the virtual interface. + Example: dxvif-123dfg56 + + Default: None + + """ + params = {'virtualInterfaceId': virtual_interface_id, } + return self.make_request(action='DeleteVirtualInterface', + body=json.dumps(params)) + + def describe_connections(self, connection_id=None): + """ + Displays all connections in this region. + + If a connection ID is provided, the call returns only that + particular connection. + + :type connection_id: string + :param connection_id: ID of the connection. + Example: dxcon-fg5678gh + + Default: None + + """ + params = {} + if connection_id is not None: + params['connectionId'] = connection_id + return self.make_request(action='DescribeConnections', + body=json.dumps(params)) + + def describe_connections_on_interconnect(self, interconnect_id): + """ + Return a list of connections that have been provisioned on the + given interconnect. + + :type interconnect_id: string + :param interconnect_id: ID of the interconnect on which the + connections are provisioned. + Example: dxcon-abc123 + + Default: None + + """ + params = {'interconnectId': interconnect_id, } + return self.make_request(action='DescribeConnectionsOnInterconnect', + body=json.dumps(params)) + + def describe_interconnects(self, interconnect_id=None): + """ + Returns a list of interconnects owned by the AWS account. + + If an interconnect ID is provided, only that particular + interconnect is returned. + + :type interconnect_id: string + :param interconnect_id: The ID of the interconnect. + Example: dxcon-abc123 + + """ + params = {} + if interconnect_id is not None: + params['interconnectId'] = interconnect_id + return self.make_request(action='DescribeInterconnects', + body=json.dumps(params)) + + def describe_locations(self): + """ + Returns the list of AWS Direct Connect locations in the + current AWS region. These are the locations that may be + selected when calling CreateConnection or CreateInterconnect. + """ + params = {} + return self.make_request(action='DescribeLocations', + body=json.dumps(params)) + + def describe_virtual_gateways(self): + """ + Returns a list of virtual private gateways owned by the AWS + account. + + You can create one or more AWS Direct Connect private virtual + interfaces linking to a virtual private gateway. A virtual + private gateway can be managed via the Amazon Virtual Private + Cloud (VPC) console or the `EC2 CreateVpnGateway`_ action. + """ + params = {} + return self.make_request(action='DescribeVirtualGateways', + body=json.dumps(params)) + + def describe_virtual_interfaces(self, connection_id=None, + virtual_interface_id=None): + """ + Displays all virtual interfaces for an AWS account. Virtual + interfaces deleted fewer than 15 minutes before + DescribeVirtualInterfaces is called are also returned.
If a + connection ID is provided, only virtual interfaces + provisioned on the specified connection will be returned. If a + virtual interface ID is provided, only that particular virtual + interface will be returned. + + A virtual interface (VLAN) transmits the traffic between the + AWS Direct Connect location and the customer. + + :type connection_id: string + :param connection_id: ID of the connection. + Example: dxcon-fg5678gh + + Default: None + + :type virtual_interface_id: string + :param virtual_interface_id: ID of the virtual interface. + Example: dxvif-123dfg56 + + Default: None + + """ + params = {} + if connection_id is not None: + params['connectionId'] = connection_id + if virtual_interface_id is not None: + params['virtualInterfaceId'] = virtual_interface_id + return self.make_request(action='DescribeVirtualInterfaces', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8d548167d8cfecf4aba4ee551c411a66948c6649 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/__init__.py @@ -0,0 +1,42 @@ +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE.
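+# +# Usage sketch (illustrative only; the region name is an example): the +# helpers defined below look up a region by name and return a connected +# Layer2 instance: +# +# import boto.dynamodb +# conn = boto.dynamodb.connect_to_region('us-east-1')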
+# + +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the Amazon DynamoDB service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + import boto.dynamodb.layer2 + return get_regions('dynamodb', connection_cls=boto.dynamodb.layer2.Layer2) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/batch.py b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/batch.py new file mode 100644 index 0000000000000000000000000000000000000000..f30b8425c6a36e20335318b6a6f3ee3cecc5f28b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/batch.py @@ -0,0 +1,261 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.compat import six + + +class Batch(object): + """ + Used to construct a BatchGet request. + + :ivar table: The Table object from which the item is retrieved. + + :ivar keys: A list of scalar or tuple values. Each element in the + list represents one Item to retrieve. If the schema for the + table has both a HashKey and a RangeKey, each element in the + list should be a tuple consisting of (hash_key, range_key). If + the schema for the table contains only a HashKey, each element + in the list should be a scalar value of the appropriate type + for the table schema. NOTE: The maximum number of items that + can be retrieved for a single operation is 100. Also, the + number of items retrieved is constrained by a 1 MB size limit. + + :ivar attributes_to_get: A list of attribute names. + If supplied, only the specified attribute names will + be returned. Otherwise, all attributes will be returned. + + :ivar consistent_read: Specify whether or not to use a + consistent read. Defaults to False. + + """ + + def __init__(self, table, keys, attributes_to_get=None, + consistent_read=False): + self.table = table + self.keys = keys + self.attributes_to_get = attributes_to_get + self.consistent_read = consistent_read + + def to_dict(self): + """ + Convert the Batch object into the format required for Layer1. 
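+ + For a hash-key-only schema, the result resembles the following + (an illustrative sketch; the key value is an example):: + + {'Keys': [{'HashKeyElement': {'S': 'key1'}}], + 'ConsistentRead': False}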
+ """ + batch_dict = {} + key_list = [] + for key in self.keys: + if isinstance(key, tuple): + hash_key, range_key = key + else: + hash_key = key + range_key = None + k = self.table.layer2.build_key_from_values(self.table.schema, + hash_key, range_key) + key_list.append(k) + batch_dict['Keys'] = key_list + if self.attributes_to_get: + batch_dict['AttributesToGet'] = self.attributes_to_get + if self.consistent_read: + batch_dict['ConsistentRead'] = True + else: + batch_dict['ConsistentRead'] = False + return batch_dict + + +class BatchWrite(object): + """ + Used to construct a BatchWrite request. Each BatchWrite object + represents a collection of PutItem and DeleteItem requests for + a single Table. + + :ivar table: The Table object from which the item is retrieved. + + :ivar puts: A list of :class:`boto.dynamodb.item.Item` objects + that you want to write to DynamoDB. + + :ivar deletes: A list of scalar or tuple values. Each element in the + list represents one Item to delete. If the schema for the + table has both a HashKey and a RangeKey, each element in the + list should be a tuple consisting of (hash_key, range_key). If + the schema for the table contains only a HashKey, each element + in the list should be a scalar value of the appropriate type + for the table schema. + """ + + def __init__(self, table, puts=None, deletes=None): + self.table = table + self.puts = puts or [] + self.deletes = deletes or [] + + def to_dict(self): + """ + Convert the Batch object into the format required for Layer1. + """ + op_list = [] + for item in self.puts: + d = {'Item': self.table.layer2.dynamize_item(item)} + d = {'PutRequest': d} + op_list.append(d) + for key in self.deletes: + if isinstance(key, tuple): + hash_key, range_key = key + else: + hash_key = key + range_key = None + k = self.table.layer2.build_key_from_values(self.table.schema, + hash_key, range_key) + d = {'Key': k} + op_list.append({'DeleteRequest': d}) + return (self.table.name, op_list) + + +class BatchList(list): + """ + A subclass of a list object that contains a collection of + :class:`boto.dynamodb.batch.Batch` objects. + """ + + def __init__(self, layer2): + list.__init__(self) + self.unprocessed = None + self.layer2 = layer2 + + def add_batch(self, table, keys, attributes_to_get=None, + consistent_read=False): + """ + Add a Batch to this BatchList. + + :type table: :class:`boto.dynamodb.table.Table` + :param table: The Table object in which the items are contained. + + :type keys: list + :param keys: A list of scalar or tuple values. Each element in the + list represents one Item to retrieve. If the schema for the + table has both a HashKey and a RangeKey, each element in the + list should be a tuple consisting of (hash_key, range_key). If + the schema for the table contains only a HashKey, each element + in the list should be a scalar value of the appropriate type + for the table schema. NOTE: The maximum number of items that + can be retrieved for a single operation is 100. Also, the + number of items retrieved is constrained by a 1 MB size limit. + + :type attributes_to_get: list + :param attributes_to_get: A list of attribute names. + If supplied, only the specified attribute names will + be returned. Otherwise, all attributes will be returned. + """ + self.append(Batch(table, keys, attributes_to_get, consistent_read)) + + def resubmit(self): + """ + Resubmit the batch to get the next result set. The request object is + rebuild from scratch meaning that all batch added between ``submit`` + and ``resubmit`` will be lost. 
+ + Note: This method is experimental and subject to changes in future releases. + """ + del self[:] + + if not self.unprocessed: + return None + + for table_name, table_req in six.iteritems(self.unprocessed): + table_keys = table_req['Keys'] + table = self.layer2.get_table(table_name) + + keys = [] + for key in table_keys: + h = key['HashKeyElement'] + r = None + if 'RangeKeyElement' in key: + r = key['RangeKeyElement'] + keys.append((h, r)) + + attributes_to_get = None + if 'AttributesToGet' in table_req: + attributes_to_get = table_req['AttributesToGet'] + + self.add_batch(table, keys, attributes_to_get=attributes_to_get) + + return self.submit() + + def submit(self): + res = self.layer2.batch_get_item(self) + if 'UnprocessedKeys' in res: + self.unprocessed = res['UnprocessedKeys'] + return res + + def to_dict(self): + """ + Convert a BatchList object into the format required for Layer1. + """ + d = {} + for batch in self: + b = batch.to_dict() + if b['Keys']: + d[batch.table.name] = b + return d + + +class BatchWriteList(list): + """ + A subclass of a list object that contains a collection of + :class:`boto.dynamodb.batch.BatchWrite` objects. + """ + + def __init__(self, layer2): + list.__init__(self) + self.layer2 = layer2 + + def add_batch(self, table, puts=None, deletes=None): + """ + Add a BatchWrite to this BatchWriteList. + + :type table: :class:`boto.dynamodb.table.Table` + :param table: The Table object in which the items are contained. + + :type puts: list of :class:`boto.dynamodb.item.Item` objects + :param puts: A list of items that you want to write to DynamoDB. + + :type deletes: list + :param deletes: A list of scalar or tuple values. Each element + in the list represents one Item to delete. If the schema + for the table has both a HashKey and a RangeKey, each + element in the list should be a tuple consisting of + (hash_key, range_key). If the schema for the table + contains only a HashKey, each element in the list should + be a scalar value of the appropriate type for the table + schema. + """ + self.append(BatchWrite(table, puts, deletes)) + + def submit(self): + return self.layer2.batch_write_item(self) + + def to_dict(self): + """ + Convert a BatchWriteList object into the format required for Layer1. + """ + d = {} + for batch in self: + table_name, batch_dict = batch.to_dict() + d[table_name] = batch_dict + return d diff --git a/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/condition.py b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/condition.py new file mode 100644 index 0000000000000000000000000000000000000000..f5db538c29b93b36691228f24d996c34fabb80bb --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/condition.py @@ -0,0 +1,170 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software.
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.dynamodb.types import dynamize_value + + +class Condition(object): + """ + Base class for conditions. Doesn't do a darn thing but allows + us to test if something is a Condition instance or not. + """ + + def __eq__(self, other): + if isinstance(other, Condition): + return self.to_dict() == other.to_dict() + # Defer to the other operand when comparing against non-Conditions. + return NotImplemented + + +class ConditionNoArgs(Condition): + """ + Abstract class for Conditions that require no arguments, such + as NULL or NOT_NULL. + """ + + def __repr__(self): + return '%s' % self.__class__.__name__ + + def to_dict(self): + return {'ComparisonOperator': self.__class__.__name__} + + +class ConditionOneArg(Condition): + """ + Abstract class for Conditions that require a single argument + such as EQ or NE. + """ + + def __init__(self, v1): + self.v1 = v1 + + def __repr__(self): + return '%s:%s' % (self.__class__.__name__, self.v1) + + def to_dict(self): + return {'AttributeValueList': [dynamize_value(self.v1)], + 'ComparisonOperator': self.__class__.__name__} + + +class ConditionTwoArgs(Condition): + """ + Abstract class for Conditions that require two arguments. + The only example of this currently is BETWEEN. + """ + + def __init__(self, v1, v2): + self.v1 = v1 + self.v2 = v2 + + def __repr__(self): + return '%s(%s, %s)' % (self.__class__.__name__, self.v1, self.v2) + + def to_dict(self): + values = (self.v1, self.v2) + return {'AttributeValueList': [dynamize_value(v) for v in values], + 'ComparisonOperator': self.__class__.__name__} + + +class ConditionSeveralArgs(Condition): + """ + Abstract class for conditions that require several arguments (e.g. IN). + """ + + def __init__(self, values): + self.values = values + + def __repr__(self): + return '{0}({1})'.format(self.__class__.__name__, + ', '.join(self.values)) + + def to_dict(self): + return {'AttributeValueList': [dynamize_value(v) for v in self.values], + 'ComparisonOperator': self.__class__.__name__} + + +class EQ(ConditionOneArg): + + pass + + +class NE(ConditionOneArg): + + pass + + +class LE(ConditionOneArg): + + pass + + +class LT(ConditionOneArg): + + pass + + +class GE(ConditionOneArg): + + pass + + +class GT(ConditionOneArg): + + pass + + +class NULL(ConditionNoArgs): + + pass + + +class NOT_NULL(ConditionNoArgs): + + pass + + +class CONTAINS(ConditionOneArg): + + pass + + +class NOT_CONTAINS(ConditionOneArg): + + pass + + +class BEGINS_WITH(ConditionOneArg): + + pass + + +class IN(ConditionSeveralArgs): + + pass + + +class BETWEEN(ConditionTwoArgs): + + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..12be2d72b5f5e407fd04582b3ea7e0375424e7a1 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/exceptions.py @@ -0,0 +1,64 @@ +""" +Exceptions that are specific to the dynamodb module.
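+ +These are typically caught around item operations, for example +(an illustrative sketch; the table object is assumed to exist):: + + from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError + + try: + item = table.get_item(hash_key='unknown-key') + except DynamoDBKeyNotFoundError: + item = None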
+""" +from boto.exception import BotoServerError, BotoClientError +from boto.exception import DynamoDBResponseError + + +class DynamoDBExpiredTokenError(BotoServerError): + """ + Raised when a DynamoDB security token expires. This is generally boto's + (or the user's) notice to renew their DynamoDB security tokens. + """ + pass + + +class DynamoDBKeyNotFoundError(BotoClientError): + """ + Raised when attempting to retrieve or interact with an item whose key + can't be found. + """ + pass + + +class DynamoDBItemError(BotoClientError): + """ + Raised when invalid parameters are passed when creating a + new Item in DynamoDB. + """ + pass + + +class DynamoDBNumberError(BotoClientError): + """ + Raised in the event of incompatible numeric type casting. + """ + pass + + +class DynamoDBConditionalCheckFailedError(DynamoDBResponseError): + """ + Raised when a ConditionalCheckFailedException response is received. + This happens when a conditional check, expressed via the expected_value + paramenter, fails. + """ + pass + + +class DynamoDBValidationError(DynamoDBResponseError): + """ + Raised when a ValidationException response is received. This happens + when one or more required parameter values are missing, or if the item + has exceeded the 64Kb size limit. + """ + pass + + +class DynamoDBThroughputExceededError(DynamoDBResponseError): + """ + Raised when the provisioned throughput has been exceeded. + Normally, when provisioned throughput is exceeded the operation + is retried. If the retries are exhausted then this exception + will be raised. + """ + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/item.py b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/item.py new file mode 100644 index 0000000000000000000000000000000000000000..a47f22bf0e1f0d9cdca543aa1d1c8a607ac58215 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/item.py @@ -0,0 +1,202 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.dynamodb.exceptions import DynamoDBItemError + + +class Item(dict): + """ + An item in Amazon DynamoDB. + + :ivar hash_key: The HashKey of this item. + :ivar range_key: The RangeKey of this item or None if no RangeKey + is defined. + :ivar hash_key_name: The name of the HashKey associated with this item. + :ivar range_key_name: The name of the RangeKey associated with this item. + :ivar table: The Table this item belongs to. 
+ """ + + def __init__(self, table, hash_key=None, range_key=None, attrs=None): + self.table = table + self._updates = None + self._hash_key_name = self.table.schema.hash_key_name + self._range_key_name = self.table.schema.range_key_name + if attrs is None: + attrs = {} + if hash_key is None: + hash_key = attrs.get(self._hash_key_name, None) + self[self._hash_key_name] = hash_key + if self._range_key_name: + if range_key is None: + range_key = attrs.get(self._range_key_name, None) + self[self._range_key_name] = range_key + self._updates = {} + for key, value in attrs.items(): + if key != self._hash_key_name and key != self._range_key_name: + self[key] = value + self.consumed_units = 0 + + @property + def hash_key(self): + return self[self._hash_key_name] + + @property + def range_key(self): + return self.get(self._range_key_name) + + @property + def hash_key_name(self): + return self._hash_key_name + + @property + def range_key_name(self): + return self._range_key_name + + def add_attribute(self, attr_name, attr_value): + """ + Queue the addition of an attribute to an item in DynamoDB. + This will eventually result in an UpdateItem request being issued + with an update action of ADD when the save method is called. + + :type attr_name: str + :param attr_name: Name of the attribute you want to alter. + + :type attr_value: int|long|float|set + :param attr_value: Value which is to be added to the attribute. + """ + self._updates[attr_name] = ("ADD", attr_value) + + def delete_attribute(self, attr_name, attr_value=None): + """ + Queue the deletion of an attribute from an item in DynamoDB. + This call will result in a UpdateItem request being issued + with update action of DELETE when the save method is called. + + :type attr_name: str + :param attr_name: Name of the attribute you want to alter. + + :type attr_value: set + :param attr_value: A set of values to be removed from the attribute. + This parameter is optional. If None, the whole attribute is + removed from the item. + """ + self._updates[attr_name] = ("DELETE", attr_value) + + def put_attribute(self, attr_name, attr_value): + """ + Queue the putting of an attribute to an item in DynamoDB. + This call will result in an UpdateItem request being issued + with the update action of PUT when the save method is called. + + :type attr_name: str + :param attr_name: Name of the attribute you want to alter. + + :type attr_value: int|long|float|str|set + :param attr_value: New value of the attribute. + """ + self._updates[attr_name] = ("PUT", attr_value) + + def save(self, expected_value=None, return_values=None): + """ + Commits pending updates to Amazon DynamoDB. + + :type expected_value: dict + :param expected_value: A dictionary of name/value pairs that + you expect. This dictionary should have name/value pairs + where the name is the name of the attribute and the value is + either the value you are expecting or False if you expect + the attribute not to exist. + + :type return_values: str + :param return_values: Controls the return of attribute name/value pairs + before they were updated. Possible values are: None, 'ALL_OLD', + 'UPDATED_OLD', 'ALL_NEW' or 'UPDATED_NEW'. If 'ALL_OLD' is + specified and the item is overwritten, the content of the old item + is returned. If 'ALL_NEW' is specified, then all the attributes of + the new version of the item are returned. If 'UPDATED_NEW' is + specified, the new versions of only the updated attributes are + returned. 
+ """ + return self.table.layer2.update_item(self, expected_value, + return_values) + + def delete(self, expected_value=None, return_values=None): + """ + Delete the item from DynamoDB. + + :type expected_value: dict + :param expected_value: A dictionary of name/value pairs that + you expect. This dictionary should have name/value pairs + where the name is the name of the attribute and the value + is either the value you are expecting or False if you expect + the attribute not to exist. + + :type return_values: str + :param return_values: Controls the return of attribute + name-value pairs before then were changed. Possible + values are: None or 'ALL_OLD'. If 'ALL_OLD' is + specified and the item is overwritten, the content + of the old item is returned. + """ + return self.table.layer2.delete_item(self, expected_value, + return_values) + + def put(self, expected_value=None, return_values=None): + """ + Store a new item or completely replace an existing item + in Amazon DynamoDB. + + :type expected_value: dict + :param expected_value: A dictionary of name/value pairs that + you expect. This dictionary should have name/value pairs + where the name is the name of the attribute and the value + is either the value you are expecting or False if you expect + the attribute not to exist. + + :type return_values: str + :param return_values: Controls the return of attribute + name-value pairs before then were changed. Possible + values are: None or 'ALL_OLD'. If 'ALL_OLD' is + specified and the item is overwritten, the content + of the old item is returned. + """ + return self.table.layer2.put_item(self, expected_value, return_values) + + def __setitem__(self, key, value): + """Overrwrite the setter to instead update the _updates + method so this can act like a normal dict""" + if self._updates is not None: + self.put_attribute(key, value) + dict.__setitem__(self, key, value) + + def __delitem__(self, key): + """Remove this key from the items""" + if self._updates is not None: + self.delete_attribute(key) + dict.__delitem__(self, key) + + # Allow this item to still be pickled + def __getstate__(self): + return self.__dict__ + def __setstate__(self, d): + self.__dict__.update(d) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..0984f71ab46d194a20b7356b7be2f2e75cdbe67b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/layer1.py @@ -0,0 +1,577 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import time +from binascii import crc32 + +import boto +from boto.connection import AWSAuthConnection +from boto.exception import DynamoDBResponseError +from boto.provider import Provider +from boto.dynamodb import exceptions as dynamodb_exceptions +from boto.compat import json + + +class Layer1(AWSAuthConnection): + """ + This is the lowest-level interface to DynamoDB. Methods at this + layer map directly to API requests, and parameters to the methods + are either simple scalar values or the Python equivalent + of the JSON input as defined in the DynamoDB Developer's Guide. + All responses are direct decoding of the JSON response bodies to + Python data structures via the json or simplejson modules. + + :ivar throughput_exceeded_events: An integer variable that + keeps a running total of the number of ThroughputExceeded + responses this connection has received from Amazon DynamoDB. + """ + + DefaultRegionName = 'us-east-1' + """The default region name for DynamoDB API.""" + + ServiceName = 'DynamoDB' + """The name of the Service""" + + Version = '20111205' + """DynamoDB API version.""" + + ThruputError = "ProvisionedThroughputExceededException" + """The error response returned when provisioned throughput is exceeded""" + + SessionExpiredError = 'com.amazon.coral.service#ExpiredTokenException' + """The error response returned when the session token has expired""" + + ConditionalCheckFailedError = 'ConditionalCheckFailedException' + """The error response returned when a conditional check fails""" + + ValidationError = 'ValidationException' + """The error response returned when an item is invalid in some way""" + + ResponseError = DynamoDBResponseError + + NumberRetries = 10 + """The number of times an error is retried.""" + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + debug=0, security_token=None, region=None, + validate_certs=True, validate_checksums=True, profile_name=None): + if not region: + region_name = boto.config.get('DynamoDB', 'region', + self.DefaultRegionName) + for reg in boto.dynamodb.regions(): + if reg.name == region_name: + region = reg + break + + self.region = region + super(Layer1, self).__init__(self.region.endpoint, + aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + debug=debug, security_token=security_token, + validate_certs=validate_certs, + profile_name=profile_name) + self.throughput_exceeded_events = 0 + self._validate_checksums = boto.config.getbool( + 'DynamoDB', 'validate_checksums', validate_checksums) + + def _get_session_token(self): + self.provider = Provider(self._provider_type) + self._auth_handler.update_provider(self.provider) + + def _required_auth_capability(self): + return ['hmac-v4'] + + def make_request(self, action, body='', object_hook=None): + """ + :raises: ``DynamoDBExpiredTokenError`` if the security token expires.
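+ + Example (an illustrative sketch; it mirrors what describe_table + below does):: + + body = json.dumps({'TableName': 'mytable'}) + result = conn.make_request('DescribeTable', body)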
+ """ + headers = {'X-Amz-Target': '%s_%s.%s' % (self.ServiceName, + self.Version, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.0', + 'Content-Length': str(len(body))} + http_request = self.build_base_http_request('POST', '/', '/', + {}, headers, body, None) + start = time.time() + response = self._mexe(http_request, sender=None, + override_num_retries=self.NumberRetries, + retry_handler=self._retry_handler) + elapsed = (time.time() - start) * 1000 + request_id = response.getheader('x-amzn-RequestId') + boto.log.debug('RequestId: %s' % request_id) + boto.perflog.debug('%s: id=%s time=%sms', + headers['X-Amz-Target'], request_id, int(elapsed)) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + return json.loads(response_body, object_hook=object_hook) + + def _retry_handler(self, response, i, next_sleep): + status = None + if response.status == 400: + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + data = json.loads(response_body) + if self.ThruputError in data.get('__type'): + self.throughput_exceeded_events += 1 + msg = "%s, retry attempt %s" % (self.ThruputError, i) + next_sleep = self._exponential_time(i) + i += 1 + status = (msg, i, next_sleep) + if i == self.NumberRetries: + # If this was our last retry attempt, raise + # a specific error saying that the throughput + # was exceeded. + raise dynamodb_exceptions.DynamoDBThroughputExceededError( + response.status, response.reason, data) + elif self.SessionExpiredError in data.get('__type'): + msg = 'Renewing Session Token' + self._get_session_token() + status = (msg, i + self.num_retries - 1, 0) + elif self.ConditionalCheckFailedError in data.get('__type'): + raise dynamodb_exceptions.DynamoDBConditionalCheckFailedError( + response.status, response.reason, data) + elif self.ValidationError in data.get('__type'): + raise dynamodb_exceptions.DynamoDBValidationError( + response.status, response.reason, data) + else: + raise self.ResponseError(response.status, response.reason, + data) + expected_crc32 = response.getheader('x-amz-crc32') + if self._validate_checksums and expected_crc32 is not None: + boto.log.debug('Validating crc32 checksum for body: %s', + response.read().decode('utf-8')) + actual_crc32 = crc32(response.read()) & 0xffffffff + expected_crc32 = int(expected_crc32) + if actual_crc32 != expected_crc32: + msg = ("The calculated checksum %s did not match the expected " + "checksum %s" % (actual_crc32, expected_crc32)) + status = (msg, i + 1, self._exponential_time(i)) + return status + + def _exponential_time(self, i): + if i == 0: + next_sleep = 0 + else: + next_sleep = min(0.05 * (2 ** i), + boto.config.get('Boto', 'max_retry_delay', 60)) + return next_sleep + + def list_tables(self, limit=None, start_table=None): + """ + Returns a dictionary of results. The dictionary contains + a **TableNames** key whose value is a list of the table names. + The dictionary could also contain a **LastEvaluatedTableName** + key whose value would be the last table name returned if + the complete list of table names was not returned. This + value would then be passed as the ``start_table`` parameter on + a subsequent call to this method. + + :type limit: int + :param limit: The maximum number of tables to return. + + :type start_table: str + :param start_table: The name of the table that starts the + list. 
If you ran a previous list_tables and not + all results were returned, the response dict would + include a LastEvaluatedTableName attribute. Use + that value here to continue the listing. + """ + data = {} + if limit: + data['Limit'] = limit + if start_table: + data['ExclusiveStartTableName'] = start_table + json_input = json.dumps(data) + return self.make_request('ListTables', json_input) + + def describe_table(self, table_name): + """ + Returns information about the table, including the current + state of the table, the primary key schema, and when the + table was created. + + :type table_name: str + :param table_name: The name of the table to describe. + """ + data = {'TableName': table_name} + json_input = json.dumps(data) + return self.make_request('DescribeTable', json_input) + + def create_table(self, table_name, schema, provisioned_throughput): + """ + Add a new table to your account. The table name must be unique + among those associated with the account issuing the request. + This request triggers an asynchronous workflow to begin creating + the table. When the workflow is complete, the state of the + table will be ACTIVE. + + :type table_name: str + :param table_name: The name of the table to create. + + :type schema: dict + :param schema: A Python version of the KeySchema data structure + as defined by DynamoDB. + + :type provisioned_throughput: dict + :param provisioned_throughput: A Python version of the + ProvisionedThroughput data structure defined by + DynamoDB. + """ + data = {'TableName': table_name, + 'KeySchema': schema, + 'ProvisionedThroughput': provisioned_throughput} + json_input = json.dumps(data) + response_dict = self.make_request('CreateTable', json_input) + return response_dict + + def update_table(self, table_name, provisioned_throughput): + """ + Updates the provisioned throughput for a given table. + + :type table_name: str + :param table_name: The name of the table to update. + + :type provisioned_throughput: dict + :param provisioned_throughput: A Python version of the + ProvisionedThroughput data structure defined by + DynamoDB. + """ + data = {'TableName': table_name, + 'ProvisionedThroughput': provisioned_throughput} + json_input = json.dumps(data) + return self.make_request('UpdateTable', json_input) + + def delete_table(self, table_name): + """ + Deletes the table and all of its data. After this request + the table will be in the DELETING state until DynamoDB + completes the delete operation. + + :type table_name: str + :param table_name: The name of the table to delete. + """ + data = {'TableName': table_name} + json_input = json.dumps(data) + return self.make_request('DeleteTable', json_input) + + def get_item(self, table_name, key, attributes_to_get=None, + consistent_read=False, object_hook=None): + """ + Return a set of attributes for an item that matches + the supplied key. + + :type table_name: str + :param table_name: The name of the table containing the item. + + :type key: dict + :param key: A Python version of the Key data structure + defined by DynamoDB. + + :type attributes_to_get: list + :param attributes_to_get: A list of attribute names. + If supplied, only the specified attribute names will + be returned. Otherwise, all attributes will be returned. + + :type consistent_read: bool + :param consistent_read: If True, a consistent read + request is issued. Otherwise, an eventually consistent + request is issued.
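+ + Example (an illustrative sketch; the table name and key value are + hypothetical):: + + key = {'HashKeyElement': {'S': 'my-hash-key'}} + item = conn.get_item('mytable', key, consistent_read=True)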
+ """ + data = {'TableName': table_name, + 'Key': key} + if attributes_to_get: + data['AttributesToGet'] = attributes_to_get + if consistent_read: + data['ConsistentRead'] = True + json_input = json.dumps(data) + response = self.make_request('GetItem', json_input, + object_hook=object_hook) + if 'Item' not in response: + raise dynamodb_exceptions.DynamoDBKeyNotFoundError( + "Key does not exist." + ) + return response + + def batch_get_item(self, request_items, object_hook=None): + """ + Return a set of attributes for a multiple items in + multiple tables using their primary keys. + + :type request_items: dict + :param request_items: A Python version of the RequestItems + data structure defined by DynamoDB. + """ + # If the list is empty, return empty response + if not request_items: + return {} + data = {'RequestItems': request_items} + json_input = json.dumps(data) + return self.make_request('BatchGetItem', json_input, + object_hook=object_hook) + + def batch_write_item(self, request_items, object_hook=None): + """ + This operation enables you to put or delete several items + across multiple tables in a single API call. + + :type request_items: dict + :param request_items: A Python version of the RequestItems + data structure defined by DynamoDB. + """ + data = {'RequestItems': request_items} + json_input = json.dumps(data) + return self.make_request('BatchWriteItem', json_input, + object_hook=object_hook) + + def put_item(self, table_name, item, + expected=None, return_values=None, + object_hook=None): + """ + Create a new item or replace an old item with a new + item (including all attributes). If an item already + exists in the specified table with the same primary + key, the new item will completely replace the old item. + You can perform a conditional put by specifying an + expected rule. + + :type table_name: str + :param table_name: The name of the table in which to put the item. + + :type item: dict + :param item: A Python version of the Item data structure + defined by DynamoDB. + + :type expected: dict + :param expected: A Python version of the Expected + data structure defined by DynamoDB. + + :type return_values: str + :param return_values: Controls the return of attribute + name-value pairs before then were changed. Possible + values are: None or 'ALL_OLD'. If 'ALL_OLD' is + specified and the item is overwritten, the content + of the old item is returned. + """ + data = {'TableName': table_name, + 'Item': item} + if expected: + data['Expected'] = expected + if return_values: + data['ReturnValues'] = return_values + json_input = json.dumps(data) + return self.make_request('PutItem', json_input, + object_hook=object_hook) + + def update_item(self, table_name, key, attribute_updates, + expected=None, return_values=None, + object_hook=None): + """ + Edits an existing item's attributes. You can perform a conditional + update (insert a new attribute name-value pair if it doesn't exist, + or replace an existing name-value pair if it has certain expected + attribute values). + + :type table_name: str + :param table_name: The name of the table. + + :type key: dict + :param key: A Python version of the Key data structure + defined by DynamoDB which identifies the item to be updated. + + :type attribute_updates: dict + :param attribute_updates: A Python version of the AttributeUpdates + data structure defined by DynamoDB. + + :type expected: dict + :param expected: A Python version of the Expected + data structure defined by DynamoDB. 
+ + :type return_values: str + :param return_values: Controls the return of attribute + name-value pairs before they were changed. Possible + values are: None or 'ALL_OLD'. If 'ALL_OLD' is + specified and the item is updated, the content + of the old item is returned. + """ + data = {'TableName': table_name, + 'Key': key, + 'AttributeUpdates': attribute_updates} + if expected: + data['Expected'] = expected + if return_values: + data['ReturnValues'] = return_values + json_input = json.dumps(data) + return self.make_request('UpdateItem', json_input, + object_hook=object_hook) + + def delete_item(self, table_name, key, + expected=None, return_values=None, + object_hook=None): + """ + Delete an item and all of its attributes by primary key. + You can perform a conditional delete by specifying an + expected rule. + + :type table_name: str + :param table_name: The name of the table containing the item. + + :type key: dict + :param key: A Python version of the Key data structure + defined by DynamoDB. + + :type expected: dict + :param expected: A Python version of the Expected + data structure defined by DynamoDB. + + :type return_values: str + :param return_values: Controls the return of attribute + name-value pairs before they were changed. Possible + values are: None or 'ALL_OLD'. If 'ALL_OLD' is + specified and the item is deleted, the content + of the old item is returned. + """ + data = {'TableName': table_name, + 'Key': key} + if expected: + data['Expected'] = expected + if return_values: + data['ReturnValues'] = return_values + json_input = json.dumps(data) + return self.make_request('DeleteItem', json_input, + object_hook=object_hook) + + def query(self, table_name, hash_key_value, range_key_conditions=None, + attributes_to_get=None, limit=None, consistent_read=False, + scan_index_forward=True, exclusive_start_key=None, + object_hook=None, count=False): + """ + Perform a query of DynamoDB. This version is currently punting + and expecting you to provide a full and correct JSON body + which is passed as is to DynamoDB. + + :type table_name: str + :param table_name: The name of the table to query. + + :type hash_key_value: dict + :param hash_key_value: A DynamoDB-style HashKeyValue. + + :type range_key_conditions: dict + :param range_key_conditions: A Python version of the + RangeKeyConditions data structure. + + :type attributes_to_get: list + :param attributes_to_get: A list of attribute names. + If supplied, only the specified attribute names will + be returned. Otherwise, all attributes will be returned. + + :type limit: int + :param limit: The maximum number of items to return. + + :type count: bool + :param count: If True, Amazon DynamoDB returns a total + number of items for the Query operation, even if the + operation has no matching items for the assigned filter. + + :type consistent_read: bool + :param consistent_read: If True, a consistent read + request is issued. Otherwise, an eventually consistent + request is issued. + + :type scan_index_forward: bool + :param scan_index_forward: Specifies forward or backward + traversal of the index. Default is forward (True). + + :type exclusive_start_key: dict + :param exclusive_start_key: Primary key of the item from + which to continue an earlier query. This would be + provided as the LastEvaluatedKey in that query.
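+ + Example (an illustrative sketch; the table name and key value are + hypothetical):: + + result = conn.query('mytable', {'S': 'my-hash-key'}, limit=10) + for item in result['Items']: + print(item)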
+ """ + data = {'TableName': table_name, + 'HashKeyValue': hash_key_value} + if range_key_conditions: + data['RangeKeyCondition'] = range_key_conditions + if attributes_to_get: + data['AttributesToGet'] = attributes_to_get + if limit: + data['Limit'] = limit + if count: + data['Count'] = True + if consistent_read: + data['ConsistentRead'] = True + if scan_index_forward: + data['ScanIndexForward'] = True + else: + data['ScanIndexForward'] = False + if exclusive_start_key: + data['ExclusiveStartKey'] = exclusive_start_key + json_input = json.dumps(data) + return self.make_request('Query', json_input, + object_hook=object_hook) + + def scan(self, table_name, scan_filter=None, + attributes_to_get=None, limit=None, + exclusive_start_key=None, object_hook=None, count=False): + """ + Perform a scan of DynamoDB. This version is currently punting + and expecting you to provide a full and correct JSON body + which is passed as is to DynamoDB. + + :type table_name: str + :param table_name: The name of the table to scan. + + :type scan_filter: dict + :param scan_filter: A Python version of the + ScanFilter data structure. + + :type attributes_to_get: list + :param attributes_to_get: A list of attribute names. + If supplied, only the specified attribute names will + be returned. Otherwise, all attributes will be returned. + + :type limit: int + :param limit: The maximum number of items to evaluate. + + :type count: bool + :param count: If True, Amazon DynamoDB returns a total + number of items for the Scan operation, even if the + operation has no matching items for the assigned filter. + + :type exclusive_start_key: list or tuple + :param exclusive_start_key: Primary key of the item from + which to continue an earlier query. This would be + provided as the LastEvaluatedKey in that query. + """ + data = {'TableName': table_name} + if scan_filter: + data['ScanFilter'] = scan_filter + if attributes_to_get: + data['AttributesToGet'] = attributes_to_get + if limit: + data['Limit'] = limit + if count: + data['Count'] = True + if exclusive_start_key: + data['ExclusiveStartKey'] = exclusive_start_key + json_input = json.dumps(data) + return self.make_request('Scan', json_input, object_hook=object_hook) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/layer2.py b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/layer2.py new file mode 100644 index 0000000000000000000000000000000000000000..9510d49927be5cce5a1591edf6be8668c4eecdcc --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/layer2.py @@ -0,0 +1,806 @@ +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.dynamodb.layer1 import Layer1 +from boto.dynamodb.table import Table +from boto.dynamodb.schema import Schema +from boto.dynamodb.item import Item +from boto.dynamodb.batch import BatchList, BatchWriteList +from boto.dynamodb.types import get_dynamodb_type, Dynamizer, \ + LossyFloatDynamizer, NonBooleanDynamizer + + +class TableGenerator(object): + """ + This is an object that wraps up the table_generator function. + The only real reason to have this is that we want to be able + to accumulate and return the ConsumedCapacityUnits element that + is part of each response. + + :ivar last_evaluated_key: A sequence representing the key(s) + of the item last evaluated, or None if no additional + results are available. + + :ivar remaining: The remaining quantity of results requested. + + :ivar table: The table to which the call was made. + """ + + def __init__(self, table, callable, remaining, item_class, kwargs): + self.table = table + self.callable = callable + self.remaining = -1 if remaining is None else remaining + self.item_class = item_class + self.kwargs = kwargs + self._consumed_units = 0.0 + self.last_evaluated_key = None + self._count = 0 + self._scanned_count = 0 + self._response = None + + @property + def count(self): + """ + The total number of items retrieved thus far. This value changes + with iteration; even when issuing a call with count=True, it is + necessary to complete the iteration to obtain an accurate count value. + """ + self.response + return self._count + + @property + def scanned_count(self): + """ + As above, but representing the total number of items scanned by + DynamoDB, without regard to any filters. + """ + self.response + return self._scanned_count + + @property + def consumed_units(self): + """ + Returns a float representing the ConsumedCapacityUnits accumulated. + """ + self.response + return self._consumed_units + + @property + def response(self): + """ + The current response to the call from DynamoDB. + """ + return self.next_response() if self._response is None else self._response + + def next_response(self): + """ + Issue a call and return the result. You can invoke this method + while iterating over the TableGenerator in order to skip to the + next "page" of results.
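+ + For example (an illustrative sketch; assumes a layer2 Table + object):: + + gen = table.scan() + first_page = gen.response + second_page = gen.next_response()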
+ """ + # preserve any existing limit in case the user alters self.remaining + limit = self.kwargs.get('limit') + if (self.remaining > 0 and (limit is None or limit > self.remaining)): + self.kwargs['limit'] = self.remaining + self._response = self.callable(**self.kwargs) + self.kwargs['limit'] = limit + self._consumed_units += self._response.get('ConsumedCapacityUnits', 0.0) + self._count += self._response.get('Count', 0) + self._scanned_count += self._response.get('ScannedCount', 0) + # at the expense of a possibly gratuitous dynamize, ensure that + # early generator termination won't result in bad LEK values + if 'LastEvaluatedKey' in self._response: + lek = self._response['LastEvaluatedKey'] + esk = self.table.layer2.dynamize_last_evaluated_key(lek) + self.kwargs['exclusive_start_key'] = esk + lektuple = (lek['HashKeyElement'],) + if 'RangeKeyElement' in lek: + lektuple += (lek['RangeKeyElement'],) + self.last_evaluated_key = lektuple + else: + self.last_evaluated_key = None + return self._response + + def __iter__(self): + while self.remaining != 0: + response = self.response + for item in response.get('Items', []): + self.remaining -= 1 + yield self.item_class(self.table, attrs=item) + if self.remaining == 0: + break + if response is not self._response: + break + else: + if self.last_evaluated_key is not None: + self.next_response() + continue + break + if response is not self._response: + continue + break + + +class Layer2(object): + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + debug=0, security_token=None, region=None, + validate_certs=True, dynamizer=LossyFloatDynamizer, + profile_name=None): + self.layer1 = Layer1(aws_access_key_id, aws_secret_access_key, + is_secure, port, proxy, proxy_port, + debug, security_token, region, + validate_certs=validate_certs, + profile_name=profile_name) + self.dynamizer = dynamizer() + + def use_decimals(self, use_boolean=False): + """ + Use the ``decimal.Decimal`` type for encoding/decoding numeric types. + + By default, ints/floats are used to represent numeric types + ('N', 'NS') received from DynamoDB. Using the ``Decimal`` + type is recommended to prevent loss of precision. + + """ + # Eventually this should be made the default dynamizer. + self.dynamizer = Dynamizer() if use_boolean else NonBooleanDynamizer() + + def dynamize_attribute_updates(self, pending_updates): + """ + Convert a set of pending item updates into the structure + required by Layer1. + """ + d = {} + for attr_name in pending_updates: + action, value = pending_updates[attr_name] + if value is None: + # DELETE without an attribute value + d[attr_name] = {"Action": action} + else: + d[attr_name] = {"Action": action, + "Value": self.dynamizer.encode(value)} + return d + + def dynamize_item(self, item): + d = {} + for attr_name in item: + d[attr_name] = self.dynamizer.encode(item[attr_name]) + return d + + def dynamize_range_key_condition(self, range_key_condition): + """ + Convert a layer2 range_key_condition parameter into the + structure required by Layer1. + """ + return range_key_condition.to_dict() + + def dynamize_scan_filter(self, scan_filter): + """ + Convert a layer2 scan_filter parameter into the + structure required by Layer1. 
+ """ + d = None + if scan_filter: + d = {} + for attr_name in scan_filter: + condition = scan_filter[attr_name] + d[attr_name] = condition.to_dict() + return d + + def dynamize_expected_value(self, expected_value): + """ + Convert an expected_value parameter into the data structure + required for Layer1. + """ + d = None + if expected_value: + d = {} + for attr_name in expected_value: + attr_value = expected_value[attr_name] + if attr_value is True: + attr_value = {'Exists': True} + elif attr_value is False: + attr_value = {'Exists': False} + else: + val = self.dynamizer.encode(expected_value[attr_name]) + attr_value = {'Value': val} + d[attr_name] = attr_value + return d + + def dynamize_last_evaluated_key(self, last_evaluated_key): + """ + Convert a last_evaluated_key parameter into the data structure + required for Layer1. + """ + d = None + if last_evaluated_key: + hash_key = last_evaluated_key['HashKeyElement'] + d = {'HashKeyElement': self.dynamizer.encode(hash_key)} + if 'RangeKeyElement' in last_evaluated_key: + range_key = last_evaluated_key['RangeKeyElement'] + d['RangeKeyElement'] = self.dynamizer.encode(range_key) + return d + + def build_key_from_values(self, schema, hash_key, range_key=None): + """ + Build a Key structure to be used for accessing items + in Amazon DynamoDB. This method takes the supplied hash_key + and optional range_key and validates them against the + schema. If there is a mismatch, a TypeError is raised. + Otherwise, a Python dict version of a Amazon DynamoDB Key + data structure is returned. + + :type hash_key: int|float|str|unicode|Binary + :param hash_key: The hash key of the item you are looking for. + The type of the hash key should match the type defined in + the schema. + + :type range_key: int|float|str|unicode|Binary + :param range_key: The range key of the item your are looking for. + This should be supplied only if the schema requires a + range key. The type of the range key should match the + type defined in the schema. + """ + dynamodb_key = {} + dynamodb_value = self.dynamizer.encode(hash_key) + if list(dynamodb_value.keys())[0] != schema.hash_key_type: + msg = 'Hashkey must be of type: %s' % schema.hash_key_type + raise TypeError(msg) + dynamodb_key['HashKeyElement'] = dynamodb_value + if range_key is not None: + dynamodb_value = self.dynamizer.encode(range_key) + if list(dynamodb_value.keys())[0] != schema.range_key_type: + msg = 'RangeKey must be of type: %s' % schema.range_key_type + raise TypeError(msg) + dynamodb_key['RangeKeyElement'] = dynamodb_value + return dynamodb_key + + def new_batch_list(self): + """ + Return a new, empty :class:`boto.dynamodb.batch.BatchList` + object. + """ + return BatchList(self) + + def new_batch_write_list(self): + """ + Return a new, empty :class:`boto.dynamodb.batch.BatchWriteList` + object. + """ + return BatchWriteList(self) + + def list_tables(self, limit=None): + """ + Return a list of the names of all tables associated with the + current account and region. + + :type limit: int + :param limit: The maximum number of tables to return. 
+ """ + tables = [] + start_table = None + while not limit or len(tables) < limit: + this_round_limit = None + if limit: + this_round_limit = limit - len(tables) + this_round_limit = min(this_round_limit, 100) + result = self.layer1.list_tables(limit=this_round_limit, start_table=start_table) + tables.extend(result.get('TableNames', [])) + start_table = result.get('LastEvaluatedTableName', None) + if not start_table: + break + return tables + + def describe_table(self, name): + """ + Retrieve information about an existing table. + + :type name: str + :param name: The name of the desired table. + + """ + return self.layer1.describe_table(name) + + def table_from_schema(self, name, schema): + """ + Create a Table object from a schema. + + This method will create a Table object without + making any API calls. If you know the name and schema + of the table, you can use this method instead of + ``get_table``. + + Example usage:: + + table = layer2.table_from_schema( + 'tablename', + Schema.create(hash_key=('foo', 'N'))) + + :type name: str + :param name: The name of the table. + + :type schema: :class:`boto.dynamodb.schema.Schema` + :param schema: The schema associated with the table. + + :rtype: :class:`boto.dynamodb.table.Table` + :return: A Table object representing the table. + + """ + return Table.create_from_schema(self, name, schema) + + def get_table(self, name): + """ + Retrieve the Table object for an existing table. + + :type name: str + :param name: The name of the desired table. + + :rtype: :class:`boto.dynamodb.table.Table` + :return: A Table object representing the table. + """ + response = self.layer1.describe_table(name) + return Table(self, response) + + lookup = get_table + + def create_table(self, name, schema, read_units, write_units): + """ + Create a new Amazon DynamoDB table. + + :type name: str + :param name: The name of the desired table. + + :type schema: :class:`boto.dynamodb.schema.Schema` + :param schema: The Schema object that defines the schema used + by this table. + + :type read_units: int + :param read_units: The value for ReadCapacityUnits. + + :type write_units: int + :param write_units: The value for WriteCapacityUnits. + + :rtype: :class:`boto.dynamodb.table.Table` + :return: A Table object representing the new Amazon DynamoDB table. + """ + response = self.layer1.create_table(name, schema.dict, + {'ReadCapacityUnits': read_units, + 'WriteCapacityUnits': write_units}) + return Table(self, response) + + def update_throughput(self, table, read_units, write_units): + """ + Update the ProvisionedThroughput for the Amazon DynamoDB Table. + + :type table: :class:`boto.dynamodb.table.Table` + :param table: The Table object whose throughput is being updated. + + :type read_units: int + :param read_units: The new value for ReadCapacityUnits. + + :type write_units: int + :param write_units: The new value for WriteCapacityUnits. + """ + response = self.layer1.update_table(table.name, + {'ReadCapacityUnits': read_units, + 'WriteCapacityUnits': write_units}) + table.update_from_response(response) + + def delete_table(self, table): + """ + Delete this table and all items in it. After calling this + the Table objects status attribute will be set to 'DELETING'. + + :type table: :class:`boto.dynamodb.table.Table` + :param table: The Table object that is being deleted. 
+ """ + response = self.layer1.delete_table(table.name) + table.update_from_response(response) + + def create_schema(self, hash_key_name, hash_key_proto_value, + range_key_name=None, range_key_proto_value=None): + """ + Create a Schema object used when creating a Table. + + :type hash_key_name: str + :param hash_key_name: The name of the HashKey for the schema. + + :type hash_key_proto_value: int|long|float|str|unicode|Binary + :param hash_key_proto_value: A sample or prototype of the type + of value you want to use for the HashKey. Alternatively, + you can also just pass in the Python type (e.g. int, float, etc.). + + :type range_key_name: str + :param range_key_name: The name of the RangeKey for the schema. + This parameter is optional. + + :type range_key_proto_value: int|long|float|str|unicode|Binary + :param range_key_proto_value: A sample or prototype of the type + of value you want to use for the RangeKey. Alternatively, + you can also pass in the Python type (e.g. int, float, etc.) + This parameter is optional. + """ + hash_key = (hash_key_name, get_dynamodb_type(hash_key_proto_value)) + if range_key_name and range_key_proto_value is not None: + range_key = (range_key_name, + get_dynamodb_type(range_key_proto_value)) + else: + range_key = None + return Schema.create(hash_key, range_key) + + def get_item(self, table, hash_key, range_key=None, + attributes_to_get=None, consistent_read=False, + item_class=Item): + """ + Retrieve an existing item from the table. + + :type table: :class:`boto.dynamodb.table.Table` + :param table: The Table object from which the item is retrieved. + + :type hash_key: int|long|float|str|unicode|Binary + :param hash_key: The HashKey of the requested item. The + type of the value must match the type defined in the + schema for the table. + + :type range_key: int|long|float|str|unicode|Binary + :param range_key: The optional RangeKey of the requested item. + The type of the value must match the type defined in the + schema for the table. + + :type attributes_to_get: list + :param attributes_to_get: A list of attribute names. + If supplied, only the specified attribute names will + be returned. Otherwise, all attributes will be returned. + + :type consistent_read: bool + :param consistent_read: If True, a consistent read + request is issued. Otherwise, an eventually consistent + request is issued. + + :type item_class: Class + :param item_class: Allows you to override the class used + to generate the items. This should be a subclass of + :class:`boto.dynamodb.item.Item` + """ + key = self.build_key_from_values(table.schema, hash_key, range_key) + response = self.layer1.get_item(table.name, key, + attributes_to_get, consistent_read, + object_hook=self.dynamizer.decode) + item = item_class(table, hash_key, range_key, response['Item']) + if 'ConsumedCapacityUnits' in response: + item.consumed_units = response['ConsumedCapacityUnits'] + return item + + def batch_get_item(self, batch_list): + """ + Return a set of attributes for a multiple items in + multiple tables using their primary keys. + + :type batch_list: :class:`boto.dynamodb.batch.BatchList` + :param batch_list: A BatchList object which consists of a + list of :class:`boto.dynamoddb.batch.Batch` objects. + Each Batch object contains the information about one + batch of objects that you wish to retrieve in this + request. 
+ """ + request_items = batch_list.to_dict() + return self.layer1.batch_get_item(request_items, + object_hook=self.dynamizer.decode) + + def batch_write_item(self, batch_list): + """ + Performs multiple Puts and Deletes in one batch. + + :type batch_list: :class:`boto.dynamodb.batch.BatchWriteList` + :param batch_list: A BatchWriteList object which consists of a + list of :class:`boto.dynamoddb.batch.BatchWrite` objects. + Each Batch object contains the information about one + batch of objects that you wish to put or delete. + """ + request_items = batch_list.to_dict() + return self.layer1.batch_write_item(request_items, + object_hook=self.dynamizer.decode) + + def put_item(self, item, expected_value=None, return_values=None): + """ + Store a new item or completely replace an existing item + in Amazon DynamoDB. + + :type item: :class:`boto.dynamodb.item.Item` + :param item: The Item to write to Amazon DynamoDB. + + :type expected_value: dict + :param expected_value: A dictionary of name/value pairs that you expect. + This dictionary should have name/value pairs where the name + is the name of the attribute and the value is either the value + you are expecting or False if you expect the attribute not to + exist. + + :type return_values: str + :param return_values: Controls the return of attribute + name-value pairs before then were changed. Possible + values are: None or 'ALL_OLD'. If 'ALL_OLD' is + specified and the item is overwritten, the content + of the old item is returned. + """ + expected_value = self.dynamize_expected_value(expected_value) + response = self.layer1.put_item(item.table.name, + self.dynamize_item(item), + expected_value, return_values, + object_hook=self.dynamizer.decode) + if 'ConsumedCapacityUnits' in response: + item.consumed_units = response['ConsumedCapacityUnits'] + return response + + def update_item(self, item, expected_value=None, return_values=None): + """ + Commit pending item updates to Amazon DynamoDB. + + :type item: :class:`boto.dynamodb.item.Item` + :param item: The Item to update in Amazon DynamoDB. It is expected + that you would have called the add_attribute, put_attribute + and/or delete_attribute methods on this Item prior to calling + this method. Those queued changes are what will be updated. + + :type expected_value: dict + :param expected_value: A dictionary of name/value pairs that you + expect. This dictionary should have name/value pairs where the + name is the name of the attribute and the value is either the + value you are expecting or False if you expect the attribute + not to exist. + + :type return_values: str + :param return_values: Controls the return of attribute name/value pairs + before they were updated. Possible values are: None, 'ALL_OLD', + 'UPDATED_OLD', 'ALL_NEW' or 'UPDATED_NEW'. If 'ALL_OLD' is + specified and the item is overwritten, the content of the old item + is returned. If 'ALL_NEW' is specified, then all the attributes of + the new version of the item are returned. If 'UPDATED_NEW' is + specified, the new versions of only the updated attributes are + returned. 
+ + """ + expected_value = self.dynamize_expected_value(expected_value) + key = self.build_key_from_values(item.table.schema, + item.hash_key, item.range_key) + attr_updates = self.dynamize_attribute_updates(item._updates) + + response = self.layer1.update_item(item.table.name, key, + attr_updates, + expected_value, return_values, + object_hook=self.dynamizer.decode) + item._updates.clear() + if 'ConsumedCapacityUnits' in response: + item.consumed_units = response['ConsumedCapacityUnits'] + return response + + def delete_item(self, item, expected_value=None, return_values=None): + """ + Delete the item from Amazon DynamoDB. + + :type item: :class:`boto.dynamodb.item.Item` + :param item: The Item to delete from Amazon DynamoDB. + + :type expected_value: dict + :param expected_value: A dictionary of name/value pairs that you expect. + This dictionary should have name/value pairs where the name + is the name of the attribute and the value is either the value + you are expecting or False if you expect the attribute not to + exist. + + :type return_values: str + :param return_values: Controls the return of attribute + name-value pairs before then were changed. Possible + values are: None or 'ALL_OLD'. If 'ALL_OLD' is + specified and the item is overwritten, the content + of the old item is returned. + """ + expected_value = self.dynamize_expected_value(expected_value) + key = self.build_key_from_values(item.table.schema, + item.hash_key, item.range_key) + return self.layer1.delete_item(item.table.name, key, + expected=expected_value, + return_values=return_values, + object_hook=self.dynamizer.decode) + + def query(self, table, hash_key, range_key_condition=None, + attributes_to_get=None, request_limit=None, + max_results=None, consistent_read=False, + scan_index_forward=True, exclusive_start_key=None, + item_class=Item, count=False): + """ + Perform a query on the table. + + :type table: :class:`boto.dynamodb.table.Table` + :param table: The Table object that is being queried. + + :type hash_key: int|long|float|str|unicode|Binary + :param hash_key: The HashKey of the requested item. The + type of the value must match the type defined in the + schema for the table. + + :type range_key_condition: :class:`boto.dynamodb.condition.Condition` + :param range_key_condition: A Condition object. + Condition object can be one of the following types: + + EQ|LE|LT|GE|GT|BEGINS_WITH|BETWEEN + + The only condition which expects or will accept two + values is 'BETWEEN', otherwise a single value should + be passed to the Condition constructor. + + :type attributes_to_get: list + :param attributes_to_get: A list of attribute names. + If supplied, only the specified attribute names will + be returned. Otherwise, all attributes will be returned. + + :type request_limit: int + :param request_limit: The maximum number of items to retrieve + from Amazon DynamoDB on each request. You may want to set + a specific request_limit based on the provisioned throughput + of your table. The default behavior is to retrieve as many + results as possible per request. + + :type max_results: int + :param max_results: The maximum number of results that will + be retrieved from Amazon DynamoDB in total. For example, + if you only wanted to see the first 100 results from the + query, regardless of how many were actually available, you + could set max_results to 100 and the generator returned + from the query method will only yeild 100 results max. 
+
+        :type consistent_read: bool
+        :param consistent_read: If True, a consistent read
+            request is issued. Otherwise, an eventually consistent
+            request is issued.
+
+        :type scan_index_forward: bool
+        :param scan_index_forward: Specifies forward or backward
+            traversal of the index. Default is forward (True).
+
+        :type count: bool
+        :param count: If True, Amazon DynamoDB returns a total
+            number of items for the Query operation, even if the
+            operation has no matching items for the assigned filter.
+            If count is True, the actual items are not returned and
+            the count is accessible as the ``count`` attribute of
+            the returned object.
+
+        :type exclusive_start_key: list or tuple
+        :param exclusive_start_key: Primary key of the item from
+            which to continue an earlier query. This would be
+            provided as the LastEvaluatedKey in that query.
+
+        :type item_class: Class
+        :param item_class: Allows you to override the class used
+            to generate the items. This should be a subclass of
+            :class:`boto.dynamodb.item.Item`
+
+        :rtype: :class:`boto.dynamodb.layer2.TableGenerator`
+        """
+        if range_key_condition:
+            rkc = self.dynamize_range_key_condition(range_key_condition)
+        else:
+            rkc = None
+        if exclusive_start_key:
+            esk = self.build_key_from_values(table.schema,
+                                             *exclusive_start_key)
+        else:
+            esk = None
+        kwargs = {'table_name': table.name,
+                  'hash_key_value': self.dynamizer.encode(hash_key),
+                  'range_key_conditions': rkc,
+                  'attributes_to_get': attributes_to_get,
+                  'limit': request_limit,
+                  'count': count,
+                  'consistent_read': consistent_read,
+                  'scan_index_forward': scan_index_forward,
+                  'exclusive_start_key': esk,
+                  'object_hook': self.dynamizer.decode}
+        return TableGenerator(table, self.layer1.query,
+                              max_results, item_class, kwargs)
+
+    def scan(self, table, scan_filter=None,
+             attributes_to_get=None, request_limit=None, max_results=None,
+             exclusive_start_key=None, item_class=Item, count=False):
+        """
+        Perform a scan of DynamoDB.
+
+        :type table: :class:`boto.dynamodb.table.Table`
+        :param table: The Table object that is being scanned.
+
+        :type scan_filter: A dict
+        :param scan_filter: A dictionary where the key is the
+            attribute name and the value is a
+            :class:`boto.dynamodb.condition.Condition` object.
+            Valid Condition objects include:
+
+            * EQ - equal (1)
+            * NE - not equal (1)
+            * LE - less than or equal (1)
+            * LT - less than (1)
+            * GE - greater than or equal (1)
+            * GT - greater than (1)
+            * NOT_NULL - attribute exists (0, use None)
+            * NULL - attribute does not exist (0, use None)
+            * CONTAINS - substring or value in list (1)
+            * NOT_CONTAINS - absence of substring or value in list (1)
+            * BEGINS_WITH - substring prefix (1)
+            * IN - exact match in list (N)
+            * BETWEEN - >= first value, <= second value (2)
+
+        :type attributes_to_get: list
+        :param attributes_to_get: A list of attribute names.
+            If supplied, only the specified attribute names will
+            be returned. Otherwise, all attributes will be returned.
+
+        :type request_limit: int
+        :param request_limit: The maximum number of items to retrieve
+            from Amazon DynamoDB on each request. You may want to set
+            a specific request_limit based on the provisioned throughput
+            of your table. The default behavior is to retrieve as many
+            results as possible per request.
+
+        :type max_results: int
+        :param max_results: The maximum number of results that will
+            be retrieved from Amazon DynamoDB in total. For example,
+            if you only wanted to see the first 100 results from the
+            scan, regardless of how many were actually available, you
+            could set max_results to 100 and the generator returned
+            from the scan method will only yield 100 results max.
+
+        :type count: bool
+        :param count: If True, Amazon DynamoDB returns a total
+            number of items for the Scan operation, even if the
+            operation has no matching items for the assigned filter.
+            If count is True, the actual items are not returned and
+            the count is accessible as the ``count`` attribute of
+            the returned object.
+
+        :type exclusive_start_key: list or tuple
+        :param exclusive_start_key: Primary key of the item from
+            which to continue an earlier scan. This would be
+            provided as the LastEvaluatedKey in that scan.
+
+        :type item_class: Class
+        :param item_class: Allows you to override the class used
+            to generate the items. This should be a subclass of
+            :class:`boto.dynamodb.item.Item`
+
+        :rtype: :class:`boto.dynamodb.layer2.TableGenerator`
+        """
+        if exclusive_start_key:
+            esk = self.build_key_from_values(table.schema,
+                                             *exclusive_start_key)
+        else:
+            esk = None
+        kwargs = {'table_name': table.name,
+                  'scan_filter': self.dynamize_scan_filter(scan_filter),
+                  'attributes_to_get': attributes_to_get,
+                  'limit': request_limit,
+                  'count': count,
+                  'exclusive_start_key': esk,
+                  'object_hook': self.dynamizer.decode}
+        return TableGenerator(table, self.layer1.scan,
+                              max_results, item_class, kwargs)
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/schema.py b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/schema.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a697a827d48ee46a91d1e4db12ce2b9e40127a9
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/schema.py
@@ -0,0 +1,112 @@
+# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+
+class Schema(object):
+    """
+    Represents a DynamoDB schema.
+
+    :ivar hash_key_name: The name of the hash key of the schema.
+    :ivar hash_key_type: The DynamoDB type specification for the
+        hash key of the schema.
+    :ivar range_key_name: The name of the range key of the schema
+        or None if no range key is defined.
+    :ivar range_key_type: The DynamoDB type specification for the
+        range key of the schema or None if no range key is defined.
+    :ivar dict: The underlying Python dictionary that needs to be
+        passed to Layer1 methods.
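+
+    Example usage (a minimal sketch; the attribute names are
+    hypothetical)::
+
+        schema = Schema.create(hash_key=('username', 'S'),
+                               range_key=('date_posted', 'N'))
+        schema.hash_key_name    # 'username'
+        schema.range_key_type   # 'N'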
+ """ + + def __init__(self, schema_dict): + self._dict = schema_dict + + def __repr__(self): + if self.range_key_name: + s = 'Schema(%s:%s)' % (self.hash_key_name, self.range_key_name) + else: + s = 'Schema(%s)' % self.hash_key_name + return s + + @classmethod + def create(cls, hash_key, range_key=None): + """Convenience method to create a schema object. + + Example usage:: + + schema = Schema.create(hash_key=('foo', 'N')) + schema2 = Schema.create(hash_key=('foo', 'N'), + range_key=('bar', 'S')) + + :type hash_key: tuple + :param hash_key: A tuple of (hash_key_name, hash_key_type) + + :type range_key: tuple + :param hash_key: A tuple of (range_key_name, range_key_type) + + """ + reconstructed = { + 'HashKeyElement': { + 'AttributeName': hash_key[0], + 'AttributeType': hash_key[1], + } + } + if range_key is not None: + reconstructed['RangeKeyElement'] = { + 'AttributeName': range_key[0], + 'AttributeType': range_key[1], + } + instance = cls(None) + instance._dict = reconstructed + return instance + + @property + def dict(self): + return self._dict + + @property + def hash_key_name(self): + return self._dict['HashKeyElement']['AttributeName'] + + @property + def hash_key_type(self): + return self._dict['HashKeyElement']['AttributeType'] + + @property + def range_key_name(self): + name = None + if 'RangeKeyElement' in self._dict: + name = self._dict['RangeKeyElement']['AttributeName'] + return name + + @property + def range_key_type(self): + type = None + if 'RangeKeyElement' in self._dict: + type = self._dict['RangeKeyElement']['AttributeType'] + return type + + def __eq__(self, other): + return (self.hash_key_name == other.hash_key_name and + self.hash_key_type == other.hash_key_type and + self.range_key_name == other.range_key_name and + self.range_key_type == other.range_key_type) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/table.py b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/table.py new file mode 100644 index 0000000000000000000000000000000000000000..152b95d908a74c0bf3c9de06b85f07c1b8a93a3a --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/table.py @@ -0,0 +1,546 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+#
+
+from boto.dynamodb.batch import BatchList
+from boto.dynamodb.schema import Schema
+from boto.dynamodb.item import Item
+from boto.dynamodb import exceptions as dynamodb_exceptions
+import time
+
+
+class TableBatchGenerator(object):
+    """
+    A low-level generator used to page through results from
+    batch_get_item operations.
+
+    :ivar consumed_units: The number of ConsumedCapacityUnits
+        accumulated thus far for this generator.
+    """
+
+    def __init__(self, table, keys, attributes_to_get=None,
+                 consistent_read=False):
+        self.table = table
+        self.keys = keys
+        self.consumed_units = 0
+        self.attributes_to_get = attributes_to_get
+        self.consistent_read = consistent_read
+
+    def _queue_unprocessed(self, res):
+        if u'UnprocessedKeys' not in res:
+            return
+        if self.table.name not in res[u'UnprocessedKeys']:
+            return
+
+        keys = res[u'UnprocessedKeys'][self.table.name][u'Keys']
+
+        for key in keys:
+            h = key[u'HashKeyElement']
+            r = key[u'RangeKeyElement'] if u'RangeKeyElement' in key else None
+            self.keys.append((h, r))
+
+    def __iter__(self):
+        while self.keys:
+            # Build the next batch
+            batch = BatchList(self.table.layer2)
+            batch.add_batch(self.table, self.keys[:100],
+                            self.attributes_to_get)
+            res = batch.submit()
+
+            # parse the results
+            if self.table.name not in res[u'Responses']:
+                continue
+            self.consumed_units += res[u'Responses'][self.table.name][u'ConsumedCapacityUnits']
+            for elem in res[u'Responses'][self.table.name][u'Items']:
+                yield elem
+
+            # re-queue unprocessed keys
+            self.keys = self.keys[100:]
+            self._queue_unprocessed(res)
+
+
+class Table(object):
+    """
+    An Amazon DynamoDB table.
+
+    :ivar name: The name of the table.
+    :ivar create_time: The date and time that the table was created.
+    :ivar status: The current status of the table. One of:
+        'ACTIVE', 'UPDATING', 'DELETING'.
+    :ivar schema: A :class:`boto.dynamodb.schema.Schema` object representing
+        the schema defined for the table.
+    :ivar item_count: The number of items in the table. This value is
+        set only when the Table object is created or refreshed and
+        may not reflect the actual count.
+    :ivar size_bytes: Total size of the specified table, in bytes.
+        Amazon DynamoDB updates this value approximately every six hours.
+        Recent changes might not be reflected in this value.
+    :ivar read_units: The ReadCapacityUnits of the table's
+        ProvisionedThroughput.
+    :ivar write_units: The WriteCapacityUnits of the table's
+        ProvisionedThroughput.
+    """
+
+    def __init__(self, layer2, response):
+        """
+
+        :type layer2: :class:`boto.dynamodb.layer2.Layer2`
+        :param layer2: A `Layer2` api object.
+
+        :type response: dict
+        :param response: The output of
+            `boto.dynamodb.layer1.Layer1.describe_table`.
+
+        """
+        self.layer2 = layer2
+        self._dict = {}
+        self.update_from_response(response)
+
+    @classmethod
+    def create_from_schema(cls, layer2, name, schema):
+        """Create a Table object.
+
+        If you know the name and schema of your table, you can
+        create a ``Table`` object without having to make any
+        API calls (normally an API call is made to retrieve
+        the schema of a table).
+
+        Example usage::
+
+            table = Table.create_from_schema(
+                boto.connect_dynamodb(),
+                'tablename',
+                Schema.create(hash_key=('keyname', 'N')))
+
+        :type layer2: :class:`boto.dynamodb.layer2.Layer2`
+        :param layer2: A ``Layer2`` api object.
+
+        :type name: str
+        :param name: The name of the table.
+ + :type schema: :class:`boto.dynamodb.schema.Schema` + :param schema: The schema associated with the table. + + :rtype: :class:`boto.dynamodb.table.Table` + :return: A Table object representing the table. + + """ + table = cls(layer2, {'Table': {'TableName': name}}) + table._schema = schema + return table + + def __repr__(self): + return 'Table(%s)' % self.name + + @property + def name(self): + return self._dict['TableName'] + + @property + def create_time(self): + return self._dict.get('CreationDateTime', None) + + @property + def status(self): + return self._dict.get('TableStatus', None) + + @property + def item_count(self): + return self._dict.get('ItemCount', 0) + + @property + def size_bytes(self): + return self._dict.get('TableSizeBytes', 0) + + @property + def schema(self): + return self._schema + + @property + def read_units(self): + try: + return self._dict['ProvisionedThroughput']['ReadCapacityUnits'] + except KeyError: + return None + + @property + def write_units(self): + try: + return self._dict['ProvisionedThroughput']['WriteCapacityUnits'] + except KeyError: + return None + + def update_from_response(self, response): + """ + Update the state of the Table object based on the response + data received from Amazon DynamoDB. + """ + # 'Table' is from a describe_table call. + if 'Table' in response: + self._dict.update(response['Table']) + # 'TableDescription' is from a create_table call. + elif 'TableDescription' in response: + self._dict.update(response['TableDescription']) + if 'KeySchema' in self._dict: + self._schema = Schema(self._dict['KeySchema']) + + def refresh(self, wait_for_active=False, retry_seconds=5): + """ + Refresh all of the fields of the Table object by calling + the underlying DescribeTable request. + + :type wait_for_active: bool + :param wait_for_active: If True, this command will not return + until the table status, as returned from Amazon DynamoDB, is + 'ACTIVE'. + + :type retry_seconds: int + :param retry_seconds: If wait_for_active is True, this + parameter controls the number of seconds of delay between + calls to update_table in Amazon DynamoDB. Default is 5 seconds. + """ + done = False + while not done: + response = self.layer2.describe_table(self.name) + self.update_from_response(response) + if wait_for_active: + if self.status == 'ACTIVE': + done = True + else: + time.sleep(retry_seconds) + else: + done = True + + def update_throughput(self, read_units, write_units): + """ + Update the ProvisionedThroughput for the Amazon DynamoDB Table. + + :type read_units: int + :param read_units: The new value for ReadCapacityUnits. + + :type write_units: int + :param write_units: The new value for WriteCapacityUnits. + """ + self.layer2.update_throughput(self, read_units, write_units) + + def delete(self): + """ + Delete this table and all items in it. After calling this + the Table objects status attribute will be set to 'DELETING'. + """ + self.layer2.delete_table(self) + + def get_item(self, hash_key, range_key=None, + attributes_to_get=None, consistent_read=False, + item_class=Item): + """ + Retrieve an existing item from the table. + + :type hash_key: int|long|float|str|unicode|Binary + :param hash_key: The HashKey of the requested item. The + type of the value must match the type defined in the + schema for the table. + + :type range_key: int|long|float|str|unicode|Binary + :param range_key: The optional RangeKey of the requested item. + The type of the value must match the type defined in the + schema for the table. 
+
+        :type attributes_to_get: list
+        :param attributes_to_get: A list of attribute names.
+            If supplied, only the specified attribute names will
+            be returned. Otherwise, all attributes will be returned.
+
+        :type consistent_read: bool
+        :param consistent_read: If True, a consistent read
+            request is issued. Otherwise, an eventually consistent
+            request is issued.
+
+        :type item_class: Class
+        :param item_class: Allows you to override the class used
+            to generate the items. This should be a subclass of
+            :class:`boto.dynamodb.item.Item`
+        """
+        return self.layer2.get_item(self, hash_key, range_key,
+                                    attributes_to_get, consistent_read,
+                                    item_class)
+    lookup = get_item
+
+    def has_item(self, hash_key, range_key=None, consistent_read=False):
+        """
+        Checks the table to see if the Item with the specified ``hash_key``
+        exists. This may save a tiny bit of time/bandwidth over a
+        straight :py:meth:`get_item` if you have no intention to touch
+        the data that is returned, since this method specifically tells
+        Amazon not to return anything but the Item's key.
+
+        :type hash_key: int|long|float|str|unicode|Binary
+        :param hash_key: The HashKey of the requested item. The
+            type of the value must match the type defined in the
+            schema for the table.
+
+        :type range_key: int|long|float|str|unicode|Binary
+        :param range_key: The optional RangeKey of the requested item.
+            The type of the value must match the type defined in the
+            schema for the table.
+
+        :type consistent_read: bool
+        :param consistent_read: If True, a consistent read
+            request is issued. Otherwise, an eventually consistent
+            request is issued.
+
+        :rtype: bool
+        :returns: ``True`` if the Item exists, ``False`` if not.
+        """
+        try:
+            # Attempt to get the key. If it can't be found, it'll raise
+            # an exception.
+            self.get_item(hash_key, range_key=range_key,
+                          # This minimizes the size of the response body.
+                          attributes_to_get=[hash_key],
+                          consistent_read=consistent_read)
+        except dynamodb_exceptions.DynamoDBKeyNotFoundError:
+            # Key doesn't exist.
+            return False
+        return True
+
+    def new_item(self, hash_key=None, range_key=None, attrs=None,
+                 item_class=Item):
+        """
+        Return a new, unsaved Item which can later be PUT to
+        Amazon DynamoDB.
+
+        This method has explicit (but optional) parameters for
+        the hash_key and range_key values of the item. You can use
+        these explicit parameters when calling the method, such as::
+
+            >>> my_item = my_table.new_item(hash_key='a', range_key=1,
+                                            attrs={'key1': 'val1', 'key2': 'val2'})
+            >>> my_item
+            {u'bar': 1, u'foo': 'a', 'key1': 'val1', 'key2': 'val2'}
+
+        Or, if you prefer, you can simply put the hash_key and range_key
+        in the attrs dictionary itself, like this::
+
+            >>> attrs = {'foo': 'a', 'bar': 1, 'key1': 'val1', 'key2': 'val2'}
+            >>> my_item = my_table.new_item(attrs=attrs)
+            >>> my_item
+            {u'bar': 1, u'foo': 'a', 'key1': 'val1', 'key2': 'val2'}
+
+        The effect is the same.
+
+        .. note::
+            The explicit parameters take priority over the values in
+            the attrs dict. So, if you have a hash_key or range_key
+            in the attrs dict and you also supply either or both using
+            the explicit parameters, the values in the attrs will be
+            ignored.
+
+        :type hash_key: int|long|float|str|unicode|Binary
+        :param hash_key: The HashKey of the new item. The
+            type of the value must match the type defined in the
+            schema for the table.
+
+        :type range_key: int|long|float|str|unicode|Binary
+        :param range_key: The optional RangeKey of the new item.
+            The type of the value must match the type defined in the
+            schema for the table.
+
+        :type attrs: dict
+        :param attrs: A dictionary of key value pairs used to
+            populate the new item.
+
+        :type item_class: Class
+        :param item_class: Allows you to override the class used
+            to generate the items. This should be a subclass of
+            :class:`boto.dynamodb.item.Item`
+        """
+        return item_class(self, hash_key, range_key, attrs)
+
+    def query(self, hash_key, *args, **kw):
+        """
+        Perform a query on the table.
+
+        :type hash_key: int|long|float|str|unicode|Binary
+        :param hash_key: The HashKey of the requested item. The
+            type of the value must match the type defined in the
+            schema for the table.
+
+        :type range_key_condition: :class:`boto.dynamodb.condition.Condition`
+        :param range_key_condition: A Condition object.
+            Condition object can be one of the following types:
+
+            EQ|LE|LT|GE|GT|BEGINS_WITH|BETWEEN
+
+            The only condition which expects or will accept two
+            values is 'BETWEEN', otherwise a single value should
+            be passed to the Condition constructor.
+
+        :type attributes_to_get: list
+        :param attributes_to_get: A list of attribute names.
+            If supplied, only the specified attribute names will
+            be returned. Otherwise, all attributes will be returned.
+
+        :type request_limit: int
+        :param request_limit: The maximum number of items to retrieve
+            from Amazon DynamoDB on each request. You may want to set
+            a specific request_limit based on the provisioned throughput
+            of your table. The default behavior is to retrieve as many
+            results as possible per request.
+
+        :type max_results: int
+        :param max_results: The maximum number of results that will
+            be retrieved from Amazon DynamoDB in total. For example,
+            if you only wanted to see the first 100 results from the
+            query, regardless of how many were actually available, you
+            could set max_results to 100 and the generator returned
+            from the query method will only yield 100 results max.
+
+        :type consistent_read: bool
+        :param consistent_read: If True, a consistent read
+            request is issued. Otherwise, an eventually consistent
+            request is issued.
+
+        :type scan_index_forward: bool
+        :param scan_index_forward: Specifies forward or backward
+            traversal of the index. Default is forward (True).
+
+        :type exclusive_start_key: list or tuple
+        :param exclusive_start_key: Primary key of the item from
+            which to continue an earlier query. This would be
+            provided as the LastEvaluatedKey in that query.
+
+        :type count: bool
+        :param count: If True, Amazon DynamoDB returns a total
+            number of items for the Query operation, even if the
+            operation has no matching items for the assigned filter.
+            If count is True, the actual items are not returned and
+            the count is accessible as the ``count`` attribute of
+            the returned object.
+
+        :type item_class: Class
+        :param item_class: Allows you to override the class used
+            to generate the items. This should be a subclass of
+            :class:`boto.dynamodb.item.Item`
+        """
+        return self.layer2.query(self, hash_key, *args, **kw)
+
+    def scan(self, *args, **kw):
+        """
+        Scan through this table; this is a very long
+        and expensive operation, and should be avoided if
+        at all possible.
+
+        :type scan_filter: A dict
+        :param scan_filter: A dictionary where the key is the
+            attribute name and the value is a
+            :class:`boto.dynamodb.condition.Condition` object.
+            Valid Condition objects include:
+
+            * EQ - equal (1)
+            * NE - not equal (1)
+            * LE - less than or equal (1)
+            * LT - less than (1)
+            * GE - greater than or equal (1)
+            * GT - greater than (1)
+            * NOT_NULL - attribute exists (0, use None)
+            * NULL - attribute does not exist (0, use None)
+            * CONTAINS - substring or value in list (1)
+            * NOT_CONTAINS - absence of substring or value in list (1)
+            * BEGINS_WITH - substring prefix (1)
+            * IN - exact match in list (N)
+            * BETWEEN - >= first value, <= second value (2)
+
+        :type attributes_to_get: list
+        :param attributes_to_get: A list of attribute names.
+            If supplied, only the specified attribute names will
+            be returned. Otherwise, all attributes will be returned.
+
+        :type request_limit: int
+        :param request_limit: The maximum number of items to retrieve
+            from Amazon DynamoDB on each request. You may want to set
+            a specific request_limit based on the provisioned throughput
+            of your table. The default behavior is to retrieve as many
+            results as possible per request.
+
+        :type max_results: int
+        :param max_results: The maximum number of results that will
+            be retrieved from Amazon DynamoDB in total. For example,
+            if you only wanted to see the first 100 results from the
+            scan, regardless of how many were actually available, you
+            could set max_results to 100 and the generator returned
+            from the scan method will only yield 100 results max.
+
+        :type count: bool
+        :param count: If True, Amazon DynamoDB returns a total
+            number of items for the Scan operation, even if the
+            operation has no matching items for the assigned filter.
+            If count is True, the actual items are not returned and
+            the count is accessible as the ``count`` attribute of
+            the returned object.
+
+        :type exclusive_start_key: list or tuple
+        :param exclusive_start_key: Primary key of the item from
+            which to continue an earlier scan. This would be
+            provided as the LastEvaluatedKey in that scan.
+
+        :type item_class: Class
+        :param item_class: Allows you to override the class used
+            to generate the items. This should be a subclass of
+            :class:`boto.dynamodb.item.Item`
+
+        :return: A TableGenerator (generator) object which will iterate
+            over all results
+        :rtype: :class:`boto.dynamodb.layer2.TableGenerator`
+        """
+        return self.layer2.scan(self, *args, **kw)
+
+    def batch_get_item(self, keys, attributes_to_get=None):
+        """
+        Return a set of attributes for multiple items from a single table
+        using their primary keys. This abstraction removes the 100 Items per
+        batch limitation as well as the "UnprocessedKeys" logic.
+
+        :type keys: list
+        :param keys: A list of scalar or tuple values. Each element in the
+            list represents one Item to retrieve. If the schema for the
+            table has both a HashKey and a RangeKey, each element in the
+            list should be a tuple consisting of (hash_key, range_key). If
+            the schema for the table contains only a HashKey, each element
+            in the list should be a scalar value of the appropriate type
+            for the table schema. NOTE: The maximum number of items that
+            can be retrieved for a single operation is 100. Also, the
+            number of items retrieved is constrained by a 1 MB size limit.
+
+        :type attributes_to_get: list
+        :param attributes_to_get: A list of attribute names.
+            If supplied, only the specified attribute names will
+            be returned. Otherwise, all attributes will be returned.
+ + :return: A TableBatchGenerator (generator) object which will + iterate over all results + :rtype: :class:`boto.dynamodb.table.TableBatchGenerator` + """ + return TableBatchGenerator(self, keys, attributes_to_get) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/types.py b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/types.py new file mode 100644 index 0000000000000000000000000000000000000000..6a48ae5f8b7936226f107ad57a4604ae877cb284 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb/types.py @@ -0,0 +1,410 @@ +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +""" +Some utility functions to deal with mapping Amazon DynamoDB types to +Python types and vice-versa. +""" +import base64 +from decimal import (Decimal, DecimalException, Context, + Clamped, Overflow, Inexact, Underflow, Rounded) +from collections import Mapping +from boto.dynamodb.exceptions import DynamoDBNumberError +from boto.compat import filter, map, six, long_type + + +DYNAMODB_CONTEXT = Context( + Emin=-128, Emax=126, rounding=None, prec=38, + traps=[Clamped, Overflow, Inexact, Rounded, Underflow]) + + +# python2.6 cannot convert floats directly to +# Decimals. This is taken from: +# http://docs.python.org/release/2.6.7/library/decimal.html#decimal-faq +def float_to_decimal(f): + n, d = f.as_integer_ratio() + numerator, denominator = Decimal(n), Decimal(d) + ctx = DYNAMODB_CONTEXT + result = ctx.divide(numerator, denominator) + while ctx.flags[Inexact]: + ctx.flags[Inexact] = False + ctx.prec *= 2 + result = ctx.divide(numerator, denominator) + return result + + +def is_num(n, boolean_as_int=True): + if boolean_as_int: + types = (int, long_type, float, Decimal, bool) + else: + types = (int, long_type, float, Decimal) + + return isinstance(n, types) or n in types + + +if six.PY2: + def is_str(n): + return (isinstance(n, basestring) or + isinstance(n, type) and issubclass(n, basestring)) + + def is_binary(n): + return isinstance(n, Binary) + +else: # PY3 + def is_str(n): + return (isinstance(n, str) or + isinstance(n, type) and issubclass(n, str)) + + def is_binary(n): + return isinstance(n, bytes) # Binary is subclass of bytes. + + +def serialize_num(val): + """Cast a number to a string and perform + validation to ensure no loss of precision. + """ + if isinstance(val, bool): + return str(int(val)) + return str(val) + + +def convert_num(s): + if '.' 
in s:
+        n = float(s)
+    else:
+        n = int(s)
+    return n
+
+
+def convert_binary(n):
+    return Binary(base64.b64decode(n))
+
+
+def get_dynamodb_type(val, use_boolean=True):
+    """
+    Take a scalar Python value and return a string representing
+    the corresponding Amazon DynamoDB type. If the value passed in is
+    not a supported type, raise a TypeError.
+    """
+    dynamodb_type = None
+    if val is None:
+        dynamodb_type = 'NULL'
+    elif is_num(val):
+        if isinstance(val, bool) and use_boolean:
+            dynamodb_type = 'BOOL'
+        else:
+            dynamodb_type = 'N'
+    elif is_str(val):
+        dynamodb_type = 'S'
+    elif isinstance(val, (set, frozenset)):
+        if False not in map(is_num, val):
+            dynamodb_type = 'NS'
+        elif False not in map(is_str, val):
+            dynamodb_type = 'SS'
+        elif False not in map(is_binary, val):
+            dynamodb_type = 'BS'
+    elif is_binary(val):
+        dynamodb_type = 'B'
+    elif isinstance(val, Mapping):
+        dynamodb_type = 'M'
+    elif isinstance(val, list):
+        dynamodb_type = 'L'
+    if dynamodb_type is None:
+        msg = 'Unsupported type "%s" for value "%s"' % (type(val), val)
+        raise TypeError(msg)
+    return dynamodb_type
+
+
+def dynamize_value(val):
+    """
+    Take a scalar Python value and return a dict consisting
+    of the Amazon DynamoDB type specification and the value that
+    needs to be sent to Amazon DynamoDB. If the type of the value
+    is not supported, raise a TypeError.
+    """
+    dynamodb_type = get_dynamodb_type(val)
+    if dynamodb_type == 'N':
+        val = {dynamodb_type: serialize_num(val)}
+    elif dynamodb_type == 'S':
+        val = {dynamodb_type: val}
+    elif dynamodb_type == 'NS':
+        val = {dynamodb_type: list(map(serialize_num, val))}
+    elif dynamodb_type == 'SS':
+        val = {dynamodb_type: [n for n in val]}
+    elif dynamodb_type == 'B':
+        if isinstance(val, bytes):
+            val = Binary(val)
+        val = {dynamodb_type: val.encode()}
+    elif dynamodb_type == 'BS':
+        val = {dynamodb_type: [n.encode() for n in val]}
+    return val
+
+
+if six.PY2:
+    class Binary(object):
+        def __init__(self, value):
+            if not isinstance(value, (bytes, six.text_type)):
+                raise TypeError('Value must be a string of binary data!')
+            if not isinstance(value, bytes):
+                value = value.encode("utf-8")
+
+            self.value = value
+
+        def encode(self):
+            return base64.b64encode(self.value).decode('utf-8')
+
+        def __eq__(self, other):
+            if isinstance(other, Binary):
+                return self.value == other.value
+            else:
+                return self.value == other
+
+        def __ne__(self, other):
+            return not self.__eq__(other)
+
+        def __repr__(self):
+            return 'Binary(%r)' % self.value
+
+        def __str__(self):
+            return self.value
+
+        def __hash__(self):
+            return hash(self.value)
+else:
+    class Binary(bytes):
+        def encode(self):
+            return base64.b64encode(self).decode('utf-8')
+
+        @property
+        def value(self):
+            # This matches the public API of the Python 2 version,
+            # but just returns itself since it is already a bytes
+            # instance.
+            return bytes(self)
+
+        def __repr__(self):
+            return 'Binary(%r)' % self.value
+
+
+def item_object_hook(dct):
+    """
+    A custom object hook for use when decoding JSON item bodies.
+    This hook will transform Amazon DynamoDB JSON responses to something
+    that maps directly to native Python types.
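+
+    Example usage (a minimal sketch; the JSON body is hypothetical)::
+
+        import json
+        body = '{"Item": {"name": {"S": "foo"}, "views": {"N": "42"}}}'
+        json.loads(body, object_hook=item_object_hook)
+        # roughly: {'Item': {'name': 'foo', 'views': 42}}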
+ """ + if len(dct.keys()) > 1: + return dct + if 'S' in dct: + return dct['S'] + if 'N' in dct: + return convert_num(dct['N']) + if 'SS' in dct: + return set(dct['SS']) + if 'NS' in dct: + return set(map(convert_num, dct['NS'])) + if 'B' in dct: + return convert_binary(dct['B']) + if 'BS' in dct: + return set(map(convert_binary, dct['BS'])) + return dct + + +class Dynamizer(object): + """Control serialization/deserialization of types. + + This class controls the encoding of python types to the + format that is expected by the DynamoDB API, as well as + taking DynamoDB types and constructing the appropriate + python types. + + If you want to customize this process, you can subclass + this class and override the encoding/decoding of + specific types. For example:: + + 'foo' (Python type) + | + v + encode('foo') + | + v + _encode_s('foo') + | + v + {'S': 'foo'} (Encoding sent to/received from DynamoDB) + | + V + decode({'S': 'foo'}) + | + v + _decode_s({'S': 'foo'}) + | + v + 'foo' (Python type) + + """ + def _get_dynamodb_type(self, attr): + return get_dynamodb_type(attr) + + def encode(self, attr): + """ + Encodes a python type to the format expected + by DynamoDB. + + """ + dynamodb_type = self._get_dynamodb_type(attr) + try: + encoder = getattr(self, '_encode_%s' % dynamodb_type.lower()) + except AttributeError: + raise ValueError("Unable to encode dynamodb type: %s" % + dynamodb_type) + return {dynamodb_type: encoder(attr)} + + def _encode_n(self, attr): + try: + if isinstance(attr, float) and not hasattr(Decimal, 'from_float'): + # python2.6 does not support creating Decimals directly + # from floats so we have to do this ourself. + n = str(float_to_decimal(attr)) + else: + n = str(DYNAMODB_CONTEXT.create_decimal(attr)) + if list(filter(lambda x: x in n, ('Infinity', 'NaN'))): + raise TypeError('Infinity and NaN not supported') + return n + except (TypeError, DecimalException) as e: + msg = '{0} numeric for `{1}`\n{2}'.format( + e.__class__.__name__, attr, str(e) or '') + raise DynamoDBNumberError(msg) + + def _encode_s(self, attr): + if isinstance(attr, bytes): + attr = attr.decode('utf-8') + elif not isinstance(attr, six.text_type): + attr = str(attr) + return attr + + def _encode_ns(self, attr): + return list(map(self._encode_n, attr)) + + def _encode_ss(self, attr): + return [self._encode_s(n) for n in attr] + + def _encode_b(self, attr): + if isinstance(attr, bytes): + attr = Binary(attr) + return attr.encode() + + def _encode_bs(self, attr): + return [self._encode_b(n) for n in attr] + + def _encode_null(self, attr): + return True + + def _encode_bool(self, attr): + return attr + + def _encode_m(self, attr): + return dict([(k, self.encode(v)) for k, v in attr.items()]) + + def _encode_l(self, attr): + return [self.encode(i) for i in attr] + + def decode(self, attr): + """ + Takes the format returned by DynamoDB and constructs + the appropriate python type. + + """ + if len(attr) > 1 or not attr: + return attr + dynamodb_type = list(attr.keys())[0] + if dynamodb_type.lower() == dynamodb_type: + # It's not an actual type, just a single character attr that + # overlaps with the DDB types. Return it. 
+ return attr + try: + decoder = getattr(self, '_decode_%s' % dynamodb_type.lower()) + except AttributeError: + return attr + return decoder(attr[dynamodb_type]) + + def _decode_n(self, attr): + return DYNAMODB_CONTEXT.create_decimal(attr) + + def _decode_s(self, attr): + return attr + + def _decode_ns(self, attr): + return set(map(self._decode_n, attr)) + + def _decode_ss(self, attr): + return set(map(self._decode_s, attr)) + + def _decode_b(self, attr): + return convert_binary(attr) + + def _decode_bs(self, attr): + return set(map(self._decode_b, attr)) + + def _decode_null(self, attr): + return None + + def _decode_bool(self, attr): + return attr + + def _decode_m(self, attr): + return dict([(k, self.decode(v)) for k, v in attr.items()]) + + def _decode_l(self, attr): + return [self.decode(i) for i in attr] + + +class NonBooleanDynamizer(Dynamizer): + """Casting boolean type to numeric types. + + This class is provided for backward compatibility. + """ + def _get_dynamodb_type(self, attr): + return get_dynamodb_type(attr, use_boolean=False) + + +class LossyFloatDynamizer(NonBooleanDynamizer): + """Use float/int instead of Decimal for numeric types. + + This class is provided for backwards compatibility. Instead of + using Decimals for the 'N', 'NS' types it uses ints/floats. + + This class is deprecated and its usage is not encouraged, + as doing so may result in loss of precision. Use the + `Dynamizer` class instead. + + """ + def _encode_n(self, attr): + return serialize_num(attr) + + def _encode_ns(self, attr): + return [str(i) for i in attr] + + def _decode_n(self, attr): + return convert_num(attr) + + def _decode_ns(self, attr): + return set(map(self._decode_n, attr)) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..aa07e5cce4b51bc8c9f03e131c2fcbe2176194e4 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/__init__.py @@ -0,0 +1,42 @@ +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the Amazon DynamoDB service. 
+ + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.dynamodb2.layer1 import DynamoDBConnection + return get_regions('dynamodb', connection_cls=DynamoDBConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..3a677e09dfcf723449581482218167ccf458b2cf --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/exceptions.py @@ -0,0 +1,78 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import JSONResponseError + + +class ProvisionedThroughputExceededException(JSONResponseError): + pass + + +class LimitExceededException(JSONResponseError): + pass + + +class ConditionalCheckFailedException(JSONResponseError): + pass + + +class ResourceInUseException(JSONResponseError): + pass + + +class ResourceNotFoundException(JSONResponseError): + pass + + +class InternalServerError(JSONResponseError): + pass + + +class ValidationException(JSONResponseError): + pass + + +class ItemCollectionSizeLimitExceededException(JSONResponseError): + pass + + +class DynamoDBError(Exception): + pass + + +class UnknownSchemaFieldError(DynamoDBError): + pass + + +class UnknownIndexFieldError(DynamoDBError): + pass + + +class UnknownFilterTypeError(DynamoDBError): + pass + + +class QueryError(DynamoDBError): + pass + + +class ItemNotFound(DynamoDBError): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/fields.py b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/fields.py new file mode 100644 index 0000000000000000000000000000000000000000..4443969e4e1c43d0c87bce02233a7b654410b498 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/fields.py @@ -0,0 +1,337 @@ +from boto.dynamodb2.types import STRING + + +class BaseSchemaField(object): + """ + An abstract class for defining schema fields. + + Contains most of the core functionality for the field. Subclasses must + define an ``attr_type`` to pass to DynamoDB. + """ + attr_type = None + + def __init__(self, name, data_type=STRING): + """ + Creates a Python schema field, to represent the data to pass to + DynamoDB. 
+
+ Requires a ``name`` parameter, which should be a string name of the
+ field.
+
+ Optionally accepts a ``data_type`` parameter, which should be a
+ constant from ``boto.dynamodb2.types``. (Default: ``STRING``)
+ """
+ self.name = name
+ self.data_type = data_type
+
+ def definition(self):
+ """
+ Returns the attribute definition structure DynamoDB expects.
+
+ Example::
+
+ >>> field.definition()
+ {
+ 'AttributeName': 'username',
+ 'AttributeType': 'S',
+ }
+
+ """
+ return {
+ 'AttributeName': self.name,
+ 'AttributeType': self.data_type,
+ }
+
+ def schema(self):
+ """
+ Returns the schema structure DynamoDB expects.
+
+ Example::
+
+ >>> field.schema()
+ {
+ 'AttributeName': 'username',
+ 'KeyType': 'HASH',
+ }
+
+ """
+ return {
+ 'AttributeName': self.name,
+ 'KeyType': self.attr_type,
+ }
+
+
+class HashKey(BaseSchemaField):
+ """
+ A field representing a hash key.
+
+ Example::
+
+ >>> from boto.dynamodb2.types import NUMBER
+ >>> HashKey('username')
+ >>> HashKey('date_joined', data_type=NUMBER)
+
+ """
+ attr_type = 'HASH'
+
+
+class RangeKey(BaseSchemaField):
+ """
+ A field representing a range key.
+
+ Example::
+
+ >>> from boto.dynamodb2.types import NUMBER
+ >>> RangeKey('last_name')
+ >>> RangeKey('date_joined', data_type=NUMBER)
+
+ """
+ attr_type = 'RANGE'
+
+
+class BaseIndexField(object):
+ """
+ An abstract class for defining schema indexes.
+
+ Contains most of the core functionality for the index. Subclasses must
+ define a ``projection_type`` to pass to DynamoDB.
+ """
+ def __init__(self, name, parts):
+ self.name = name
+ self.parts = parts
+
+ def definition(self):
+ """
+ Returns the attribute definition structure DynamoDB expects.
+
+ Example::
+
+ >>> index.definition()
+ {
+ 'AttributeName': 'username',
+ 'AttributeType': 'S',
+ }
+
+ """
+ definition = []
+
+ for part in self.parts:
+ definition.append({
+ 'AttributeName': part.name,
+ 'AttributeType': part.data_type,
+ })
+
+ return definition
+
+ def schema(self):
+ """
+ Returns the schema structure DynamoDB expects.
+
+ Example::
+
+ >>> index.schema()
+ {
+ 'IndexName': 'LastNameIndex',
+ 'KeySchema': [
+ {
+ 'AttributeName': 'username',
+ 'KeyType': 'HASH',
+ },
+ ],
+ 'Projection': {
+ 'ProjectionType': 'KEYS_ONLY',
+ }
+ }
+
+ """
+ key_schema = []
+
+ for part in self.parts:
+ key_schema.append(part.schema())
+
+ return {
+ 'IndexName': self.name,
+ 'KeySchema': key_schema,
+ 'Projection': {
+ 'ProjectionType': self.projection_type,
+ }
+ }
+
+
+class AllIndex(BaseIndexField):
+ """
+ An index signifying all fields should be in the index.
+
+ Example::
+
+ >>> AllIndex('MostRecentlyJoined', parts=[
+ ... HashKey('username'),
+ ... RangeKey('date_joined')
+ ... ])
+
+ """
+ projection_type = 'ALL'
+
+
+class KeysOnlyIndex(BaseIndexField):
+ """
+ An index signifying only key fields should be in the index.
+
+ Example::
+
+ >>> KeysOnlyIndex('MostRecentlyJoined', parts=[
+ ... HashKey('username'),
+ ... RangeKey('date_joined')
+ ... ])
+
+ """
+ projection_type = 'KEYS_ONLY'
+
+
+class IncludeIndex(BaseIndexField):
+ """
+ An index signifying only certain fields should be in the index.
+
+ Example::
+
+ >>> IncludeIndex('GenderIndex', parts=[
+ ... HashKey('username'),
+ ... RangeKey('date_joined')
+ ...
], includes=['gender']) + + """ + projection_type = 'INCLUDE' + + def __init__(self, *args, **kwargs): + self.includes_fields = kwargs.pop('includes', []) + super(IncludeIndex, self).__init__(*args, **kwargs) + + def schema(self): + schema_data = super(IncludeIndex, self).schema() + schema_data['Projection']['NonKeyAttributes'] = self.includes_fields + return schema_data + + +class GlobalBaseIndexField(BaseIndexField): + """ + An abstract class for defining global indexes. + + Contains most of the core functionality for the index. Subclasses must + define a ``projection_type`` to pass to DynamoDB. + """ + throughput = { + 'read': 5, + 'write': 5, + } + + def __init__(self, *args, **kwargs): + throughput = kwargs.pop('throughput', None) + + if throughput is not None: + self.throughput = throughput + + super(GlobalBaseIndexField, self).__init__(*args, **kwargs) + + def schema(self): + """ + Returns the schema structure DynamoDB expects. + + Example:: + + >>> index.schema() + { + 'IndexName': 'LastNameIndex', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH', + }, + ], + 'Projection': { + 'ProjectionType': 'KEYS_ONLY', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + } + + """ + schema_data = super(GlobalBaseIndexField, self).schema() + schema_data['ProvisionedThroughput'] = { + 'ReadCapacityUnits': int(self.throughput['read']), + 'WriteCapacityUnits': int(self.throughput['write']), + } + return schema_data + + +class GlobalAllIndex(GlobalBaseIndexField): + """ + An index signifying all fields should be in the index. + + Example:: + + >>> GlobalAllIndex('MostRecentlyJoined', parts=[ + ... HashKey('username'), + ... RangeKey('date_joined') + ... ], + ... throughput={ + ... 'read': 2, + ... 'write': 1, + ... }) + + """ + projection_type = 'ALL' + + +class GlobalKeysOnlyIndex(GlobalBaseIndexField): + """ + An index signifying only key fields should be in the index. + + Example:: + + >>> GlobalKeysOnlyIndex('MostRecentlyJoined', parts=[ + ... HashKey('username'), + ... RangeKey('date_joined') + ... ], + ... throughput={ + ... 'read': 2, + ... 'write': 1, + ... }) + + """ + projection_type = 'KEYS_ONLY' + + +class GlobalIncludeIndex(GlobalBaseIndexField, IncludeIndex): + """ + An index signifying only certain fields should be in the index. + + Example:: + + >>> GlobalIncludeIndex('GenderIndex', parts=[ + ... HashKey('username'), + ... RangeKey('date_joined') + ... ], + ... includes=['gender'], + ... throughput={ + ... 'read': 2, + ... 'write': 1, + ... }) + + """ + projection_type = 'INCLUDE' + + def __init__(self, *args, **kwargs): + throughput = kwargs.pop('throughput', None) + IncludeIndex.__init__(self, *args, **kwargs) + if throughput: + kwargs['throughput'] = throughput + GlobalBaseIndexField.__init__(self, *args, **kwargs) + + def schema(self): + # Pick up the includes. + schema_data = IncludeIndex.schema(self) + # Also the throughput. + schema_data.update(GlobalBaseIndexField.schema(self)) + return schema_data diff --git a/desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/items.py b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/items.py new file mode 100644 index 0000000000000000000000000000000000000000..b1b535f634be42ac3569fca725a1762eea5ac6cd --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/items.py @@ -0,0 +1,473 @@ +from copy import deepcopy + + +class NEWVALUE(object): + # A marker for new data added. + pass + + +class Item(object): + """ + An object representing the item data within a DynamoDB table. 
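To tie the index classes above together, a sketch of how they might be combined (the table attributes, index name, and throughput figures are invented for the example)::

    >>> from boto.dynamodb2.fields import HashKey, RangeKey, GlobalAllIndex
    >>> from boto.dynamodb2.types import NUMBER
    >>> recent = GlobalAllIndex('RecentlyJoinedIndex', parts=[
    ...     HashKey('account_type'),
    ...     RangeKey('date_joined', data_type=NUMBER)
    ... ], throughput={'read': 2, 'write': 1})
    >>> recent.schema()['ProvisionedThroughput']
    {'ReadCapacityUnits': 2, 'WriteCapacityUnits': 1}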
+
+ An item is largely schema-free, meaning it can contain any data. The only
+ limitation is that it must have data for the fields in the ``Table``'s
+ schema.
+
+ This object presents a dictionary-like interface for accessing/storing
+ data. It also tries to intelligently track how data has changed throughout
+ the life of the instance, to be as efficient as possible about updates.
+
+ Empty items, or items that have no data, are considered falsey.
+
+ """
+ def __init__(self, table, data=None, loaded=False):
+ """
+ Constructs an (unsaved) ``Item`` instance.
+
+ To persist the data in DynamoDB, you'll need to call ``Item.save``
+ (or ``Item.partial_save``) on the instance.
+
+ Requires a ``table`` parameter, which should be a ``Table`` instance.
+ This is required, as DynamoDB's API is focused around all operations
+ being table-level. It's also how the schema is persisted across many
+ objects.
+
+ Optionally accepts a ``data`` parameter, which should be a dictionary
+ of the fields & values of the item. Alternatively, an ``Item`` instance
+ may be provided from which to extract the data.
+
+ Optionally accepts a ``loaded`` parameter, which should be a boolean.
+ ``True`` if it was preexisting data loaded from DynamoDB, ``False`` if
+ it's new data from the user. Default is ``False``.
+
+ Example::
+
+ >>> users = Table('users')
+ >>> user = Item(users, data={
+ ... 'username': 'johndoe',
+ ... 'first_name': 'John',
+ ... 'date_joined': 1248061592,
+ ... })
+
+ # Change existing data.
+ >>> user['first_name'] = 'Johann'
+ # Add more data.
+ >>> user['last_name'] = 'Doe'
+ # Delete data.
+ >>> del user['date_joined']
+
+ # Iterate over all the data.
+ >>> for field, val in user.items():
+ ... print "%s: %s" % (field, val)
+ username: johndoe
+ first_name: John
+ date_joined: 1248061592
+
+ """
+ self.table = table
+ self._loaded = loaded
+ self._orig_data = {}
+ self._data = data
+ self._dynamizer = table._dynamizer
+
+ if isinstance(self._data, Item):
+ self._data = self._data._data
+ if self._data is None:
+ self._data = {}
+
+ if self._loaded:
+ self._orig_data = deepcopy(self._data)
+
+ def __getitem__(self, key):
+ return self._data.get(key, None)
+
+ def __setitem__(self, key, value):
+ self._data[key] = value
+
+ def __delitem__(self, key):
+ if key not in self._data:
+ return
+
+ del self._data[key]
+
+ def keys(self):
+ return self._data.keys()
+
+ def values(self):
+ return self._data.values()
+
+ def items(self):
+ return self._data.items()
+
+ def get(self, key, default=None):
+ return self._data.get(key, default)
+
+ def __iter__(self):
+ for key in self._data:
+ yield self._data[key]
+
+ def __contains__(self, key):
+ return key in self._data
+
+ def __bool__(self):
+ return bool(self._data)
+
+ __nonzero__ = __bool__
+
+ def _determine_alterations(self):
+ """
+ Checks the ``_orig_data`` against the ``_data`` to determine what
+ changes to the data are present.
+
+ Returns a dictionary containing the keys ``adds``, ``changes`` &
+ ``deletes``, containing the updated data.
+ """
+ alterations = {
+ 'adds': {},
+ 'changes': {},
+ 'deletes': [],
+ }
+
+ orig_keys = set(self._orig_data.keys())
+ data_keys = set(self._data.keys())
+
+ # Run through keys we know are in both for changes.
+ for key in orig_keys.intersection(data_keys):
+ if self._data[key] != self._orig_data[key]:
+ if self._is_storable(self._data[key]):
+ alterations['changes'][key] = self._data[key]
+ else:
+ alterations['deletes'].append(key)
+
+ # Run through additions.
+ for key in data_keys.difference(orig_keys):
+ if self._is_storable(self._data[key]):
+ alterations['adds'][key] = self._data[key]
+
+ # Run through deletions.
+ for key in orig_keys.difference(data_keys):
+ alterations['deletes'].append(key)
+
+ return alterations
+
+ def needs_save(self, data=None):
+ """
+ Returns whether or not the data has changed on the ``Item``.
+
+ Optionally accepts a ``data`` argument, which should be the output from
+ ``self._determine_alterations()`` if you've already called it; typically
+ this is unnecessary. Default is ``None``.
+
+ Example:
+
+ >>> user.needs_save()
+ False
+ >>> user['first_name'] = 'Johann'
+ >>> user.needs_save()
+ True
+
+ """
+ if data is None:
+ data = self._determine_alterations()
+
+ needs_save = False
+
+ for kind in ['adds', 'changes', 'deletes']:
+ if len(data[kind]):
+ needs_save = True
+ break
+
+ return needs_save
+
+ def mark_clean(self):
+ """
+ Marks an ``Item`` instance as no longer needing to be saved.
+
+ Example:
+
+ >>> user.needs_save()
+ False
+ >>> user['first_name'] = 'Johann'
+ >>> user.needs_save()
+ True
+ >>> user.mark_clean()
+ >>> user.needs_save()
+ False
+
+ """
+ self._orig_data = deepcopy(self._data)
+
+ def mark_dirty(self):
+ """
+ DEPRECATED: Marks an ``Item`` instance as needing to be saved.
+
+ This method is no longer necessary, as the state tracking on ``Item``
+ has been improved to automatically detect proper state.
+ """
+ return
+
+ def load(self, data):
+ """
+ This is only useful when being handed raw data from DynamoDB directly.
+ If you have a Python datastructure already, use ``__init__`` or
+ manually set the data instead.
+
+ Largely internal, unless you know what you're doing or are trying to
+ mix the low-level & high-level APIs.
+ """
+ self._data = {}
+
+ for field_name, field_value in data.get('Item', {}).items():
+ self[field_name] = self._dynamizer.decode(field_value)
+
+ self._loaded = True
+ self._orig_data = deepcopy(self._data)
+
+ def get_keys(self):
+ """
+ Returns a Python-style dict of the keys/values.
+
+ Largely internal.
+ """
+ key_fields = self.table.get_key_fields()
+ key_data = {}
+
+ for key in key_fields:
+ key_data[key] = self[key]
+
+ return key_data
+
+ def get_raw_keys(self):
+ """
+ Returns a DynamoDB-style dict of the keys/values.
+
+ Largely internal.
+ """
+ raw_key_data = {}
+
+ for key, value in self.get_keys().items():
+ raw_key_data[key] = self._dynamizer.encode(value)
+
+ return raw_key_data
+
+ def build_expects(self, fields=None):
+ """
+ Builds up a list of expectations to hand off to DynamoDB on save.
+
+ Largely internal.
+ """
+ expects = {}
+
+ if fields is None:
+ fields = list(self._data.keys()) + list(self._orig_data.keys())
+
+ # Only uniques.
+ fields = set(fields)
+
+ for key in fields:
+ expects[key] = {
+ 'Exists': True,
+ }
+ value = None
+
+ # Check for invalid keys.
+ if key not in self._orig_data and key not in self._data:
+ raise ValueError("Unknown key %s provided." % key)
+
+ # States:
+ # * New field (only in _data)
+ # * Unchanged field (in both _data & _orig_data, same data)
+ # * Modified field (in both _data & _orig_data, different data)
+ # * Deleted field (only in _orig_data)
+ orig_value = self._orig_data.get(key, NEWVALUE)
+ current_value = self._data.get(key, NEWVALUE)
+
+ if orig_value == current_value:
+ # Existing field unchanged.
+ value = current_value
+ else:
+ if key in self._data:
+ if key not in self._orig_data:
+ # New field.
+ expects[key]['Exists'] = False
+ else:
+ # Existing field modified.
+ value = orig_value
+ else:
+ # Existing field deleted.
+ value = orig_value
+
+ if value is not None:
+ expects[key]['Value'] = self._dynamizer.encode(value)
+
+ return expects
+
+ def _is_storable(self, value):
+ # We need to prevent ``None``, empty string & empty set from
+ # heading to DDB, but allow falsey values like 0 & False through.
+ if not value:
+ if value not in (0, 0.0, False):
+ return False
+
+ return True
+
+ def prepare_full(self):
+ """
+ Runs through all fields & encodes them to be handed off to DynamoDB
+ as part of a ``save`` (``put_item``) call.
+
+ Largely internal.
+ """
+ # This doesn't save on its own. Rather, we prepare the datastructure
+ # and hand-off to the table to handle creation/update.
+ final_data = {}
+
+ for key, value in self._data.items():
+ if not self._is_storable(value):
+ continue
+
+ final_data[key] = self._dynamizer.encode(value)
+
+ return final_data
+
+ def prepare_partial(self):
+ """
+ Runs through **ONLY** the changed/deleted fields & encodes them to be
+ handed off to DynamoDB as part of a ``partial_save`` (``update_item``)
+ call.
+
+ Largely internal.
+ """
+ # This doesn't save on its own. Rather, we prepare the datastructure
+ # and hand-off to the table to handle creation/update.
+ final_data = {}
+ fields = set()
+ alterations = self._determine_alterations()
+
+ for key, value in alterations['adds'].items():
+ final_data[key] = {
+ 'Action': 'PUT',
+ 'Value': self._dynamizer.encode(self._data[key])
+ }
+ fields.add(key)
+
+ for key, value in alterations['changes'].items():
+ final_data[key] = {
+ 'Action': 'PUT',
+ 'Value': self._dynamizer.encode(self._data[key])
+ }
+ fields.add(key)
+
+ for key in alterations['deletes']:
+ final_data[key] = {
+ 'Action': 'DELETE',
+ }
+ fields.add(key)
+
+ return final_data, fields
+
+ def partial_save(self):
+ """
+ Saves only the changed data to DynamoDB.
+
+ Extremely useful for high-volume/high-write data sets, this allows
+ you to update only a handful of fields rather than having to push
+ entire items. This prevents many accidental overwrite situations as
+ well as saves on the amount of data to transfer over the wire.
+
+ Returns ``True`` on success, ``False`` if no save was performed or
+ the write failed.
+
+ Example::
+
+ >>> user['last_name'] = 'Doh!'
+ # Only the last name field will be sent to DynamoDB.
+ >>> user.partial_save()
+
+ """
+ key = self.get_keys()
+ # Build a new dict of only the data we're changing.
+ final_data, fields = self.prepare_partial()
+
+ if not final_data:
+ return False
+
+ # Remove the key(s) from the ``final_data`` if present.
+ # They should only be present if this is a new item, in which
+ # case we shouldn't be sending them as part of the data to update.
+ for fieldname, value in key.items():
+ if fieldname in final_data:
+ del final_data[fieldname]
+
+ try:
+ # It's likely also in ``fields``, so remove it there too.
+ fields.remove(fieldname)
+ except KeyError:
+ pass
+
+ # Build expectations of only the fields we're planning to update.
+ expects = self.build_expects(fields=fields)
+ returned = self.table._update_item(key, final_data, expects=expects)
+ # Mark the object as clean.
+ self.mark_clean()
+ return returned
+
+ def save(self, overwrite=False):
+ """
+ Saves all data to DynamoDB.
+
+ By default, this attempts to ensure that none of the underlying
+ data has changed. If any fields have changed in between when the
+ ``Item`` was constructed & when it is saved, this call will fail so
+ as not to cause any data loss.
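To make the conditional-write machinery concrete, a sketch of the structure ``build_expects`` (above) hands to DynamoDB for one unchanged field and one newly added field (attribute names and values are invented)::

    {
        'username': {'Exists': True, 'Value': {'S': 'johndoe'}},
        'nickname': {'Exists': False},
    }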
+
+ If you're sure possibly overwriting data is acceptable, you can pass
+ ``overwrite=True``. If that's not acceptable, you may be able to use
+ ``Item.partial_save`` to only write the changed field data.
+
+ Optionally accepts an ``overwrite`` parameter, which should be a
+ boolean. If you provide ``True``, the item will be forcibly overwritten
+ within DynamoDB, even if another process changed the data in the
+ meantime. (Default: ``False``)
+
+ Returns ``True`` on success, ``False`` if no save was performed.
+
+ Example::
+
+ >>> user['last_name'] = 'Doh!'
+ # All data on the Item is sent to DynamoDB.
+ >>> user.save()
+
+ # If it fails, you can overwrite.
+ >>> user.save(overwrite=True)
+
+ """
+ if not self.needs_save() and not overwrite:
+ return False
+
+ final_data = self.prepare_full()
+ expects = None
+
+ if overwrite is False:
+ # Build expectations about *all* of the data.
+ expects = self.build_expects()
+
+ returned = self.table._put_item(final_data, expects=expects)
+ # Mark the object as clean.
+ self.mark_clean()
+ return returned
+
+ def delete(self):
+ """
+ Deletes the item's data from DynamoDB.
+
+ Returns ``True`` on success.
+
+ Example::
+
+ # Buh-bye now.
+ >>> user.delete()
+
+ """
+ key_data = self.get_keys()
+ return self.table.delete_item(**key_data)
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/layer1.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd1eb1ea9d40c147943575563b0c3d3a00d8dc3d
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/layer1.py
@@ -0,0 +1,2904 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from binascii import crc32
+
+import boto
+from boto.compat import json
+from boto.connection import AWSQueryConnection
+from boto.regioninfo import RegionInfo
+from boto.exception import JSONResponseError
+from boto.dynamodb2 import exceptions
+
+
+class DynamoDBConnection(AWSQueryConnection):
+ """
+ Amazon DynamoDB
+ **Overview**
+
+ This is the Amazon DynamoDB API Reference. This guide provides
+ descriptions and samples of the low-level DynamoDB API. For
+ information about DynamoDB application development, go to the
+ `Amazon DynamoDB Developer Guide`_.
+
+ Instead of making the requests to the low-level DynamoDB API
+ directly from your application, we recommend that you use the AWS
+ Software Development Kits (SDKs).
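If you do code against the low-level API through boto itself, a minimal sketch of obtaining this client (the region name is an example; credentials are assumed to come from the environment or boto config)::

    >>> from boto.dynamodb2 import connect_to_region
    >>> conn = connect_to_region('us-east-1')
    >>> conn.list_tables(limit=10)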
The easy-to-use libraries in the + AWS SDKs make it unnecessary to call the low-level DynamoDB API + directly from your application. The libraries take care of request + authentication, serialization, and connection management. For more + information, go to `Using the AWS SDKs with DynamoDB`_ in the + Amazon DynamoDB Developer Guide . + + If you decide to code against the low-level DynamoDB API directly, + you will need to write the necessary code to authenticate your + requests. For more information on signing your requests, go to + `Using the DynamoDB API`_ in the Amazon DynamoDB Developer Guide . + + The following are short descriptions of each low-level API action, + organized by function. + + **Managing Tables** + + + + CreateTable - Creates a table with user-specified provisioned + throughput settings. You must designate one attribute as the hash + primary key for the table; you can optionally designate a second + attribute as the range primary key. DynamoDB creates indexes on + these key attributes for fast data access. Optionally, you can + create one or more secondary indexes, which provide fast data + access using non-key attributes. + + DescribeTable - Returns metadata for a table, such as table + size, status, and index information. + + UpdateTable - Modifies the provisioned throughput settings for a + table. Optionally, you can modify the provisioned throughput + settings for global secondary indexes on the table. + + ListTables - Returns a list of all tables associated with the + current AWS account and endpoint. + + DeleteTable - Deletes a table and all of its indexes. + + + For conceptual information about managing tables, go to `Working + with Tables`_ in the Amazon DynamoDB Developer Guide . + + **Reading Data** + + + + GetItem - Returns a set of attributes for the item that has a + given primary key. By default, GetItem performs an eventually + consistent read; however, applications can specify a strongly + consistent read instead. + + BatchGetItem - Performs multiple GetItem requests for data items + using their primary keys, from one table or multiple tables. The + response from BatchGetItem has a size limit of 16 MB and returns a + maximum of 100 items. Both eventually consistent and strongly + consistent reads can be used. + + Query - Returns one or more items from a table or a secondary + index. You must provide a specific hash key value. You can narrow + the scope of the query using comparison operators against a range + key value, or on the index key. Query supports either eventual or + strong consistency. A single response has a size limit of 1 MB. + + Scan - Reads every item in a table; the result set is eventually + consistent. You can limit the number of items returned by + filtering the data attributes, using conditional expressions. Scan + can be used to enable ad-hoc querying of a table against non-key + attributes; however, since this is a full table scan without using + an index, Scan should not be used for any application query use + case that requires predictable performance. + + + For conceptual information about reading data, go to `Working with + Items`_ and `Query and Scan Operations`_ in the Amazon DynamoDB + Developer Guide . + + **Modifying Data** + + + + PutItem - Creates a new item, or replaces an existing item with + a new item (including all the attributes). By default, if an item + in the table already exists with the same primary key, the new + item completely replaces the existing item. 
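For a sense of what these item-level operations carry on the wire, a sketch of a PutItem request body (table and attribute names are invented)::

    {
        "TableName": "users",
        "Item": {
            "username": {"S": "johndoe"},
            "date_joined": {"N": "1366056668"}
        }
    }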
You can use
+ conditional operators to replace an item only if its attribute
+ values match certain conditions, or to insert a new item only if
+ that item doesn't already exist.
+ + UpdateItem - Modifies the attributes of an existing item. You
+ can also use conditional operators to perform an update only if
+ the item's attribute values match certain conditions.
+ + DeleteItem - Deletes an item in a table by primary key. You can
+ use conditional operators to delete an item only if the
+ item's attribute values match certain conditions.
+ + BatchWriteItem - Performs multiple PutItem and DeleteItem
+ requests across multiple tables in a single request. A failure of
+ any request(s) in the batch will not cause the entire
+ BatchWriteItem operation to fail. Supports batches of up to 25
+ items to put or delete, with a maximum total request size of 16
+ MB.
+
+
+ For conceptual information about modifying data, go to `Working
+ with Items`_ and `Query and Scan Operations`_ in the Amazon
+ DynamoDB Developer Guide .
+ """
+ APIVersion = "2012-08-10"
+ DefaultRegionName = "us-east-1"
+ DefaultRegionEndpoint = "dynamodb.us-east-1.amazonaws.com"
+ ServiceName = "DynamoDB"
+ TargetPrefix = "DynamoDB_20120810"
+ ResponseError = JSONResponseError
+
+ _faults = {
+ "ProvisionedThroughputExceededException": exceptions.ProvisionedThroughputExceededException,
+ "LimitExceededException": exceptions.LimitExceededException,
+ "ConditionalCheckFailedException": exceptions.ConditionalCheckFailedException,
+ "ResourceInUseException": exceptions.ResourceInUseException,
+ "ResourceNotFoundException": exceptions.ResourceNotFoundException,
+ "InternalServerError": exceptions.InternalServerError,
+ "ItemCollectionSizeLimitExceededException": exceptions.ItemCollectionSizeLimitExceededException,
+ }
+
+ NumberRetries = 10
+
+
+ def __init__(self, **kwargs):
+ region = kwargs.pop('region', None)
+ validate_checksums = kwargs.pop('validate_checksums', True)
+ if not region:
+ region_name = boto.config.get('DynamoDB', 'region',
+ self.DefaultRegionName)
+ for reg in boto.dynamodb2.regions():
+ if reg.name == region_name:
+ region = reg
+ break
+
+ # Only set host if it isn't manually overwritten
+ if 'host' not in kwargs:
+ kwargs['host'] = region.endpoint
+
+ super(DynamoDBConnection, self).__init__(**kwargs)
+ self.region = region
+ self._validate_checksums = boto.config.getbool(
+ 'DynamoDB', 'validate_checksums', validate_checksums)
+ self.throughput_exceeded_events = 0
+
+ def _required_auth_capability(self):
+ return ['hmac-v4']
+
+ def batch_get_item(self, request_items, return_consumed_capacity=None):
+ """
+ The BatchGetItem operation returns the attributes of one or
+ more items from one or more tables. You identify requested
+ items by primary key.
+
+ A single operation can retrieve up to 16 MB of data, which can
+ contain as many as 100 items. BatchGetItem will return a
+ partial result if the response size limit is exceeded, the
+ table's provisioned throughput is exceeded, or an internal
+ processing failure occurs. If a partial result is returned,
+ the operation returns a value for UnprocessedKeys . You can
+ use this value to retry the operation starting with the next
+ item to get.
+
+ For example, if you ask to retrieve 100 items, but each
+ individual item is 300 KB in size, the system returns 52 items
+ (so as not to exceed the 16 MB limit). It also returns an
+ appropriate UnprocessedKeys value so you can get the next page
+ of results.
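One way an application might drain ``UnprocessedKeys``, sketched under the assumptions that ``conn`` is a ``DynamoDBConnection`` and ``request_items`` targets a hypothetical ``users`` table::

    import time

    pending = request_items
    items = []
    delay = 1
    while pending:
        response = conn.batch_get_item(pending)
        items.extend(response['Responses'].get('users', []))
        pending = response.get('UnprocessedKeys') or None
        if pending:
            time.sleep(delay)  # simplistic exponential backoff
            delay *= 2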
If desired, your application can include its own + logic to assemble the pages of results into one data set. + + If none of the items can be processed due to insufficient + provisioned throughput on all of the tables in the request, + then BatchGetItem will return a + ProvisionedThroughputExceededException . If at least one of + the items is successfully processed, then BatchGetItem + completes successfully, while returning the keys of the unread + items in UnprocessedKeys . + + If DynamoDB returns any unprocessed items, you should retry + the batch operation on those items. However, we strongly + recommend that you use an exponential backoff algorithm . If + you retry the batch operation immediately, the underlying read + or write requests can still fail due to throttling on the + individual tables. If you delay the batch operation using + exponential backoff, the individual requests in the batch are + much more likely to succeed. + + For more information, go to `Batch Operations and Error + Handling`_ in the Amazon DynamoDB Developer Guide . + + By default, BatchGetItem performs eventually consistent reads + on every table in the request. If you want strongly consistent + reads instead, you can set ConsistentRead to `True` for any or + all tables. + + In order to minimize response latency, BatchGetItem retrieves + items in parallel. + + When designing your application, keep in mind that DynamoDB + does not return attributes in any particular order. To help + parse the response by item, include the primary key values for + the items in your request in the AttributesToGet parameter. + + If a requested item does not exist, it is not returned in the + result. Requests for nonexistent items consume the minimum + read capacity units according to the type of read. For more + information, see `Capacity Units Calculations`_ in the Amazon + DynamoDB Developer Guide . + + :type request_items: map + :param request_items: + A map of one or more table names and, for each table, the corresponding + primary keys for the items to retrieve. Each table name can be + invoked only once. + + Each element in the map consists of the following: + + + + Keys - An array of primary key attribute values that define specific + items in the table. For each primary key, you must provide all of + the key attributes. For example, with a hash type primary key, you + only need to specify the hash attribute. For a hash-and-range type + primary key, you must specify both the hash attribute and the range + attribute. + + AttributesToGet - One or more attributes to be retrieved from the + table. By default, all attributes are returned. If a specified + attribute is not found, it does not appear in the result. Note that + AttributesToGet has no effect on provisioned throughput + consumption. DynamoDB determines capacity units consumed based on + item size, not on the amount of data that is returned to an + application. + + ConsistentRead - If `True`, a strongly consistent read is used; if + `False` (the default), an eventually consistent read is used. + + :type return_consumed_capacity: string + :param return_consumed_capacity: A value that if set to `TOTAL`, the + response includes ConsumedCapacity data for tables and indexes. If + set to `INDEXES`, the response includes ConsumedCapacity for + indexes. If set to `NONE` (the default), ConsumedCapacity is not + included in the response. 
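Putting those parameters together, a sketch of a ``request_items`` map (table, key, and attribute names are invented)::

    {
        'users': {
            'Keys': [
                {'username': {'S': 'johndoe'}},
                {'username': {'S': 'janedoe'}},
            ],
            'AttributesToGet': ['username', 'date_joined'],
            'ConsistentRead': True,
        }
    }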
+ + """ + params = {'RequestItems': request_items, } + if return_consumed_capacity is not None: + params['ReturnConsumedCapacity'] = return_consumed_capacity + return self.make_request(action='BatchGetItem', + body=json.dumps(params)) + + def batch_write_item(self, request_items, return_consumed_capacity=None, + return_item_collection_metrics=None): + """ + The BatchWriteItem operation puts or deletes multiple items in + one or more tables. A single call to BatchWriteItem can write + up to 16 MB of data, which can comprise as many as 25 put or + delete requests. Individual items to be written can be as + large as 400 KB. + + + BatchWriteItem cannot update items. To update items, use the + UpdateItem API. + + + The individual PutItem and DeleteItem operations specified in + BatchWriteItem are atomic; however BatchWriteItem as a whole + is not. If any requested operations fail because the table's + provisioned throughput is exceeded or an internal processing + failure occurs, the failed operations are returned in the + UnprocessedItems response parameter. You can investigate and + optionally resend the requests. Typically, you would call + BatchWriteItem in a loop. Each iteration would check for + unprocessed items and submit a new BatchWriteItem request with + those unprocessed items until all items have been processed. + + Note that if none of the items can be processed due to + insufficient provisioned throughput on all of the tables in + the request, then BatchWriteItem will return a + ProvisionedThroughputExceededException . + + If DynamoDB returns any unprocessed items, you should retry + the batch operation on those items. However, we strongly + recommend that you use an exponential backoff algorithm . If + you retry the batch operation immediately, the underlying read + or write requests can still fail due to throttling on the + individual tables. If you delay the batch operation using + exponential backoff, the individual requests in the batch are + much more likely to succeed. + + For more information, go to `Batch Operations and Error + Handling`_ in the Amazon DynamoDB Developer Guide . + + With BatchWriteItem , you can efficiently write or delete + large amounts of data, such as from Amazon Elastic MapReduce + (EMR), or copy data from another database into DynamoDB. In + order to improve performance with these large-scale + operations, BatchWriteItem does not behave in the same way as + individual PutItem and DeleteItem calls would For example, you + cannot specify conditions on individual put and delete + requests, and BatchWriteItem does not return deleted items in + the response. + + If you use a programming language that supports concurrency, + such as Java, you can use threads to write items in parallel. + Your application must include the necessary logic to manage + the threads. With languages that don't support threading, such + as PHP, you must update or delete the specified items one at a + time. In both situations, BatchWriteItem provides an + alternative where the API performs the specified put and + delete operations in parallel, giving you the power of the + thread pool approach without having to introduce complexity + into your application. + + Parallel processing reduces latency, but each specified put + and delete request consumes the same number of write capacity + units whether it is processed in parallel or not. Delete + operations on nonexistent items consume one write capacity + unit. 
+
+ If one or more of the following is true, DynamoDB rejects the
+ entire batch write operation:
+
+
+ + One or more tables specified in the BatchWriteItem request
+ do not exist.
+ + Primary key attributes specified on an item in the request
+ do not match those in the corresponding table's primary key
+ schema.
+ + You try to perform multiple operations on the same item in
+ the same BatchWriteItem request. For example, you cannot put
+ and delete the same item in the same BatchWriteItem request.
+ + There are more than 25 requests in the batch.
+ + Any individual item in a batch exceeds 400 KB.
+ + The total request size exceeds 16 MB.
+
+ :type request_items: map
+ :param request_items:
+ A map of one or more table names and, for each table, a list of
+ operations to be performed ( DeleteRequest or PutRequest ). Each
+ element in the map consists of the following:
+
+
+ + DeleteRequest - Perform a DeleteItem operation on the specified item.
+ The item to be deleted is identified by a Key subelement:
+
+ + Key - A map of primary key attribute values that uniquely identify
+ the item. Each entry in this map consists of an attribute name
+ and an attribute value. For each primary key, you must provide all
+ of the key attributes. For example, with a hash type primary key,
+ you only need to specify the hash attribute. For a hash-and-range
+ type primary key, you must specify both the hash attribute and the
+ range attribute.
+
+ + PutRequest - Perform a PutItem operation on the specified item. The
+ item to be put is identified by an Item subelement:
+
+ + Item - A map of attributes and their values. Each entry in this map
+ consists of an attribute name and an attribute value. Attribute
+ values must not be null; string and binary type attributes must
+ have lengths greater than zero; and set type attributes must not be
+ empty. Requests that contain empty values will be rejected with a
+ ValidationException exception. If you specify any attributes that
+ are part of an index key, then the data types for those attributes
+ must match those of the schema in the table's attribute definition.
+
+ :type return_consumed_capacity: string
+ :param return_consumed_capacity: A value that if set to `TOTAL`, the
+ response includes ConsumedCapacity data for tables and indexes. If
+ set to `INDEXES`, the response includes ConsumedCapacity for
+ indexes. If set to `NONE` (the default), ConsumedCapacity is not
+ included in the response.
+
+ :type return_item_collection_metrics: string
+ :param return_item_collection_metrics: A value that if set to `SIZE`,
+ the response includes statistics about item collections, if any,
+ that were modified during the operation. If set to `NONE` (the
+ default), no statistics are returned.
+
+ """
+ params = {'RequestItems': request_items, }
+ if return_consumed_capacity is not None:
+ params['ReturnConsumedCapacity'] = return_consumed_capacity
+ if return_item_collection_metrics is not None:
+ params['ReturnItemCollectionMetrics'] = return_item_collection_metrics
+ return self.make_request(action='BatchWriteItem',
+ body=json.dumps(params))
+
+ def create_table(self, attribute_definitions, table_name, key_schema,
+ provisioned_throughput, local_secondary_indexes=None,
+ global_secondary_indexes=None):
+ """
+ The CreateTable operation adds a new table to your account. In
+ an AWS account, table names must be unique within each region.
That is, you can have two tables with the same name if you create
+ the tables in different regions.
+
+ CreateTable is an asynchronous operation. Upon receiving a
+ CreateTable request, DynamoDB immediately returns a response
+ with a TableStatus of `CREATING`. After the table is created,
+ DynamoDB sets the TableStatus to `ACTIVE`. You can perform
+ read and write operations only on an `ACTIVE` table.
+
+ You can optionally define secondary indexes on the new table,
+ as part of the CreateTable operation. If you want to create
+ multiple tables with secondary indexes on them, you must
+ create the tables sequentially. Only one table with secondary
+ indexes can be in the `CREATING` state at any given time.
+
+ You can use the DescribeTable API to check the table status.
+
+ :type attribute_definitions: list
+ :param attribute_definitions: An array of attributes that describe the
+ key schema for the table and indexes.
+
+ :type table_name: string
+ :param table_name: The name of the table to create.
+
+ :type key_schema: list
+ :param key_schema: Specifies the attributes that make up the primary
+ key for a table or an index. The attributes in KeySchema must also
+ be defined in the AttributeDefinitions array. For more information,
+ see `Data Model`_ in the Amazon DynamoDB Developer Guide .
+ Each KeySchemaElement in the array is composed of:
+
+
+ + AttributeName - The name of this key attribute.
+ + KeyType - Determines whether the key attribute is `HASH` or `RANGE`.
+
+
+ For a primary key that consists of a hash attribute, you must specify
+ exactly one element with a KeyType of `HASH`.
+
+ For a primary key that consists of hash and range attributes, you must
+ specify exactly two elements, in this order: The first element must
+ have a KeyType of `HASH`, and the second element must have a
+ KeyType of `RANGE`.
+
+ For more information, see `Specifying the Primary Key`_ in the Amazon
+ DynamoDB Developer Guide .
+
+ :type local_secondary_indexes: list
+ :param local_secondary_indexes:
+ One or more local secondary indexes (the maximum is five) to be created
+ on the table. Each index is scoped to a given hash key value. There
+ is a 10 GB size limit per hash key; otherwise, the size of a local
+ secondary index is unconstrained.
+
+ Each local secondary index in the array includes the following:
+
+
+ + IndexName - The name of the local secondary index. Must be unique
+ only for this table.
+ + KeySchema - Specifies the key schema for the local secondary index.
+ The key schema must begin with the same hash key attribute as the
+ table.
+ + Projection - Specifies attributes that are copied (projected) from
+ the table into the index. These are in addition to the primary key
+ attributes and index key attributes, which are automatically
+ projected. Each attribute specification is composed of:
+
+ + ProjectionType - One of the following:
+
+ + `KEYS_ONLY` - Only the index and primary keys are projected into the
+ index.
+ + `INCLUDE` - Only the specified table attributes are projected into
+ the index. The list of projected attributes are in NonKeyAttributes
+ .
+ + `ALL` - All of the table attributes are projected into the index.
+
+ + NonKeyAttributes - A list of one or more non-key attribute names that
+ are projected into the secondary index. The total count of
+ attributes specified in NonKeyAttributes , summed across all of the
+ secondary indexes, must not exceed 20.
If you project the same + attribute into two different indexes, this counts as two distinct + attributes when determining the total. + + :type global_secondary_indexes: list + :param global_secondary_indexes: + One or more global secondary indexes (the maximum is five) to be + created on the table. Each global secondary index in the array + includes the following: + + + + IndexName - The name of the global secondary index. Must be unique + only for this table. + + KeySchema - Specifies the key schema for the global secondary index. + + Projection - Specifies attributes that are copied (projected) from + the table into the index. These are in addition to the primary key + attributes and index key attributes, which are automatically + projected. Each attribute specification is composed of: + + + ProjectionType - One of the following: + + + `KEYS_ONLY` - Only the index and primary keys are projected into the + index. + + `INCLUDE` - Only the specified table attributes are projected into + the index. The list of projected attributes are in NonKeyAttributes + . + + `ALL` - All of the table attributes are projected into the index. + + + NonKeyAttributes - A list of one or more non-key attribute names that + are projected into the secondary index. The total count of + attributes specified in NonKeyAttributes , summed across all of the + secondary indexes, must not exceed 20. If you project the same + attribute into two different indexes, this counts as two distinct + attributes when determining the total. + + + ProvisionedThroughput - The provisioned throughput settings for the + global secondary index, consisting of read and write capacity + units. + + :type provisioned_throughput: dict + :param provisioned_throughput: Represents the provisioned throughput + settings for a specified table or index. The settings can be + modified using the UpdateTable operation. + For current minimum and maximum provisioned throughput values, see + `Limits`_ in the Amazon DynamoDB Developer Guide . + + """ + params = { + 'AttributeDefinitions': attribute_definitions, + 'TableName': table_name, + 'KeySchema': key_schema, + 'ProvisionedThroughput': provisioned_throughput, + } + if local_secondary_indexes is not None: + params['LocalSecondaryIndexes'] = local_secondary_indexes + if global_secondary_indexes is not None: + params['GlobalSecondaryIndexes'] = global_secondary_indexes + return self.make_request(action='CreateTable', + body=json.dumps(params)) + + def delete_item(self, table_name, key, expected=None, + conditional_operator=None, return_values=None, + return_consumed_capacity=None, + return_item_collection_metrics=None, + condition_expression=None, + expression_attribute_names=None, + expression_attribute_values=None): + """ + Deletes a single item in a table by primary key. You can + perform a conditional delete operation that deletes the item + if it exists, or if it has an expected attribute value. + + In addition to deleting an item, you can also return the + item's attribute values in the same operation, using the + ReturnValues parameter. + + Unless you specify conditions, the DeleteItem is an idempotent + operation; running it multiple times on the same item or + attribute does not result in an error response. + + Conditional deletes are useful for deleting items only if + specific conditions are met. If those conditions are met, + DynamoDB performs the delete. Otherwise, the item is not + deleted. + + :type table_name: string + :param table_name: The name of the table from which to delete the item. 
+
+ :type key: map
+ :param key: A map of attribute names to AttributeValue objects,
+ representing the primary key of the item to delete.
+ For the primary key, you must provide all of the attributes. For
+ example, with a hash type primary key, you only need to specify the
+ hash attribute. For a hash-and-range type primary key, you must
+ specify both the hash attribute and the range attribute.
+
+ :type expected: map
+ :param expected:
+ There is a newer parameter available. Use ConditionExpression instead.
+ Note that if you use Expected and ConditionExpression at the same
+ time, DynamoDB will return a ValidationException exception.
+
+ This parameter does not support lists or maps.
+
+ A map of attribute/condition pairs. Expected provides a conditional
+ block for the DeleteItem operation.
+
+ Each element of Expected consists of an attribute name, a comparison
+ operator, and one or more values. DynamoDB compares the attribute
+ with the value(s) you supplied, using the comparison operator. For
+ each Expected element, the result of the evaluation is either true
+ or false.
+
+ If you specify more than one element in the Expected map, then by
+ default all of the conditions must evaluate to true. In other
+ words, the conditions are ANDed together. (You can use the
+ ConditionalOperator parameter to OR the conditions instead. If you
+ do this, then at least one of the conditions must evaluate to true,
+ rather than all of them.)
+
+ If the Expected map evaluates to true, then the conditional operation
+ succeeds; otherwise, it fails.
+
+ Expected contains the following:
+
+
+ + AttributeValueList - One or more values to evaluate against the
+ supplied attribute. The number of values in the list depends on the
+ ComparisonOperator being used. For type Number, value comparisons
+ are numeric. String value comparisons for greater than, equals, or
+ less than are based on ASCII character code values. For example,
+ `a` is greater than `A`, and `a` is greater than `B`. For a list of
+ code values, see
+ `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
+ For type Binary, DynamoDB treats each byte of the binary data as
+ unsigned when it compares binary values, for example when
+ evaluating query expressions.
+ + ComparisonOperator - A comparator for evaluating attributes in the
+ AttributeValueList . When performing the comparison, DynamoDB uses
+ strongly consistent reads. The following comparison operators are
+ available: `EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL |
+ CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN` The following
+ are descriptions of each comparison operator.
+
+ + `EQ` : Equal. `EQ` is supported for all datatypes, including lists
+ and maps. AttributeValueList can contain only one AttributeValue
+ element of type String, Number, Binary, String Set, Number Set, or
+ Binary Set. If an item contains an AttributeValue element of a
+ different type than the one specified in the request, the value
+ does not match. For example, `{"S":"6"}` does not equal
+ `{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2",
+ "1"]}`.
+ + `NE` : Not equal. `NE` is supported for all datatypes, including
+ lists and maps. AttributeValueList can contain only one
+ AttributeValue of type String, Number, Binary, String Set, Number
+ Set, or Binary Set. If an item contains an AttributeValue of a
+ different type than the one specified in the request, the value
+ does not match. For example, `{"S":"6"}` does not equal
+ `{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2",
+ "1"]}`.
+ + `LE` : Less than or equal. AttributeValueList can contain only one
+ AttributeValue element of type String, Number, or Binary (not a set
+ type). If an item contains an AttributeValue element of a different
+ type than the one specified in the request, the value does not
+ match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
+ `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+ + `LT` : Less than. AttributeValueList can contain only one
+ AttributeValue of type String, Number, or Binary (not a set type).
+ If an item contains an AttributeValue element of a different type
+ than the one specified in the request, the value does not match.
+ For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
+ `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+ + `GE` : Greater than or equal. AttributeValueList can contain only one
+ AttributeValue element of type String, Number, or Binary (not a set
+ type). If an item contains an AttributeValue element of a different
+ type than the one specified in the request, the value does not
+ match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
+ `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+              + `GT` : Greater than. AttributeValueList can contain only
+                one AttributeValue element of type String, Number, or
+                Binary (not a set type). If an item contains an
+                AttributeValue element of a different type than the one
+                specified in the request, the value does not match. For
+                example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
+                `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+              + `NOT_NULL` : The attribute exists. `NOT_NULL` is supported
+                for all datatypes, including lists and maps. This operator
+                tests for the existence of an attribute, not its data
+                type. If the data type of attribute "`a`" is null, and you
+                evaluate it using `NOT_NULL`, the result is a Boolean
+                true. This result is because the attribute "`a`" exists;
+                its data type is not relevant to the `NOT_NULL` comparison
+                operator.
+              + `NULL` : The attribute does not exist. `NULL` is supported
+                for all datatypes, including lists and maps. This operator
+                tests for the nonexistence of an attribute, not its data
+                type. If the data type of attribute "`a`" is null, and you
+                evaluate it using `NULL`, the result is a Boolean false.
+                This is because the attribute "`a`" exists; its data type
+                is not relevant to the `NULL` comparison operator.
+              + `CONTAINS` : Checks for a subsequence, or value in a set.
+                AttributeValueList can contain only one AttributeValue
+                element of type String, Number, or Binary (not a set
+                type). If the target attribute of the comparison is of
+                type String, then the operator checks for a substring
+                match. If the target attribute of the comparison is of
+                type Binary, then the operator looks for a subsequence of
+                the target that matches the input. If the target attribute
+                of the comparison is a set ("`SS`", "`NS`", or "`BS`"),
+                then the operator evaluates to true if it finds an exact
+                match with any member of the set. CONTAINS is supported
+                for lists: When evaluating "`a CONTAINS b`", "`a`" can be
+                a list; however, "`b`" cannot be a set, a map, or a list.
+              + `NOT_CONTAINS` : Checks for absence of a subsequence, or
+                absence of a value in a set. AttributeValueList can
+                contain only one AttributeValue element of type String,
+                Number, or Binary (not a set type). If the target
+                attribute of the comparison is a String, then the operator
+                checks for the absence of a substring match. If the target
+                attribute of the comparison is Binary, then the operator
+                checks for the absence of a subsequence of the target that
+                matches the input. If the target attribute of the
+                comparison is a set ("`SS`", "`NS`", or "`BS`"), then the
+                operator evaluates to true if it does not find an exact
+                match with any member of the set. NOT_CONTAINS is
+                supported for lists: When evaluating "`a NOT CONTAINS b`",
+                "`a`" can be a list; however, "`b`" cannot be a set, a
+                map, or a list.
+              + `BEGINS_WITH` : Checks for a prefix. AttributeValueList
+                can contain only one AttributeValue of type String or
+                Binary (not a Number or a set type). The target attribute
+                of the comparison must be of type String or Binary (not a
+                Number or a set type).
+              + `IN` : Checks for matching elements within two sets.
+                AttributeValueList can contain one or more AttributeValue
+                elements of type String, Number, or Binary (not a set
+                type). These attributes are compared against an existing
+                set type attribute of an item. If any elements of the
+                input set are present in the item attribute, the
+                expression evaluates to true.
+              + `BETWEEN` : Greater than or equal to the first value, and
+                less than or equal to the second value. AttributeValueList
+                must contain two AttributeValue elements of the same type,
+                either String, Number, or Binary (not a set type). A
+                target attribute matches if the target value is greater
+                than, or equal to, the first element and less than, or
+                equal to, the second element. If an item contains an
+                AttributeValue element of a different type than the one
+                specified in the request, the value does not match. For
+                example, `{"S":"6"}` does not compare to `{"N":"6"}`.
+                Also, `{"N":"6"}` does not compare to
+                `{"NS":["6", "2", "1"]}`.
+
+            For usage examples of AttributeValueList and
+            ComparisonOperator, see `Legacy Conditional Parameters`_ in
+            the Amazon DynamoDB Developer Guide.
+
+            For backward compatibility with previous DynamoDB releases,
+            the following parameters can be used instead of
+            AttributeValueList and ComparisonOperator:
+
+            + Value - A value for DynamoDB to compare with an attribute.
+            + Exists - A Boolean value that causes DynamoDB to evaluate
+              the value before attempting the conditional operation:
+
+              + If Exists is `True`, DynamoDB will check to see if that
+                attribute value already exists in the table. If it is
+                found, then the condition evaluates to true; otherwise the
+                condition evaluates to false.
+              + If Exists is `False`, DynamoDB assumes that the attribute
+                value does not exist in the table. If in fact the value
+                does not exist, then the assumption is valid and the
+                condition evaluates to true. If the value is found,
+                despite the assumption that it does not exist, the
+                condition evaluates to false.
+
+              Note that the default value for Exists is `True`.
+
+            The Value and Exists parameters are incompatible with
+            AttributeValueList and ComparisonOperator. Note that if you
+            use both sets of parameters at once, DynamoDB will return a
+            ValidationException exception.
+
+        :type conditional_operator: string
+        :param conditional_operator:
+            There is a newer parameter available. Use ConditionExpression
+            instead. Note that if you use ConditionalOperator and
+            ConditionExpression at the same time, DynamoDB will return a
+            ValidationException exception.
+
+            This parameter does not support lists or maps.
+
+            A logical operator to apply to the conditions in the Expected
+            map:
+
+            + `AND` - If all of the conditions evaluate to true, then the
+              entire map evaluates to true.
+            + `OR` - If at least one of the conditions evaluates to true,
+              then the entire map evaluates to true.
+
+            If you omit ConditionalOperator, then `AND` is the default.
+
+            The operation will succeed only if the entire map evaluates to
+            true.
+
+        :type return_values: string
+        :param return_values:
+            Use ReturnValues if you want to get the item attributes as
+            they appeared before they were deleted. For DeleteItem, the
+            valid values are:
+
+            + `NONE` - If ReturnValues is not specified, or if its value
+              is `NONE`, then nothing is returned. (This setting is the
+              default for ReturnValues.)
+            + `ALL_OLD` - The content of the old item is returned.
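+
+        As an illustration of the legacy Expected form described above,
+        a condition requiring a (hypothetical) ProductStatus attribute
+        to equal "Discontinued" before the delete proceeds could be
+        written as::
+
+            # Legacy conditional block; attribute and value names are
+            # examples only, not part of the API.
+            expected = {
+                'ProductStatus': {
+                    'ComparisonOperator': 'EQ',
+                    'AttributeValueList': [{'S': 'Discontinued'}],
+                },
+            }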
+
+        :type return_consumed_capacity: string
+        :param return_consumed_capacity: A value that if set to `TOTAL`,
+            the response includes ConsumedCapacity data for tables and
+            indexes. If set to `INDEXES`, the response includes
+            ConsumedCapacity for indexes. If set to `NONE` (the default),
+            ConsumedCapacity is not included in the response.
+
+        :type return_item_collection_metrics: string
+        :param return_item_collection_metrics: A value that if set to
+            `SIZE`, causes the response to include statistics about item
+            collections, if any, that were modified during the operation.
+            If set to `NONE` (the default), no statistics are returned.
+
+        :type condition_expression: string
+        :param condition_expression: A condition that must be satisfied in
+            order for a conditional DeleteItem to succeed.
+            An expression can contain any of the following:
+
+            + Boolean functions: `attribute_exists | attribute_not_exists
+              | contains | begins_with`. These function names are
+              case-sensitive.
+            + Comparison operators: `= | <> | < | > | <= | >= | BETWEEN |
+              IN`
+            + Logical operators: `AND | OR | NOT`
+
+            For more information on condition expressions, go to
+            `Specifying Conditions`_ in the Amazon DynamoDB Developer
+            Guide.
+
+        :type expression_attribute_names: map
+        :param expression_attribute_names: One or more substitution tokens
+            for simplifying complex expressions. The following are some
+            use cases for using ExpressionAttributeNames:
+
+            + To shorten an attribute name that is very long or unwieldy
+              in an expression.
+            + To create a placeholder for repeating occurrences of an
+              attribute name in an expression.
+            + To prevent special characters in an attribute name from
+              being misinterpreted in an expression.
+
+            Use the **#** character in an expression to dereference an
+            attribute name. For example, consider the following
+            expression:
+
+            `order.customerInfo.LastName = "Smith" OR
+            order.customerInfo.LastName = "Jones"`
+
+            Now suppose that you specified the following for
+            ExpressionAttributeNames:
+
+            `{"#name":"order.customerInfo.LastName"}`
+
+            The expression can now be simplified as follows:
+
+            `#name = "Smith" OR #name = "Jones"`
+
+            For more information on expression attribute names, go to
+            `Accessing Item Attributes`_ in the Amazon DynamoDB Developer
+            Guide.
+
+        :type expression_attribute_values: map
+        :param expression_attribute_values: One or more values that can be
+            substituted in an expression.
+            Use the **:** (colon) character in an expression to
+            dereference an attribute value. For example, suppose that you
+            wanted to check whether the value of the ProductStatus
+            attribute was one of the following:
+
+            `Available | Backordered | Discontinued`
+
+            You would first need to specify ExpressionAttributeValues as
+            follows:
+
+            `{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
+            ":disc":{"S":"Discontinued"} }`
+
+            You could then use these values in an expression, such as
+            this:
+
+            `ProductStatus IN (:avail, :back, :disc)`
+
+            For more information on expression attribute values, go to
+            `Specifying Conditions`_ in the Amazon DynamoDB Developer
+            Guide.
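+
+        A minimal usage sketch (the table, key, and status attribute
+        names here are hypothetical examples; the connection is assumed
+        to be a DynamoDBConnection from boto.dynamodb2.layer1 with
+        credentials already configured)::
+
+            from boto.dynamodb2.layer1 import DynamoDBConnection
+
+            conn = DynamoDBConnection()
+            # Delete item 42, but only if it is marked Discontinued;
+            # return the deleted attributes.
+            response = conn.delete_item(
+                table_name='Products',
+                key={'ProductId': {'N': '42'}},
+                condition_expression='ProductStatus = :disc',
+                expression_attribute_values={':disc': {'S': 'Discontinued'}},
+                return_values='ALL_OLD')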
+ + """ + params = {'TableName': table_name, 'Key': key, } + if expected is not None: + params['Expected'] = expected + if conditional_operator is not None: + params['ConditionalOperator'] = conditional_operator + if return_values is not None: + params['ReturnValues'] = return_values + if return_consumed_capacity is not None: + params['ReturnConsumedCapacity'] = return_consumed_capacity + if return_item_collection_metrics is not None: + params['ReturnItemCollectionMetrics'] = return_item_collection_metrics + if condition_expression is not None: + params['ConditionExpression'] = condition_expression + if expression_attribute_names is not None: + params['ExpressionAttributeNames'] = expression_attribute_names + if expression_attribute_values is not None: + params['ExpressionAttributeValues'] = expression_attribute_values + return self.make_request(action='DeleteItem', + body=json.dumps(params)) + + def delete_table(self, table_name): + """ + The DeleteTable operation deletes a table and all of its + items. After a DeleteTable request, the specified table is in + the `DELETING` state until DynamoDB completes the deletion. If + the table is in the `ACTIVE` state, you can delete it. If a + table is in `CREATING` or `UPDATING` states, then DynamoDB + returns a ResourceInUseException . If the specified table does + not exist, DynamoDB returns a ResourceNotFoundException . If + table is already in the `DELETING` state, no error is + returned. + + + DynamoDB might continue to accept data read and write + operations, such as GetItem and PutItem , on a table in the + `DELETING` state until the table deletion is complete. + + + When you delete a table, any indexes on that table are also + deleted. + + Use the DescribeTable API to check the status of the table. + + :type table_name: string + :param table_name: The name of the table to delete. + + """ + params = {'TableName': table_name, } + return self.make_request(action='DeleteTable', + body=json.dumps(params)) + + def describe_table(self, table_name): + """ + Returns information about the table, including the current + status of the table, when it was created, the primary key + schema, and any indexes on the table. + + + If you issue a DescribeTable request immediately after a + CreateTable request, DynamoDB might return a + ResourceNotFoundException. This is because DescribeTable uses + an eventually consistent query, and the metadata for your + table might not be available at that moment. Wait for a few + seconds, and then try the DescribeTable request again. + + :type table_name: string + :param table_name: The name of the table to describe. + + """ + params = {'TableName': table_name, } + return self.make_request(action='DescribeTable', + body=json.dumps(params)) + + def get_item(self, table_name, key, attributes_to_get=None, + consistent_read=None, return_consumed_capacity=None, + projection_expression=None, expression_attribute_names=None): + """ + The GetItem operation returns a set of attributes for the item + with the given primary key. If there is no matching item, + GetItem does not return any data. + + GetItem provides an eventually consistent read by default. If + your application requires a strongly consistent read, set + ConsistentRead to `True`. Although a strongly consistent read + might take more time than an eventually consistent read, it + always returns the last updated value. + + :type table_name: string + :param table_name: The name of the table containing the requested item. 
+ + :type key: map + :param key: A map of attribute names to AttributeValue objects, + representing the primary key of the item to retrieve. + For the primary key, you must provide all of the attributes. For + example, with a hash type primary key, you only need to specify the + hash attribute. For a hash-and-range type primary key, you must + specify both the hash attribute and the range attribute. + + :type attributes_to_get: list + :param attributes_to_get: + There is a newer parameter available. Use ProjectionExpression instead. + Note that if you use AttributesToGet and ProjectionExpression at + the same time, DynamoDB will return a ValidationException + exception. + + This parameter allows you to retrieve lists or maps; however, it cannot + retrieve individual list or map elements. + + The names of one or more attributes to retrieve. If no attribute names + are specified, then all attributes will be returned. If any of the + requested attributes are not found, they will not appear in the + result. + + Note that AttributesToGet has no effect on provisioned throughput + consumption. DynamoDB determines capacity units consumed based on + item size, not on the amount of data that is returned to an + application. + + :type consistent_read: boolean + :param consistent_read: A value that if set to `True`, then the + operation uses strongly consistent reads; otherwise, eventually + consistent reads are used. + + :type return_consumed_capacity: string + :param return_consumed_capacity: A value that if set to `TOTAL`, the + response includes ConsumedCapacity data for tables and indexes. If + set to `INDEXES`, the response includes ConsumedCapacity for + indexes. If set to `NONE` (the default), ConsumedCapacity is not + included in the response. + + :type projection_expression: string + :param projection_expression: A string that identifies one or more + attributes to retrieve from the table. These attributes can include + scalars, sets, or elements of a JSON document. The attributes in + the expression must be separated by commas. + If no attribute names are specified, then all attributes will be + returned. If any of the requested attributes are not found, they + will not appear in the result. + + For more information on projection expressions, go to `Accessing Item + Attributes`_ in the Amazon DynamoDB Developer Guide . + + :type expression_attribute_names: map + :param expression_attribute_names: One or more substitution tokens for + simplifying complex expressions. The following are some use cases + for using ExpressionAttributeNames : + + + To shorten an attribute name that is very long or unwieldy in an + expression. + + To create a placeholder for repeating occurrences of an attribute + name in an expression. + + To prevent special characters in an attribute name from being + misinterpreted in an expression. + + + Use the **#** character in an expression to dereference an attribute + name. For example, consider the following expression: + + + + `order.customerInfo.LastName = "Smith" OR order.customerInfo.LastName + = "Jones"` + + + Now suppose that you specified the following for + ExpressionAttributeNames : + + + + `{"#name":"order.customerInfo.LastName"}` + + + The expression can now be simplified as follows: + + + + `#name = "Smith" OR #name = "Jones"` + + + For more information on expression attribute names, go to `Accessing + Item Attributes`_ in the Amazon DynamoDB Developer Guide . 
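+
+        A minimal usage sketch (table and attribute names are
+        hypothetical; the connection setup is the same assumption as in
+        the other sketches in this module)::
+
+            from boto.dynamodb2.layer1 import DynamoDBConnection
+
+            conn = DynamoDBConnection()
+            # Strongly consistent read of two attributes of item 42.
+            response = conn.get_item(
+                table_name='Products',
+                key={'ProductId': {'N': '42'}},
+                projection_expression='Title, ProductStatus',
+                consistent_read=True)
+            item = response.get('Item', {})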
+ + """ + params = {'TableName': table_name, 'Key': key, } + if attributes_to_get is not None: + params['AttributesToGet'] = attributes_to_get + if consistent_read is not None: + params['ConsistentRead'] = consistent_read + if return_consumed_capacity is not None: + params['ReturnConsumedCapacity'] = return_consumed_capacity + if projection_expression is not None: + params['ProjectionExpression'] = projection_expression + if expression_attribute_names is not None: + params['ExpressionAttributeNames'] = expression_attribute_names + return self.make_request(action='GetItem', + body=json.dumps(params)) + + def list_tables(self, exclusive_start_table_name=None, limit=None): + """ + Returns an array of table names associated with the current + account and endpoint. The output from ListTables is paginated, + with each page returning a maximum of 100 table names. + + :type exclusive_start_table_name: string + :param exclusive_start_table_name: The first table name that this + operation will evaluate. Use the value that was returned for + LastEvaluatedTableName in a previous operation, so that you can + obtain the next page of results. + + :type limit: integer + :param limit: A maximum number of table names to return. If this + parameter is not specified, the limit is 100. + + """ + params = {} + if exclusive_start_table_name is not None: + params['ExclusiveStartTableName'] = exclusive_start_table_name + if limit is not None: + params['Limit'] = limit + return self.make_request(action='ListTables', + body=json.dumps(params)) + + def put_item(self, table_name, item, expected=None, return_values=None, + return_consumed_capacity=None, + return_item_collection_metrics=None, + conditional_operator=None, condition_expression=None, + expression_attribute_names=None, + expression_attribute_values=None): + """ + Creates a new item, or replaces an old item with a new item. + If an item that has the same primary key as the new item + already exists in the specified table, the new item completely + replaces the existing item. You can perform a conditional put + operation (add a new item if one with the specified primary + key doesn't exist), or replace an existing item if it has + certain attribute values. + + In addition to putting an item, you can also return the item's + attribute values in the same operation, using the ReturnValues + parameter. + + When you add an item, the primary key attribute(s) are the + only required attributes. Attribute values cannot be null. + String and Binary type attributes must have lengths greater + than zero. Set type attributes cannot be empty. Requests with + empty values will be rejected with a ValidationException + exception. + + You can request that PutItem return either a copy of the + original item (before the update) or a copy of the updated + item (after the update). For more information, see the + ReturnValues description below. + + + To prevent a new item from replacing an existing item, use a + conditional put operation with ComparisonOperator set to + `NULL` for the primary key attribute, or attributes. + + + For more information about using this API, see `Working with + Items`_ in the Amazon DynamoDB Developer Guide . + + :type table_name: string + :param table_name: The name of the table to contain the item. + + :type item: map + :param item: A map of attribute name/value pairs, one for each + attribute. Only the primary key attributes are required; you can + optionally provide other attribute name-value pairs for the item. 
+            You must provide all of the attributes for the primary key.
+            For example, with a hash type primary key, you only need to
+            specify the hash attribute. For a hash-and-range type primary
+            key, you must specify both the hash attribute and the range
+            attribute.
+
+            If you specify any attributes that are part of an index key,
+            then the data types for those attributes must match those of
+            the schema in the table's attribute definition.
+
+            For more information about primary keys, see `Primary Key`_
+            in the Amazon DynamoDB Developer Guide.
+
+            Each element in the Item map is an AttributeValue object.
+
+        :type expected: map
+        :param expected:
+            There is a newer parameter available. Use ConditionExpression
+            instead. Note that if you use Expected and ConditionExpression
+            at the same time, DynamoDB will return a ValidationException
+            exception.
+
+            This parameter does not support lists or maps.
+
+            A map of attribute/condition pairs. Expected provides a
+            conditional block for the PutItem operation.
+
+            Each element of Expected consists of an attribute name, a
+            comparison operator, and one or more values. DynamoDB compares
+            the attribute with the value(s) you supplied, using the
+            comparison operator. For each Expected element, the result of
+            the evaluation is either true or false.
+
+            If you specify more than one element in the Expected map, then
+            by default all of the conditions must evaluate to true. In
+            other words, the conditions are ANDed together. (You can use
+            the ConditionalOperator parameter to OR the conditions
+            instead. If you do this, then at least one of the conditions
+            must evaluate to true, rather than all of them.)
+
+            If the Expected map evaluates to true, then the conditional
+            operation succeeds; otherwise, it fails.
+
+            Expected contains the following:
+
+            + AttributeValueList - One or more values to evaluate against
+              the supplied attribute. The number of values in the list
+              depends on the ComparisonOperator being used. For type
+              Number, value comparisons are numeric. String value
+              comparisons for greater than, equals, or less than are based
+              on ASCII character code values. For example, `a` is greater
+              than `A`, and `a` is greater than `B`. For a list of code
+              values, see
+              `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
+              For type Binary, DynamoDB treats each byte of the binary
+              data as unsigned when it compares binary values, for example
+              when evaluating query expressions.
+            + ComparisonOperator - A comparator for evaluating attributes
+              in the AttributeValueList. When performing the comparison,
+              DynamoDB uses strongly consistent reads. The following
+              comparison operators are available: `EQ | NE | LE | LT | GE
+              | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS |
+              BEGINS_WITH | IN | BETWEEN`. The following are descriptions
+              of each comparison operator.
+
+              + `EQ` : Equal. `EQ` is supported for all datatypes,
+                including lists and maps. AttributeValueList can contain
+                only one AttributeValue element of type String, Number,
+                Binary, String Set, Number Set, or Binary Set. If an item
+                contains an AttributeValue element of a different type
+                than the one specified in the request, the value does not
+                match. For example, `{"S":"6"}` does not equal
+                `{"N":"6"}`. Also, `{"N":"6"}` does not equal
+                `{"NS":["6", "2", "1"]}`.
+              + `NE` : Not equal. `NE` is supported for all datatypes,
+                including lists and maps. AttributeValueList can contain
+                only one AttributeValue of type String, Number, Binary,
+                String Set, Number Set, or Binary Set. If an item contains
+                an AttributeValue of a different type than the one
+                specified in the request, the value does not match. For
+                example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
+                `{"N":"6"}` does not equal `{"NS":["6", "2", "1"]}`.
+              + `LE` : Less than or equal. AttributeValueList can contain
+                only one AttributeValue element of type String, Number, or
+                Binary (not a set type). If an item contains an
+                AttributeValue element of a different type than the one
+                specified in the request, the value does not match. For
+                example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
+                `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+              + `LT` : Less than. AttributeValueList can contain only one
+                AttributeValue of type String, Number, or Binary (not a
+                set type). If an item contains an AttributeValue element
+                of a different type than the one specified in the request,
+                the value does not match. For example, `{"S":"6"}` does
+                not equal `{"N":"6"}`. Also, `{"N":"6"}` does not compare
+                to `{"NS":["6", "2", "1"]}`.
+              + `GE` : Greater than or equal. AttributeValueList can
+                contain only one AttributeValue element of type String,
+                Number, or Binary (not a set type). If an item contains an
+                AttributeValue element of a different type than the one
+                specified in the request, the value does not match. For
+                example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
+                `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+              + `GT` : Greater than. AttributeValueList can contain only
+                one AttributeValue element of type String, Number, or
+                Binary (not a set type). If an item contains an
+                AttributeValue element of a different type than the one
+                specified in the request, the value does not match. For
+                example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
+                `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+              + `NOT_NULL` : The attribute exists. `NOT_NULL` is supported
+                for all datatypes, including lists and maps. This operator
+                tests for the existence of an attribute, not its data
+                type. If the data type of attribute "`a`" is null, and you
+                evaluate it using `NOT_NULL`, the result is a Boolean
+                true. This result is because the attribute "`a`" exists;
+                its data type is not relevant to the `NOT_NULL` comparison
+                operator.
+              + `NULL` : The attribute does not exist. `NULL` is supported
+                for all datatypes, including lists and maps. This operator
+                tests for the nonexistence of an attribute, not its data
+                type. If the data type of attribute "`a`" is null, and you
+                evaluate it using `NULL`, the result is a Boolean false.
+                This is because the attribute "`a`" exists; its data type
+                is not relevant to the `NULL` comparison operator.
+              + `CONTAINS` : Checks for a subsequence, or value in a set.
+                AttributeValueList can contain only one AttributeValue
+                element of type String, Number, or Binary (not a set
+                type). If the target attribute of the comparison is of
+                type String, then the operator checks for a substring
+                match. If the target attribute of the comparison is of
+                type Binary, then the operator looks for a subsequence of
+                the target that matches the input. If the target attribute
+                of the comparison is a set ("`SS`", "`NS`", or "`BS`"),
+                then the operator evaluates to true if it finds an exact
+                match with any member of the set. CONTAINS is supported
+                for lists: When evaluating "`a CONTAINS b`", "`a`" can be
+                a list; however, "`b`" cannot be a set, a map, or a list.
+              + `NOT_CONTAINS` : Checks for absence of a subsequence, or
+                absence of a value in a set. AttributeValueList can
+                contain only one AttributeValue element of type String,
+                Number, or Binary (not a set type). If the target
+                attribute of the comparison is a String, then the operator
+                checks for the absence of a substring match. If the target
+                attribute of the comparison is Binary, then the operator
+                checks for the absence of a subsequence of the target that
+                matches the input. If the target attribute of the
+                comparison is a set ("`SS`", "`NS`", or "`BS`"), then the
+                operator evaluates to true if it does not find an exact
+                match with any member of the set. NOT_CONTAINS is
+                supported for lists: When evaluating "`a NOT CONTAINS b`",
+                "`a`" can be a list; however, "`b`" cannot be a set, a
+                map, or a list.
+              + `BEGINS_WITH` : Checks for a prefix. AttributeValueList
+                can contain only one AttributeValue of type String or
+                Binary (not a Number or a set type). The target attribute
+                of the comparison must be of type String or Binary (not a
+                Number or a set type).
+              + `IN` : Checks for matching elements within two sets.
+                AttributeValueList can contain one or more AttributeValue
+                elements of type String, Number, or Binary (not a set
+                type). These attributes are compared against an existing
+                set type attribute of an item. If any elements of the
+                input set are present in the item attribute, the
+                expression evaluates to true.
+              + `BETWEEN` : Greater than or equal to the first value, and
+                less than or equal to the second value. AttributeValueList
+                must contain two AttributeValue elements of the same type,
+                either String, Number, or Binary (not a set type). A
+                target attribute matches if the target value is greater
+                than, or equal to, the first element and less than, or
+                equal to, the second element. If an item contains an
+                AttributeValue element of a different type than the one
+                specified in the request, the value does not match. For
+                example, `{"S":"6"}` does not compare to `{"N":"6"}`.
+                Also, `{"N":"6"}` does not compare to
+                `{"NS":["6", "2", "1"]}`.
+
+            For usage examples of AttributeValueList and
+            ComparisonOperator, see `Legacy Conditional Parameters`_ in
+            the Amazon DynamoDB Developer Guide.
+
+            For backward compatibility with previous DynamoDB releases,
+            the following parameters can be used instead of
+            AttributeValueList and ComparisonOperator:
+
+            + Value - A value for DynamoDB to compare with an attribute.
+            + Exists - A Boolean value that causes DynamoDB to evaluate
+              the value before attempting the conditional operation:
+
+              + If Exists is `True`, DynamoDB will check to see if that
+                attribute value already exists in the table. If it is
+                found, then the condition evaluates to true; otherwise the
+                condition evaluates to false.
+              + If Exists is `False`, DynamoDB assumes that the attribute
+                value does not exist in the table. If in fact the value
+                does not exist, then the assumption is valid and the
+                condition evaluates to true. If the value is found,
+                despite the assumption that it does not exist, the
+                condition evaluates to false.
+
+              Note that the default value for Exists is `True`.
+
+            The Value and Exists parameters are incompatible with
+            AttributeValueList and ComparisonOperator. Note that if you
+            use both sets of parameters at once, DynamoDB will return a
+            ValidationException exception.
+
+        :type return_values: string
+        :param return_values:
+            Use ReturnValues if you want to get the item attributes as
+            they appeared before they were updated with the PutItem
+            request. For PutItem, the valid values are:
+
+            + `NONE` - If ReturnValues is not specified, or if its value
+              is `NONE`, then nothing is returned. (This setting is the
+              default for ReturnValues.)
+            + `ALL_OLD` - If PutItem overwrote an attribute name-value
+              pair, then the content of the old item is returned.
+
+        :type return_consumed_capacity: string
+        :param return_consumed_capacity: A value that if set to `TOTAL`,
+            the response includes ConsumedCapacity data for tables and
+            indexes. If set to `INDEXES`, the response includes
+            ConsumedCapacity for indexes. If set to `NONE` (the default),
+            ConsumedCapacity is not included in the response.
+
+        :type return_item_collection_metrics: string
+        :param return_item_collection_metrics: A value that if set to
+            `SIZE`, causes the response to include statistics about item
+            collections, if any, that were modified during the operation.
+            If set to `NONE` (the default), no statistics are returned.
+
+        :type conditional_operator: string
+        :param conditional_operator:
+            There is a newer parameter available. Use ConditionExpression
+            instead. Note that if you use ConditionalOperator and
+            ConditionExpression at the same time, DynamoDB will return a
+            ValidationException exception.
+
+            This parameter does not support lists or maps.
+
+            A logical operator to apply to the conditions in the Expected
+            map:
+
+            + `AND` - If all of the conditions evaluate to true, then the
+              entire map evaluates to true.
+            + `OR` - If at least one of the conditions evaluates to true,
+              then the entire map evaluates to true.
+
+            If you omit ConditionalOperator, then `AND` is the default.
+
+            The operation will succeed only if the entire map evaluates to
+            true.
+
+        :type condition_expression: string
+        :param condition_expression: A condition that must be satisfied in
+            order for a conditional PutItem operation to succeed.
+            An expression can contain any of the following:
+
+            + Boolean functions: `attribute_exists | attribute_not_exists
+              | contains | begins_with`. These function names are
+              case-sensitive.
+            + Comparison operators: `= | <> | < | > | <= | >= | BETWEEN |
+              IN`
+            + Logical operators: `AND | OR | NOT`
+
+            For more information on condition expressions, go to
+            `Specifying Conditions`_ in the Amazon DynamoDB Developer
+            Guide.
+
+        :type expression_attribute_names: map
+        :param expression_attribute_names: One or more substitution tokens
+            for simplifying complex expressions. The following are some
+            use cases for using ExpressionAttributeNames:
+
+            + To shorten an attribute name that is very long or unwieldy
+              in an expression.
+            + To create a placeholder for repeating occurrences of an
+              attribute name in an expression.
+            + To prevent special characters in an attribute name from
+              being misinterpreted in an expression.
+
+            Use the **#** character in an expression to dereference an
+            attribute name. For example, consider the following
+            expression:
+
+            `order.customerInfo.LastName = "Smith" OR
+            order.customerInfo.LastName = "Jones"`
+
+            Now suppose that you specified the following for
+            ExpressionAttributeNames:
+
+            `{"#name":"order.customerInfo.LastName"}`
+
+            The expression can now be simplified as follows:
+
+            `#name = "Smith" OR #name = "Jones"`
+
+            For more information on expression attribute names, go to
+            `Accessing Item Attributes`_ in the Amazon DynamoDB Developer
+            Guide.
+
+        :type expression_attribute_values: map
+        :param expression_attribute_values: One or more values that can be
+            substituted in an expression.
+            Use the **:** (colon) character in an expression to
+            dereference an attribute value. For example, suppose that you
+            wanted to check whether the value of the ProductStatus
+            attribute was one of the following:
+
+            `Available | Backordered | Discontinued`
+
+            You would first need to specify ExpressionAttributeValues as
+            follows:
+
+            `{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
+            ":disc":{"S":"Discontinued"} }`
+
+            You could then use these values in an expression, such as
+            this:
+
+            `ProductStatus IN (:avail, :back, :disc)`
+
+            For more information on expression attribute values, go to
+            `Specifying Conditions`_ in the Amazon DynamoDB Developer
+            Guide.
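+
+        A minimal usage sketch (names are hypothetical). The
+        attribute_not_exists condition prevents the put from silently
+        replacing an existing item with the same key::
+
+            from boto.dynamodb2.layer1 import DynamoDBConnection
+
+            conn = DynamoDBConnection()
+            conn.put_item(
+                table_name='Products',
+                item={
+                    'ProductId': {'N': '42'},
+                    'Title': {'S': 'Widget'},
+                    'ProductStatus': {'S': 'Available'},
+                },
+                condition_expression='attribute_not_exists(ProductId)')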
+ + """ + params = {'TableName': table_name, 'Item': item, } + if expected is not None: + params['Expected'] = expected + if return_values is not None: + params['ReturnValues'] = return_values + if return_consumed_capacity is not None: + params['ReturnConsumedCapacity'] = return_consumed_capacity + if return_item_collection_metrics is not None: + params['ReturnItemCollectionMetrics'] = return_item_collection_metrics + if conditional_operator is not None: + params['ConditionalOperator'] = conditional_operator + if condition_expression is not None: + params['ConditionExpression'] = condition_expression + if expression_attribute_names is not None: + params['ExpressionAttributeNames'] = expression_attribute_names + if expression_attribute_values is not None: + params['ExpressionAttributeValues'] = expression_attribute_values + return self.make_request(action='PutItem', + body=json.dumps(params)) + + def query(self, table_name, key_conditions, index_name=None, select=None, + attributes_to_get=None, limit=None, consistent_read=None, + query_filter=None, conditional_operator=None, + scan_index_forward=None, exclusive_start_key=None, + return_consumed_capacity=None, projection_expression=None, + filter_expression=None, expression_attribute_names=None, + expression_attribute_values=None): + """ + A Query operation directly accesses items from a table using + the table primary key, or from an index using the index key. + You must provide a specific hash key value. You can narrow the + scope of the query by using comparison operators on the range + key value, or on the index key. You can use the + ScanIndexForward parameter to get results in forward or + reverse order, by range key or by index key. + + Queries that do not return results consume the minimum number + of read capacity units for that type of read operation. + + If the total number of items meeting the query criteria + exceeds the result set size limit of 1 MB, the query stops and + results are returned to the user with LastEvaluatedKey to + continue the query in a subsequent operation. Unlike a Scan + operation, a Query operation never returns both an empty + result set and a LastEvaluatedKey . The LastEvaluatedKey is + only provided if the results exceed 1 MB, or if you have used + Limit . + + You can query a table, a local secondary index, or a global + secondary index. For a query on a table or on a local + secondary index, you can set ConsistentRead to true and obtain + a strongly consistent result. Global secondary indexes support + eventually consistent reads only, so do not specify + ConsistentRead when querying a global secondary index. + + :type table_name: string + :param table_name: The name of the table containing the requested + items. + + :type index_name: string + :param index_name: The name of an index to query. This index can be any + local secondary index or global secondary index on the table. + + :type select: string + :param select: The attributes to be returned in the result. You can + retrieve all item attributes, specific item attributes, the count + of matching items, or in the case of an index, some or all of the + attributes projected into the index. + + + `ALL_ATTRIBUTES` - Returns all of the item attributes from the + specified table or index. If you query a local secondary index, + then for each matching item in the index DynamoDB will fetch the + entire item from the parent table. 
If the index is configured to + project all item attributes, then all of the data can be obtained + from the local secondary index, and no fetching is required. + + `ALL_PROJECTED_ATTRIBUTES` - Allowed only when querying an index. + Retrieves all attributes that have been projected into the index. + If the index is configured to project all attributes, this return + value is equivalent to specifying `ALL_ATTRIBUTES`. + + `COUNT` - Returns the number of matching items, rather than the + matching items themselves. + + `SPECIFIC_ATTRIBUTES` - Returns only the attributes listed in + AttributesToGet . This return value is equivalent to specifying + AttributesToGet without specifying any value for Select . If you + query a local secondary index and request only attributes that are + projected into that index, the operation will read only the index + and not the table. If any of the requested attributes are not + projected into the local secondary index, DynamoDB will fetch each + of these attributes from the parent table. This extra fetching + incurs additional throughput cost and latency. If you query a + global secondary index, you can only request attributes that are + projected into the index. Global secondary index queries cannot + fetch attributes from the parent table. + + + If neither Select nor AttributesToGet are specified, DynamoDB defaults + to `ALL_ATTRIBUTES` when accessing a table, and + `ALL_PROJECTED_ATTRIBUTES` when accessing an index. You cannot use + both Select and AttributesToGet together in a single request, + unless the value for Select is `SPECIFIC_ATTRIBUTES`. (This usage + is equivalent to specifying AttributesToGet without any value for + Select .) + + :type attributes_to_get: list + :param attributes_to_get: + There is a newer parameter available. Use ProjectionExpression instead. + Note that if you use AttributesToGet and ProjectionExpression at + the same time, DynamoDB will return a ValidationException + exception. + + This parameter allows you to retrieve lists or maps; however, it cannot + retrieve individual list or map elements. + + The names of one or more attributes to retrieve. If no attribute names + are specified, then all attributes will be returned. If any of the + requested attributes are not found, they will not appear in the + result. + + Note that AttributesToGet has no effect on provisioned throughput + consumption. DynamoDB determines capacity units consumed based on + item size, not on the amount of data that is returned to an + application. + + You cannot use both AttributesToGet and Select together in a Query + request, unless the value for Select is `SPECIFIC_ATTRIBUTES`. + (This usage is equivalent to specifying AttributesToGet without any + value for Select .) + + If you query a local secondary index and request only attributes that + are projected into that index, the operation will read only the + index and not the table. If any of the requested attributes are not + projected into the local secondary index, DynamoDB will fetch each + of these attributes from the parent table. This extra fetching + incurs additional throughput cost and latency. + + If you query a global secondary index, you can only request attributes + that are projected into the index. Global secondary index queries + cannot fetch attributes from the parent table. + + :type limit: integer + :param limit: The maximum number of items to evaluate (not necessarily + the number of matching items). 
+            If DynamoDB processes the number of items up to the limit
+            while processing the results, it stops the operation and
+            returns the matching values up to that point, and a key in
+            LastEvaluatedKey to apply in a subsequent operation, so that
+            you can pick up where you left off. Also, if the processed
+            data set size exceeds 1 MB before DynamoDB reaches this limit,
+            it stops the operation and returns the matching values up to
+            the limit, and a key in LastEvaluatedKey to apply in a
+            subsequent operation to continue the operation. For more
+            information, see `Query and Scan`_ in the Amazon DynamoDB
+            Developer Guide.
+
+        :type consistent_read: boolean
+        :param consistent_read: A value that if set to `True`, then the
+            operation uses strongly consistent reads; otherwise,
+            eventually consistent reads are used.
+            Strongly consistent reads are not supported on global
+            secondary indexes. If you query a global secondary index with
+            ConsistentRead set to `True`, you will receive an error
+            message.
+
+        :type key_conditions: map
+        :param key_conditions: The selection criteria for the query. For a
+            query on a table, you can have conditions only on the table
+            primary key attributes. You must specify the hash key
+            attribute name and value as an `EQ` condition. You can
+            optionally specify a second condition, referring to the range
+            key attribute. If you do not specify a range key condition,
+            all items under the hash key will be fetched and processed.
+            Any filters will be applied after this.
+            For a query on an index, you can have conditions only on the
+            index key attributes. You must specify the index hash
+            attribute name and value as an EQ condition. You can
+            optionally specify a second condition, referring to the index
+            key range attribute.
+
+            Each KeyConditions element consists of an attribute name to
+            compare, along with the following:
+
+            + AttributeValueList - One or more values to evaluate against
+              the supplied attribute. The number of values in the list
+              depends on the ComparisonOperator being used. For type
+              Number, value comparisons are numeric. String value
+              comparisons for greater than, equals, or less than are based
+              on ASCII character code values. For example, `a` is greater
+              than `A`, and `a` is greater than `B`. For a list of code
+              values, see
+              `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
+              For Binary, DynamoDB treats each byte of the binary data as
+              unsigned when it compares binary values, for example when
+              evaluating query expressions.
+            + ComparisonOperator - A comparator for evaluating attributes,
+              for example, equals, greater than, less than, and so on. For
+              KeyConditions, only the following comparison operators are
+              supported: `EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN`.
+              The following are descriptions of these comparison
+              operators.
+
+              + `EQ` : Equal. AttributeValueList can contain only one
+                AttributeValue of type String, Number, or Binary (not a
+                set type). If an item contains an AttributeValue element
+                of a different type than the one specified in the request,
+                the value does not match. For example, `{"S":"6"}` does
+                not equal `{"N":"6"}`. Also, `{"N":"6"}` does not equal
+                `{"NS":["6", "2", "1"]}`.
+              + `LE` : Less than or equal. AttributeValueList can contain
+                only one AttributeValue element of type String, Number, or
+                Binary (not a set type). If an item contains an
+                AttributeValue element of a different type than the one
+                specified in the request, the value does not match. For
+                example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
+                `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+              + `LT` : Less than. AttributeValueList can contain only one
+                AttributeValue of type String, Number, or Binary (not a
+                set type). If an item contains an AttributeValue element
+                of a different type than the one specified in the request,
+                the value does not match. For example, `{"S":"6"}` does
+                not equal `{"N":"6"}`. Also, `{"N":"6"}` does not compare
+                to `{"NS":["6", "2", "1"]}`.
+              + `GE` : Greater than or equal. AttributeValueList can
+                contain only one AttributeValue element of type String,
+                Number, or Binary (not a set type). If an item contains an
+                AttributeValue element of a different type than the one
+                specified in the request, the value does not match. For
+                example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
+                `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+              + `GT` : Greater than. AttributeValueList can contain only
+                one AttributeValue element of type String, Number, or
+                Binary (not a set type). If an item contains an
+                AttributeValue element of a different type than the one
+                specified in the request, the value does not match. For
+                example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
+                `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+              + `BEGINS_WITH` : Checks for a prefix. AttributeValueList
+                can contain only one AttributeValue of type String or
+                Binary (not a Number or a set type). The target attribute
+                of the comparison must be of type String or Binary (not a
+                Number or a set type).
+              + `BETWEEN` : Greater than or equal to the first value, and
+                less than or equal to the second value. AttributeValueList
+                must contain two AttributeValue elements of the same type,
+                either String, Number, or Binary (not a set type). A
+                target attribute matches if the target value is greater
+                than, or equal to, the first element and less than, or
+                equal to, the second element. If an item contains an
+                AttributeValue element of a different type than the one
+                specified in the request, the value does not match. For
+                example, `{"S":"6"}` does not compare to `{"N":"6"}`.
+                Also, `{"N":"6"}` does not compare to
+                `{"NS":["6", "2", "1"]}`.
+
+            For usage examples of AttributeValueList and
+            ComparisonOperator, see `Legacy Conditional Parameters`_ in
+            the Amazon DynamoDB Developer Guide.
+
+        :type query_filter: map
+        :param query_filter:
+            There is a newer parameter available. Use FilterExpression
+            instead. Note that if you use QueryFilter and FilterExpression
+            at the same time, DynamoDB will return a ValidationException
+            exception.
+
+            This parameter does not support lists or maps.
+
+            A condition that evaluates the query results after the items
+            are read and returns only the desired values.
+
+            Query filters are applied after the items are read, so they do
+            not limit the capacity used.
+
+            If you specify more than one condition in the QueryFilter map,
+            then by default all of the conditions must evaluate to true.
+            In other words, the conditions are ANDed together. (You can
+            use the ConditionalOperator parameter to OR the conditions
+            instead. If you do this, then at least one of the conditions
+            must evaluate to true, rather than all of them.)
+
+            QueryFilter does not allow key attributes. You cannot define a
+            filter condition on a hash key or range key.
+
+            Each QueryFilter element consists of an attribute name to
+            compare, along with the following:
+
+            + AttributeValueList - One or more values to evaluate against
+              the supplied attribute. The number of values in the list
+              depends on the operator specified in ComparisonOperator. For
+              type Number, value comparisons are numeric. String value
+              comparisons for greater than, equals, or less than are based
+              on ASCII character code values. For example, `a` is greater
+              than `A`, and `a` is greater than `B`. For a list of code
+              values, see
+              `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
+              For type Binary, DynamoDB treats each byte of the binary
+              data as unsigned when it compares binary values, for example
+              when evaluating query expressions. For information on
+              specifying data types in JSON, see `JSON Data Format`_ in
+              the Amazon DynamoDB Developer Guide.
+            + ComparisonOperator - A comparator for evaluating attributes.
+              For example, equals, greater than, less than, etc. The
+              following comparison operators are available: `EQ | NE | LE
+              | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS |
+              BEGINS_WITH | IN | BETWEEN`. For complete descriptions of
+              all comparison operators, see `API_Condition.html`_.
+
+        :type conditional_operator: string
+        :param conditional_operator:
+            This parameter does not support lists or maps.
+
+            A logical operator to apply to the conditions in the
+            QueryFilter map:
+
+            + `AND` - If all of the conditions evaluate to true, then the
+              entire map evaluates to true.
+            + `OR` - If at least one of the conditions evaluates to true,
+              then the entire map evaluates to true.
+
+            If you omit ConditionalOperator, then `AND` is the default.
+ + The operation will succeed only if the entire map evaluates to true. + + :type scan_index_forward: boolean + :param scan_index_forward: A value that specifies ascending (true) or + descending (false) traversal of the index. DynamoDB returns results + reflecting the requested order determined by the range key. If the + data type is Number, the results are returned in numeric order. For + type String, the results are returned in order of ASCII character + code values. For type Binary, DynamoDB treats each byte of the + binary data as unsigned when it compares binary values. + If ScanIndexForward is not specified, the results are returned in + ascending order. + + :type exclusive_start_key: map + :param exclusive_start_key: The primary key of the first item that this + operation will evaluate. Use the value that was returned for + LastEvaluatedKey in the previous operation. + The data type for ExclusiveStartKey must be String, Number or Binary. + No set data types are allowed. + + :type return_consumed_capacity: string + :param return_consumed_capacity: A value that if set to `TOTAL`, the + response includes ConsumedCapacity data for tables and indexes. If + set to `INDEXES`, the response includes ConsumedCapacity for + indexes. If set to `NONE` (the default), ConsumedCapacity is not + included in the response. + + :type projection_expression: string + :param projection_expression: A string that identifies one or more + attributes to retrieve from the table. These attributes can include + scalars, sets, or elements of a JSON document. The attributes in + the expression must be separated by commas. + If no attribute names are specified, then all attributes will be + returned. If any of the requested attributes are not found, they + will not appear in the result. + + For more information on projection expressions, go to `Accessing Item + Attributes`_ in the Amazon DynamoDB Developer Guide . + + :type filter_expression: string + :param filter_expression: A condition that evaluates the query results + after the items are read and returns only the desired values. + The condition you specify is applied to the items queried; any items + that do not match the expression are not returned. + Filter expressions are applied after the items are read, so they do not + limit the capacity used. + A FilterExpression has the same syntax as a ConditionExpression . For + more information on expression syntax, go to `Specifying + Conditions`_ in the Amazon DynamoDB Developer Guide . + + :type expression_attribute_names: map + :param expression_attribute_names: One or more substitution tokens for + simplifying complex expressions. The following are some use cases + for using ExpressionAttributeNames : + + + To shorten an attribute name that is very long or unwieldy in an + expression. + + To create a placeholder for repeating occurrences of an attribute + name in an expression. + + To prevent special characters in an attribute name from being + misinterpreted in an expression. + + + Use the **#** character in an expression to dereference an attribute + name. 
For example, consider the following expression: + + + + `order.customerInfo.LastName = "Smith" OR order.customerInfo.LastName + = "Jones"` + + + Now suppose that you specified the following for + ExpressionAttributeNames : + + + + `{"#name":"order.customerInfo.LastName"}` + + + The expression can now be simplified as follows: + + + + `#name = "Smith" OR #name = "Jones"` + + + For more information on expression attribute names, go to `Accessing + Item Attributes`_ in the Amazon DynamoDB Developer Guide . + + :type expression_attribute_values: map + :param expression_attribute_values: One or more values that can be + substituted in an expression. + Use the **:** (colon) character in an expression to dereference an + attribute value. For example, suppose that you wanted to check + whether the value of the ProductStatus attribute was one of the + following: + + `Available | Backordered | Discontinued` + + You would first need to specify ExpressionAttributeValues as follows: + + `{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, + ":disc":{"S":"Discontinued"} }` + + You could then use these values in an expression, such as this: + + `ProductStatus IN (:avail, :back, :disc)` + + For more information on expression attribute values, go to `Specifying + Conditions`_ in the Amazon DynamoDB Developer Guide . + + """ + params = { + 'TableName': table_name, + 'KeyConditions': key_conditions, + } + if index_name is not None: + params['IndexName'] = index_name + if select is not None: + params['Select'] = select + if attributes_to_get is not None: + params['AttributesToGet'] = attributes_to_get + if limit is not None: + params['Limit'] = limit + if consistent_read is not None: + params['ConsistentRead'] = consistent_read + if query_filter is not None: + params['QueryFilter'] = query_filter + if conditional_operator is not None: + params['ConditionalOperator'] = conditional_operator + if scan_index_forward is not None: + params['ScanIndexForward'] = scan_index_forward + if exclusive_start_key is not None: + params['ExclusiveStartKey'] = exclusive_start_key + if return_consumed_capacity is not None: + params['ReturnConsumedCapacity'] = return_consumed_capacity + if projection_expression is not None: + params['ProjectionExpression'] = projection_expression + if filter_expression is not None: + params['FilterExpression'] = filter_expression + if expression_attribute_names is not None: + params['ExpressionAttributeNames'] = expression_attribute_names + if expression_attribute_values is not None: + params['ExpressionAttributeValues'] = expression_attribute_values + return self.make_request(action='Query', + body=json.dumps(params)) + + def scan(self, table_name, attributes_to_get=None, limit=None, + select=None, scan_filter=None, conditional_operator=None, + exclusive_start_key=None, return_consumed_capacity=None, + total_segments=None, segment=None, projection_expression=None, + filter_expression=None, expression_attribute_names=None, + expression_attribute_values=None): + """ + The Scan operation returns one or more items and item + attributes by accessing every item in the table. To have + DynamoDB return fewer items, you can provide a ScanFilter + operation. + + If the total number of scanned items exceeds the maximum data + set size limit of 1 MB, the scan stops and results are + returned to the user as a LastEvaluatedKey value to continue + the scan in a subsequent operation. The results also include + the number of items exceeding the limit. 
A scan can result in + no table data meeting the filter criteria. + + The result set is eventually consistent. + + By default, Scan operations proceed sequentially; however, for + faster performance on large tables, applications can request a + parallel Scan operation by specifying the Segment and + TotalSegments parameters. For more information, see `Parallel + Scan`_ in the Amazon DynamoDB Developer Guide . + + :type table_name: string + :param table_name: The name of the table containing the requested + items. + + :type attributes_to_get: list + :param attributes_to_get: + There is a newer parameter available. Use ProjectionExpression instead. + Note that if you use AttributesToGet and ProjectionExpression at + the same time, DynamoDB will return a ValidationException + exception. + + This parameter allows you to retrieve lists or maps; however, it cannot + retrieve individual list or map elements. + + The names of one or more attributes to retrieve. If no attribute names + are specified, then all attributes will be returned. If any of the + requested attributes are not found, they will not appear in the + result. + + Note that AttributesToGet has no effect on provisioned throughput + consumption. DynamoDB determines capacity units consumed based on + item size, not on the amount of data that is returned to an + application. + + :type limit: integer + :param limit: The maximum number of items to evaluate (not necessarily + the number of matching items). If DynamoDB processes the number of + items up to the limit while processing the results, it stops the + operation and returns the matching values up to that point, and a + key in LastEvaluatedKey to apply in a subsequent operation, so that + you can pick up where you left off. Also, if the processed data set + size exceeds 1 MB before DynamoDB reaches this limit, it stops the + operation and returns the matching values up to the limit, and a + key in LastEvaluatedKey to apply in a subsequent operation to + continue the operation. For more information, see `Query and Scan`_ + in the Amazon DynamoDB Developer Guide . + + :type select: string + :param select: The attributes to be returned in the result. You can + retrieve all item attributes, specific item attributes, or the + count of matching items. + + + `ALL_ATTRIBUTES` - Returns all of the item attributes. + + `COUNT` - Returns the number of matching items, rather than the + matching items themselves. + + `SPECIFIC_ATTRIBUTES` - Returns only the attributes listed in + AttributesToGet . This return value is equivalent to specifying + AttributesToGet without specifying any value for Select . + + + If neither Select nor AttributesToGet are specified, DynamoDB defaults + to `ALL_ATTRIBUTES`. You cannot use both AttributesToGet and Select + together in a single request, unless the value for Select is + `SPECIFIC_ATTRIBUTES`. (This usage is equivalent to specifying + AttributesToGet without any value for Select .) + + :type scan_filter: map + :param scan_filter: + There is a newer parameter available. Use FilterExpression instead. + Note that if you use ScanFilter and FilterExpression at the same + time, DynamoDB will return a ValidationException exception. + + This parameter does not support lists or maps. + + A condition that evaluates the scan results and returns only the + desired values. + + If you specify more than one condition in the ScanFilter map, then by + default all of the conditions must evaluate to true. In other + words, the conditions are ANDed together. 
(You can use the + ConditionalOperator parameter to OR the conditions instead. If you + do this, then at least one of the conditions must evaluate to true, + rather than all of them.) + + Each ScanFilter element consists of an attribute name to compare, along + with the following: + + + + AttributeValueList - One or more values to evaluate against the + supplied attribute. The number of values in the list depends on the + operator specified in ComparisonOperator . For type Number, value + comparisons are numeric. String value comparisons for greater than, + equals, or less than are based on ASCII character code values. For + example, `a` is greater than `A`, and `a` is greater than `B`. For + a list of code values, see + `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_. + For Binary, DynamoDB treats each byte of the binary data as + unsigned when it compares binary values, for example when + evaluating query expressions. For information on specifying data + types in JSON, see `JSON Data Format`_ in the Amazon DynamoDB + Developer Guide . + + ComparisonOperator - A comparator for evaluating attributes. For + example, equals, greater than, less than, etc. The following + comparison operators are available: `EQ | NE | LE | LT | GE | GT | + NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | + BETWEEN` For complete descriptions of all comparison operators, see + `Condition`_. + + :type conditional_operator: string + :param conditional_operator: + There is a newer parameter available. Use ConditionExpression instead. + Note that if you use ConditionalOperator and ConditionExpression at + the same time, DynamoDB will return a ValidationException + exception. + + This parameter does not support lists or maps. + + A logical operator to apply to the conditions in the ScanFilter map: + + + + `AND` - If all of the conditions evaluate to true, then the entire + map evaluates to true. + + `OR` - If at least one of the conditions evaluate to true, then the + entire map evaluates to true. + + + If you omit ConditionalOperator , then `AND` is the default. + + The operation will succeed only if the entire map evaluates to true. + + :type exclusive_start_key: map + :param exclusive_start_key: The primary key of the first item that this + operation will evaluate. Use the value that was returned for + LastEvaluatedKey in the previous operation. + The data type for ExclusiveStartKey must be String, Number or Binary. + No set data types are allowed. + + In a parallel scan, a Scan request that includes ExclusiveStartKey must + specify the same segment whose previous Scan returned the + corresponding value of LastEvaluatedKey . + + :type return_consumed_capacity: string + :param return_consumed_capacity: A value that if set to `TOTAL`, the + response includes ConsumedCapacity data for tables and indexes. If + set to `INDEXES`, the response includes ConsumedCapacity for + indexes. If set to `NONE` (the default), ConsumedCapacity is not + included in the response. + + :type total_segments: integer + :param total_segments: For a parallel Scan request, TotalSegments + represents the total number of segments into which the Scan + operation will be divided. The value of TotalSegments corresponds + to the number of application workers that will perform the parallel + scan. For example, if you want to scan a table using four + application threads, specify a TotalSegments value of 4. + The value for TotalSegments must be greater than or equal to 1, and + less than or equal to 1000000. 
If you specify a TotalSegments value + of 1, the Scan operation will be sequential rather than parallel. + + If you specify TotalSegments , you must also specify Segment . + + :type segment: integer + :param segment: For a parallel Scan request, Segment identifies an + individual segment to be scanned by an application worker. + Segment IDs are zero-based, so the first segment is always 0. For + example, if you want to scan a table using four application + threads, the first thread specifies a Segment value of 0, the + second thread specifies 1, and so on. + + The value of LastEvaluatedKey returned from a parallel Scan request + must be used as ExclusiveStartKey with the same segment ID in a + subsequent Scan operation. + + The value for Segment must be greater than or equal to 0, and less than + the value provided for TotalSegments . + + If you specify Segment , you must also specify TotalSegments . + + :type projection_expression: string + :param projection_expression: A string that identifies one or more + attributes to retrieve from the table. These attributes can include + scalars, sets, or elements of a JSON document. The attributes in + the expression must be separated by commas. + If no attribute names are specified, then all attributes will be + returned. If any of the requested attributes are not found, they + will not appear in the result. + + For more information on projection expressions, go to `Accessing Item + Attributes`_ in the Amazon DynamoDB Developer Guide . + + :type filter_expression: string + :param filter_expression: A condition that evaluates the scan results + and returns only the desired values. + The condition you specify is applied to the items scanned; any items + that do not match the expression are not returned. + + :type expression_attribute_names: map + :param expression_attribute_names: One or more substitution tokens for + simplifying complex expressions. The following are some use cases + for using ExpressionAttributeNames : + + + To shorten an attribute name that is very long or unwieldy in an + expression. + + To create a placeholder for repeating occurrences of an attribute + name in an expression. + + To prevent special characters in an attribute name from being + misinterpreted in an expression. + + + Use the **#** character in an expression to dereference an attribute + name. For example, consider the following expression: + + + + `order.customerInfo.LastName = "Smith" OR order.customerInfo.LastName + = "Jones"` + + + Now suppose that you specified the following for + ExpressionAttributeNames : + + + + `{"#name":"order.customerInfo.LastName"}` + + + The expression can now be simplified as follows: + + + + `#name = "Smith" OR #name = "Jones"` + + + For more information on expression attribute names, go to `Accessing + Item Attributes`_ in the Amazon DynamoDB Developer Guide . + + :type expression_attribute_values: map + :param expression_attribute_values: One or more values that can be + substituted in an expression. + Use the **:** (colon) character in an expression to dereference an + attribute value. 
For example, suppose that you wanted to check + whether the value of the ProductStatus attribute was one of the + following: + + `Available | Backordered | Discontinued` + + You would first need to specify ExpressionAttributeValues as follows: + + `{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, + ":disc":{"S":"Discontinued"} }` + + You could then use these values in an expression, such as this: + + `ProductStatus IN (:avail, :back, :disc)` + + For more information on expression attribute values, go to `Specifying + Conditions`_ in the Amazon DynamoDB Developer Guide . + + """ + params = {'TableName': table_name, } + if attributes_to_get is not None: + params['AttributesToGet'] = attributes_to_get + if limit is not None: + params['Limit'] = limit + if select is not None: + params['Select'] = select + if scan_filter is not None: + params['ScanFilter'] = scan_filter + if conditional_operator is not None: + params['ConditionalOperator'] = conditional_operator + if exclusive_start_key is not None: + params['ExclusiveStartKey'] = exclusive_start_key + if return_consumed_capacity is not None: + params['ReturnConsumedCapacity'] = return_consumed_capacity + if total_segments is not None: + params['TotalSegments'] = total_segments + if segment is not None: + params['Segment'] = segment + if projection_expression is not None: + params['ProjectionExpression'] = projection_expression + if filter_expression is not None: + params['FilterExpression'] = filter_expression + if expression_attribute_names is not None: + params['ExpressionAttributeNames'] = expression_attribute_names + if expression_attribute_values is not None: + params['ExpressionAttributeValues'] = expression_attribute_values + return self.make_request(action='Scan', + body=json.dumps(params)) + + def update_item(self, table_name, key, attribute_updates=None, + expected=None, conditional_operator=None, + return_values=None, return_consumed_capacity=None, + return_item_collection_metrics=None, + update_expression=None, condition_expression=None, + expression_attribute_names=None, + expression_attribute_values=None): + """ + Edits an existing item's attributes, or adds a new item to the + table if it does not already exist. You can put, delete, or + add attribute values. You can also perform a conditional + update (insert a new attribute name-value pair if it doesn't + exist, or replace an existing name-value pair if it has + certain expected attribute values). + + You can also return the item's attribute values in the same + UpdateItem operation using the ReturnValues parameter. + + :type table_name: string + :param table_name: The name of the table containing the item to update. + + :type key: map + :param key: The primary key of the item to be updated. Each element + consists of an attribute name and a value for that attribute. + For the primary key, you must provide all of the attributes. For + example, with a hash type primary key, you only need to specify the + hash attribute. For a hash-and-range type primary key, you must + specify both the hash attribute and the range attribute. + + :type attribute_updates: map + :param attribute_updates: + There is a newer parameter available. Use UpdateExpression instead. + Note that if you use AttributeUpdates and UpdateExpression at the + same time, DynamoDB will return a ValidationException exception. + + This parameter can be used for modifying top-level attributes; however, + it does not support individual list or map elements. 
+ + The names of attributes to be modified, the action to perform on each, + and the new value for each. If you are updating an attribute that + is an index key attribute for any indexes on that table, the + attribute type must match the index key type defined in the + AttributesDefinition of the table description. You can use + UpdateItem to update any nonkey attributes. + + Attribute values cannot be null. String and Binary type attributes must + have lengths greater than zero. Set type attributes must not be + empty. Requests with empty values will be rejected with a + ValidationException exception. + + Each AttributeUpdates element consists of an attribute name to modify, + along with the following: + + + + Value - The new value, if applicable, for this attribute. + + Action - A value that specifies how to perform the update. This + action is only valid for an existing attribute whose data type is + Number or is a set; do not use `ADD` for other data types. If an + item with the specified primary key is found in the table, the + following values perform the following actions: + + + `PUT` - Adds the specified attribute to the item. If the attribute + already exists, it is replaced by the new value. + + `DELETE` - Removes the attribute and its value, if no value is + specified for `DELETE`. The data type of the specified value must + match the existing value's data type. If a set of values is + specified, then those values are subtracted from the old set. For + example, if the attribute value was the set `[a,b,c]` and the + `DELETE` action specifies `[a,c]`, then the final attribute value + is `[b]`. Specifying an empty set is an error. + + `ADD` - Adds the specified value to the item, if the attribute does + not already exist. If the attribute does exist, then the behavior + of `ADD` depends on the data type of the attribute: + + + If the existing attribute is a number, and if Value is also a number, + then Value is mathematically added to the existing attribute. If + Value is a negative number, then it is subtracted from the existing + attribute. If you use `ADD` to increment or decrement a number + value for an item that doesn't exist before the update, DynamoDB + uses 0 as the initial value. Similarly, if you use `ADD` for an + existing item to increment or decrement an attribute value that + doesn't exist before the update, DynamoDB uses `0` as the initial + value. For example, suppose that the item you want to update + doesn't have an attribute named itemcount , but you decide to `ADD` + the number `3` to this attribute anyway. DynamoDB will create the + itemcount attribute, set its initial value to `0`, and finally add + `3` to it. The result will be a new itemcount attribute, with a + value of `3`. + + If the existing data type is a set, and if Value is also a set, then + Value is appended to the existing set. For example, if the + attribute value is the set `[1,2]`, and the `ADD` action specified + `[3]`, then the final attribute value is `[1,2,3]`. An error occurs + if an `ADD` action is specified for a set attribute and the + attribute type specified does not match the existing set type. Both + sets must have the same primitive data type. For example, if the + existing data type is a set of strings, Value must also be a set of + strings. + + If no item with the specified key is found in the table, the following + values perform the following actions: + + + `PUT` - Causes DynamoDB to create a new item with the specified + primary key, and then adds the attribute. 
+ + `DELETE` - Nothing happens, because attributes cannot be deleted from + a nonexistent item. The operation succeeds, but DynamoDB does not + create a new item. + + `ADD` - Causes DynamoDB to create an item with the supplied primary + key and number (or set of numbers) for the attribute value. The + only data types allowed are Number and Number Set. + + + + If you specify any attributes that are part of an index key, then the + data types for those attributes must match those of the schema in + the table's attribute definition. + + :type expected: map + :param expected: + There is a newer parameter available. Use ConditionExpression instead. + Note that if you use Expected and ConditionExpression at the same + time, DynamoDB will return a ValidationException exception. + + This parameter does not support lists or maps. + + A map of attribute/condition pairs. Expected provides a conditional + block for the UpdateItem operation. + + Each element of Expected consists of an attribute name, a comparison + operator, and one or more values. DynamoDB compares the attribute + with the value(s) you supplied, using the comparison operator. For + each Expected element, the result of the evaluation is either true + or false. + + If you specify more than one element in the Expected map, then by + default all of the conditions must evaluate to true. In other + words, the conditions are ANDed together. (You can use the + ConditionalOperator parameter to OR the conditions instead. If you + do this, then at least one of the conditions must evaluate to true, + rather than all of them.) + + If the Expected map evaluates to true, then the conditional operation + succeeds; otherwise, it fails. + + Expected contains the following: + + + + AttributeValueList - One or more values to evaluate against the + supplied attribute. The number of values in the list depends on the + ComparisonOperator being used. For type Number, value comparisons + are numeric. String value comparisons for greater than, equals, or + less than are based on ASCII character code values. For example, + `a` is greater than `A`, and `a` is greater than `B`. For a list of + code values, see + `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_. + For type Binary, DynamoDB treats each byte of the binary data as + unsigned when it compares binary values, for example when + evaluating query expressions. + + ComparisonOperator - A comparator for evaluating attributes in the + AttributeValueList . When performing the comparison, DynamoDB uses + strongly consistent reads. The following comparison operators are + available: `EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | + CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN` The following + are descriptions of each comparison operator. + + + `EQ` : Equal. `EQ` is supported for all datatypes, including lists + and maps. AttributeValueList can contain only one AttributeValue + element of type String, Number, Binary, String Set, Number Set, or + Binary Set. If an item contains an AttributeValue element of a + different type than the one specified in the request, the value + does not match. For example, `{"S":"6"}` does not equal + `{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2", + "1"]}`.
+ + `NE` : Not equal. `NE` is supported for all datatypes, including + lists and maps. AttributeValueList can contain only one + AttributeValue of type String, Number, Binary, String Set, Number + Set, or Binary Set. If an item contains an AttributeValue of a + different type than the one specified in the request, the value + does not match. For example, `{"S":"6"}` does not equal + `{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2", + "1"]}`.
+ + `LE` : Less than or equal. AttributeValueList can contain only one + AttributeValue element of type String, Number, or Binary (not a set + type). If an item contains an AttributeValue element of a different + type than the one specified in the request, the value does not + match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also, + `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+ + `LT` : Less than. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set type). + If an item contains an AttributeValue element of a different type + than the one specified in the request, the value does not match. + For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also, + `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+ + `GE` : Greater than or equal. AttributeValueList can contain only one + AttributeValue element of type String, Number, or Binary (not a set + type). If an item contains an AttributeValue element of a different + type than the one specified in the request, the value does not + match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also, + `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+ + `GT` : Greater than. AttributeValueList can contain only one + AttributeValue element of type String, Number, or Binary (not a set + type). If an item contains an AttributeValue element of a different + type than the one specified in the request, the value does not + match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also, + `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+ + `NOT_NULL` : The attribute exists. `NOT_NULL` is supported for all + datatypes, including lists and maps. This operator tests for the + existence of an attribute, not its data type. If the data type of + attribute " `a`" is null, and you evaluate it using `NOT_NULL`, the + result is a Boolean true . This result is because the attribute " + `a`" exists; its data type is not relevant to the `NOT_NULL` + comparison operator. + + `NULL` : The attribute does not exist. `NULL` is supported for all + datatypes, including lists and maps. This operator tests for the + nonexistence of an attribute, not its data type. If the data type + of attribute " `a`" is null, and you evaluate it using `NULL`, the + result is a Boolean false . This is because the attribute " `a`" + exists; its data type is not relevant to the `NULL` comparison + operator. + + `CONTAINS` : Checks for a subsequence, or value in a set. + AttributeValueList can contain only one AttributeValue element of + type String, Number, or Binary (not a set type). If the target + attribute of the comparison is of type String, then the operator + checks for a substring match. If the target attribute of the + comparison is of type Binary, then the operator looks for a + subsequence of the target that matches the input. If the target + attribute of the comparison is a set (" `SS`", " `NS`", or " + `BS`"), then the operator evaluates to true if it finds an exact + match with any member of the set. CONTAINS is supported for lists: + When evaluating " `a CONTAINS b`", " `a`" can be a list; however, " + `b`" cannot be a set, a map, or a list. + + `NOT_CONTAINS` : Checks for absence of a subsequence, or absence of a + value in a set. AttributeValueList can contain only one + AttributeValue element of type String, Number, or Binary (not a set + type). If the target attribute of the comparison is a String, then + the operator checks for the absence of a substring match. If the + target attribute of the comparison is Binary, then the operator + checks for the absence of a subsequence of the target that matches + the input. If the target attribute of the comparison is a set (" + `SS`", " `NS`", or " `BS`"), then the operator evaluates to true if + it does not find an exact match with any member of the set. + NOT_CONTAINS is supported for lists: When evaluating " `a NOT + CONTAINS b`", " `a`" can be a list; however, " `b`" cannot be a + set, a map, or a list. + + `BEGINS_WITH` : Checks for a prefix. AttributeValueList can contain + only one AttributeValue of type String or Binary (not a Number or a + set type). The target attribute of the comparison must be of type + String or Binary (not a Number or a set type).
+ + `IN` : Checks for matching elements within two sets. + AttributeValueList can contain one or more AttributeValue elements + of type String, Number, or Binary (not a set type). These + attributes are compared against an existing set type attribute of + an item. If any elements of the input set are present in the item + attribute, the expression evaluates to true. + + `BETWEEN` : Greater than or equal to the first value, and less than + or equal to the second value. AttributeValueList must contain two + AttributeValue elements of the same type, either String, Number, or + Binary (not a set type). A target attribute matches if the target + value is greater than, or equal to, the first element and less + than, or equal to, the second element. If an item contains an + AttributeValue element of a different type than the one specified + in the request, the value does not match. For example, `{"S":"6"}` + does not compare to `{"N":"6"}`. Also, `{"N":"6"}` does not compare + to `{"NS":["6", "2", "1"]}`. + + + + For usage examples of AttributeValueList and ComparisonOperator , see + `Legacy Conditional Parameters`_ in the Amazon DynamoDB Developer + Guide . + + For backward compatibility with previous DynamoDB releases, the + following parameters can be used instead of AttributeValueList and + ComparisonOperator : + + + + Value - A value for DynamoDB to compare with an attribute. + + Exists - A Boolean value that causes DynamoDB to evaluate the value + before attempting the conditional operation: + + + If Exists is `True`, DynamoDB will check to see if that attribute + value already exists in the table. If it is found, then the + condition evaluates to true; otherwise the condition evaluates to + false. + If Exists is `False`, DynamoDB assumes that the attribute value does + not exist in the table. If in fact the value does not exist, then + the assumption is valid and the condition evaluates to true. If the + value is found, despite the assumption that it does not exist, the + condition evaluates to false. + Note that the default value for Exists is `True`. + + + The Value and Exists parameters are incompatible with + AttributeValueList and ComparisonOperator . Note that if you use + both sets of parameters at once, DynamoDB will return a + ValidationException exception. + + :type conditional_operator: string + :param conditional_operator: + There is a newer parameter available. Use ConditionExpression instead. + Note that if you use ConditionalOperator and ConditionExpression at + the same time, DynamoDB will return a ValidationException + exception. + + This parameter does not support lists or maps. + + A logical operator to apply to the conditions in the Expected map: + + + + `AND` - If all of the conditions evaluate to true, then the entire + map evaluates to true. + + `OR` - If at least one of the conditions evaluates to true, then the + entire map evaluates to true. + + + If you omit ConditionalOperator , then `AND` is the default. + + The operation will succeed only if the entire map evaluates to true. + + :type return_values: string + :param return_values: + Use ReturnValues if you want to get the item attributes as they + appeared either before or after they were updated. For UpdateItem , + the valid values are: + + + + `NONE` - If ReturnValues is not specified, or if its value is `NONE`, + then nothing is returned. (This setting is the default for + ReturnValues .) + + `ALL_OLD` - If UpdateItem overwrote an attribute name-value pair, + then the content of the old item is returned.
+ + `UPDATED_OLD` - The old versions of only the updated attributes are + returned. + + `ALL_NEW` - All of the attributes of the new version of the item are + returned. + + `UPDATED_NEW` - The new versions of only the updated attributes are + returned. + + :type return_consumed_capacity: string + :param return_consumed_capacity: A value that if set to `TOTAL`, the + response includes ConsumedCapacity data for tables and indexes. If + set to `INDEXES`, the response includes ConsumedCapacity for + indexes. If set to `NONE` (the default), ConsumedCapacity is not + included in the response. + + :type return_item_collection_metrics: string + :param return_item_collection_metrics: A value that, if set to `SIZE`, + causes statistics about item collections, if any, that were + modified during the operation to be returned in the response. If + set to `NONE` (the default), no statistics are returned. + + :type update_expression: string + :param update_expression: An expression that defines one or more + attributes to be updated, the action to be performed on them, and + new value(s) for them. + The following action values are available for UpdateExpression . + + + + `SET` - Adds one or more attributes and values to an item. If any of + these attributes already exist, they are replaced by the new + values. You can also use `SET` to add or subtract from an attribute + that is of type Number. `SET` supports the following functions: + + + `if_not_exists (path, operand)` - if the item does not contain an + attribute at the specified path, then `if_not_exists` evaluates to + operand; otherwise, it evaluates to path. You can use this function + to avoid overwriting an attribute that may already be present in + the item. + + `list_append (operand, operand)` - evaluates to a list with a new + element added to it. You can append the new element to the start or + the end of the list by reversing the order of the operands. + These function names are case-sensitive. + + `REMOVE` - Removes one or more attributes from an item. + + `ADD` - Adds the specified value to the item, if the attribute does + not already exist. If the attribute does exist, then the behavior + of `ADD` depends on the data type of the attribute: + + + If the existing attribute is a number, and if Value is also a number, + then Value is mathematically added to the existing attribute. If + Value is a negative number, then it is subtracted from the existing + attribute. If you use `ADD` to increment or decrement a number + value for an item that doesn't exist before the update, DynamoDB + uses `0` as the initial value. Similarly, if you use `ADD` for an + existing item to increment or decrement an attribute value that + doesn't exist before the update, DynamoDB uses `0` as the initial + value. For example, suppose that the item you want to update + doesn't have an attribute named itemcount , but you decide to `ADD` + the number `3` to this attribute anyway. DynamoDB will create the + itemcount attribute, set its initial value to `0`, and finally add + `3` to it. The result will be a new itemcount attribute in the + item, with a value of `3`. + + If the existing data type is a set and if Value is also a set, then + Value is added to the existing set. For example, if the attribute + value is the set `[1,2]`, and the `ADD` action specified `[3]`, + then the final attribute value is `[1,2,3]`. An error occurs if an + `ADD` action is specified for a set attribute and the attribute + type specified does not match the existing set type.
Both sets must + have the same primitive data type. For example, if the existing + data type is a set of strings, the Value must also be a set of + strings. + The `ADD` action only supports Number and set data types. In addition, + `ADD` can only be used on top-level attributes, not nested + attributes. + + `DELETE` - Deletes an element from a set. If a set of values is + specified, then those values are subtracted from the old set. For + example, if the attribute value was the set `[a,b,c]` and the + `DELETE` action specifies `[a,c]`, then the final attribute value + is `[b]`. Specifying an empty set is an error. The `DELETE` action + only supports Number and set data types. In addition, `DELETE` can + only be used on top-level attributes, not nested attributes. + + + You can have many actions in a single expression, such as the + following: `SET a=:value1, b=:value2 DELETE :value3, :value4, + :value5` + + For more information on update expressions, go to `Modifying Items and + Attributes`_ in the Amazon DynamoDB Developer Guide . + + :type condition_expression: string + :param condition_expression: A condition that must be satisfied in + order for a conditional update to succeed. + An expression can contain any of the following: + + + + Boolean functions: `attribute_exists | attribute_not_exists | + contains | begins_with` These function names are case-sensitive. + + Comparison operators: ` = | <> | < | > | <= + | >= | BETWEEN | IN` + + Logical operators: `AND | OR | NOT` + + + For more information on condition expressions, go to `Specifying + Conditions`_ in the Amazon DynamoDB Developer Guide . + + :type expression_attribute_names: map + :param expression_attribute_names: One or more substitution tokens for + simplifying complex expressions. The following are some use cases + for using ExpressionAttributeNames : + + + To shorten an attribute name that is very long or unwieldy in an + expression. + + To create a placeholder for repeating occurrences of an attribute + name in an expression. + + To prevent special characters in an attribute name from being + misinterpreted in an expression. + + + Use the **#** character in an expression to dereference an attribute + name. For example, consider the following expression: + + + + `order.customerInfo.LastName = "Smith" OR order.customerInfo.LastName + = "Jones"` + + + Now suppose that you specified the following for + ExpressionAttributeNames : + + + + `{"#name":"order.customerInfo.LastName"}` + + + The expression can now be simplified as follows: + + + + `#name = "Smith" OR #name = "Jones"` + + + For more information on expression attribute names, go to `Accessing + Item Attributes`_ in the Amazon DynamoDB Developer Guide . + + :type expression_attribute_values: map + :param expression_attribute_values: One or more values that can be + substituted in an expression. + Use the **:** (colon) character in an expression to dereference an + attribute value. For example, suppose that you wanted to check + whether the value of the ProductStatus attribute was one of the + following: + + `Available | Backordered | Discontinued` + + You would first need to specify ExpressionAttributeValues as follows: + + `{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, + ":disc":{"S":"Discontinued"} }` + + You could then use these values in an expression, such as this: + + `ProductStatus IN (:avail, :back, :disc)` + + For more information on expression attribute values, go to `Specifying + Conditions`_ in the Amazon DynamoDB Developer Guide . 
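+ + Example (an illustrative sketch only, not from the upstream AWS + documentation; the table, key, attribute and placeholder names here + are hypothetical):: + + >>> conn = DynamoDBConnection() + >>> conn.update_item( + ... table_name='users', + ... key={'username': {'S': 'johndoe'}}, + ... update_expression='SET #lc = :val', + ... expression_attribute_names={'#lc': 'login_count'}, + ... expression_attribute_values={':val': {'N': '10'}}, + ... return_values='UPDATED_NEW')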
+ + """ + params = {'TableName': table_name, 'Key': key, } + if attribute_updates is not None: + params['AttributeUpdates'] = attribute_updates + if expected is not None: + params['Expected'] = expected + if conditional_operator is not None: + params['ConditionalOperator'] = conditional_operator + if return_values is not None: + params['ReturnValues'] = return_values + if return_consumed_capacity is not None: + params['ReturnConsumedCapacity'] = return_consumed_capacity + if return_item_collection_metrics is not None: + params['ReturnItemCollectionMetrics'] = return_item_collection_metrics + if update_expression is not None: + params['UpdateExpression'] = update_expression + if condition_expression is not None: + params['ConditionExpression'] = condition_expression + if expression_attribute_names is not None: + params['ExpressionAttributeNames'] = expression_attribute_names + if expression_attribute_values is not None: + params['ExpressionAttributeValues'] = expression_attribute_values + return self.make_request(action='UpdateItem', + body=json.dumps(params)) + + def update_table(self, table_name, provisioned_throughput=None, + global_secondary_index_updates=None, + attribute_definitions=None): + """ + Updates the provisioned throughput for the given table, or + manages the global secondary indexes on the table. + + You can increase or decrease the table's provisioned + throughput values within the maximums and minimums listed in + the `Limits`_ section in the Amazon DynamoDB Developer Guide . + + In addition, you can use UpdateTable to add, modify or delete + global secondary indexes on the table. For more information, + see `Managing Global Secondary Indexes`_ in the Amazon + DynamoDB Developer Guide . + + The table must be in the `ACTIVE` state for UpdateTable to + succeed. UpdateTable is an asynchronous operation; while + executing the operation, the table is in the `UPDATING` state. + While the table is in the `UPDATING` state, the table still + has the provisioned throughput from before the call. The + table's new provisioned throughput settings go into effect + when the table returns to the `ACTIVE` state; at that point, + the UpdateTable operation is complete. + + :type attribute_definitions: list + :param attribute_definitions: An array of attributes that describe the + key schema for the table and indexes. If you are adding a new + global secondary index to the table, AttributeDefinitions must + include the key element(s) of the new index. + + :type table_name: string + :param table_name: The name of the table to be updated. + + :type provisioned_throughput: dict + :param provisioned_throughput: Represents the provisioned throughput + settings for a specified table or index. The settings can be + modified using the UpdateTable operation. + For current minimum and maximum provisioned throughput values, see + `Limits`_ in the Amazon DynamoDB Developer Guide . + + :type global_secondary_index_updates: list + :param global_secondary_index_updates: + An array of one or more global secondary indexes for the table. For + each index in the array, you can specify one action: + + + + Create - add a new global secondary index to the table. + + Update - modify the provisioned throughput settings of an existing + global secondary index. + + Delete - remove a global secondary index from the table. 
+ + """ + params = {'TableName': table_name, } + if attribute_definitions is not None: + params['AttributeDefinitions'] = attribute_definitions + if provisioned_throughput is not None: + params['ProvisionedThroughput'] = provisioned_throughput + if global_secondary_index_updates is not None: + params['GlobalSecondaryIndexUpdates'] = global_secondary_index_updates + return self.make_request(action='UpdateTable', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.host, + 'Content-Type': 'application/x-amz-json-1.0', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body, host=self.host) + response = self._mexe(http_request, sender=None, + override_num_retries=self.NumberRetries, + retry_handler=self._retry_handler) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) + + def _retry_handler(self, response, i, next_sleep): + status = None + boto.log.debug("Saw HTTP status: %s" % response.status) + if response.status == 400: + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + data = json.loads(response_body) + if 'ProvisionedThroughputExceededException' in data.get('__type'): + self.throughput_exceeded_events += 1 + msg = "%s, retry attempt %s" % ( + 'ProvisionedThroughputExceededException', + i + ) + next_sleep = self._truncated_exponential_time(i) + i += 1 + status = (msg, i, next_sleep) + if i == self.NumberRetries: + # If this was our last retry attempt, raise + # a specific error saying that the throughput + # was exceeded. 
+ raise exceptions.ProvisionedThroughputExceededException( + response.status, response.reason, data) + elif 'ConditionalCheckFailedException' in data.get('__type'): + raise exceptions.ConditionalCheckFailedException( + response.status, response.reason, data) + elif 'ValidationException' in data.get('__type'): + raise exceptions.ValidationException( + response.status, response.reason, data) + else: + raise self.ResponseError(response.status, response.reason, + data) + expected_crc32 = response.getheader('x-amz-crc32') + if self._validate_checksums and expected_crc32 is not None: + boto.log.debug('Validating crc32 checksum for body: %s', + response.read()) + actual_crc32 = crc32(response.read()) & 0xffffffff + expected_crc32 = int(expected_crc32) + if actual_crc32 != expected_crc32: + msg = ("The calculated checksum %s did not match the expected " + "checksum %s" % (actual_crc32, expected_crc32)) + status = (msg, i + 1, self._truncated_exponential_time(i)) + return status + + def _truncated_exponential_time(self, i): + if i == 0: + next_sleep = 0 + else: + next_sleep = min(0.05 * (2 ** i), + boto.config.get('Boto', 'max_retry_delay', 60)) + return next_sleep diff --git a/desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/results.py b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/results.py new file mode 100644 index 0000000000000000000000000000000000000000..36f04d0a960908742ecdaeccf1eaa4ff4efd3409 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/results.py @@ -0,0 +1,204 @@ +class ResultSet(object): + """ + A class used to lazily handle page-to-page navigation through a set of + results. + + It presents a transparent iterator interface, so that all the user has + to do is use it in a typical ``for`` loop (or list comprehension, etc.) + to fetch results, even if they weren't present in the current page of + results. + + This is used by the ``Table.query`` & ``Table.scan`` methods. + + Example:: + + >>> users = Table('users') + >>> results = ResultSet() + >>> results.to_call(users.query, username__gte='johndoe') + # Now iterate. When it runs out of results, it'll fetch the next page. + >>> for res in results: + ... print res['username'] + + """ + def __init__(self, max_page_size=None): + super(ResultSet, self).__init__() + self.the_callable = None + self.call_args = [] + self.call_kwargs = {} + self._results = [] + self._offset = -1 + self._results_left = True + self._last_key_seen = None + self._fetches = 0 + self._max_page_size = max_page_size + self._limit = None + + @property + def first_key(self): + return 'exclusive_start_key' + + def _reset(self): + """ + Resets the internal state of the ``ResultSet``. + + This prevents results from being cached long-term & consuming + excess memory. + + Largely internal. + """ + self._results = [] + self._offset = 0 + + def __iter__(self): + return self + + def __next__(self): + self._offset += 1 + + if self._offset >= len(self._results): + if self._results_left is False: + raise StopIteration() + + self.fetch_more() + + # It's possible that previous call to ``fetch_more`` may not return + # anything useful but there may be more results. Loop until we get + # something back, making sure we guard for no results left. 
+ while not len(self._results) and self._results_left: + self.fetch_more() + + if self._offset < len(self._results): + if self._limit is not None: + self._limit -= 1 + + if self._limit < 0: + raise StopIteration() + + return self._results[self._offset] + else: + raise StopIteration() + + next = __next__ + + def to_call(self, the_callable, *args, **kwargs): + """ + Sets up the callable & any arguments to run it with. + + This is stored for subsequent calls so that those queries can be + run without requiring user intervention. + + Example:: + + # Just an example callable. + >>> def squares_to(y): + ... for x in range(1, y): + ... yield x**2 + >>> rs = ResultSet() + # Set up what to call & arguments. + >>> rs.to_call(squares_to, y=3) + + """ + if not callable(the_callable): + raise ValueError( + 'You must supply an object or function to be called.' + ) + + # We pop the ``limit``, if present, to track how many we should return + # to the user. This isn't the same as the ``limit`` that the low-level + # DDB api calls use (which limit page size, not the overall result set). + self._limit = kwargs.pop('limit', None) + + if self._limit is not None and self._limit < 0: + self._limit = None + + self.the_callable = the_callable + self.call_args = args + self.call_kwargs = kwargs + + def fetch_more(self): + """ + When the iterator runs out of results, this method is run to re-execute + the callable (& arguments) to fetch the next page. + + Largely internal. + """ + self._reset() + + args = self.call_args[:] + kwargs = self.call_kwargs.copy() + + if self._last_key_seen is not None: + kwargs[self.first_key] = self._last_key_seen + + # If the page size is greater than limit set them + # to the same value + if self._limit and self._max_page_size and self._max_page_size > self._limit: + self._max_page_size = self._limit + + # Put in the max page size. + if self._max_page_size is not None: + kwargs['limit'] = self._max_page_size + elif self._limit is not None: + # If max_page_size is not set and limit is available + # use it as the page size + kwargs['limit'] = self._limit + + results = self.the_callable(*args, **kwargs) + self._fetches += 1 + new_results = results.get('results', []) + self._last_key_seen = results.get('last_key', None) + + if len(new_results): + self._results.extend(results['results']) + + # Check the limit, if it's present. + if self._limit is not None and self._limit >= 0: + limit = self._limit + limit -= len(results['results']) + # If we've exceeded the limit, we don't have any more + # results to look for. + if limit <= 0: + self._results_left = False + + if self._last_key_seen is None: + self._results_left = False + + +class BatchGetResultSet(ResultSet): + def __init__(self, *args, **kwargs): + self._keys_left = kwargs.pop('keys', []) + self._max_batch_get = kwargs.pop('max_batch_get', 100) + super(BatchGetResultSet, self).__init__(*args, **kwargs) + + def fetch_more(self): + self._reset() + + args = self.call_args[:] + kwargs = self.call_kwargs.copy() + + # Slice off the max we can fetch. + kwargs['keys'] = self._keys_left[:self._max_batch_get] + self._keys_left = self._keys_left[self._max_batch_get:] + + if len(self._keys_left) <= 0: + self._results_left = False + + results = self.the_callable(*args, **kwargs) + + if not len(results.get('results', [])): + return + + self._results.extend(results['results']) + + for offset, key_data in enumerate(results.get('unprocessed_keys', [])): + # We've got an unprocessed key. Reinsert it into the list. 
+ # DynamoDB only returns valid keys, so there should be no risk of + # missing keys ever making it here. + self._keys_left.insert(offset, key_data) + + if len(self._keys_left) > 0: + self._results_left = True + + # Decrease the limit, if it's present. + if self.call_kwargs.get('limit'): + self.call_kwargs['limit'] -= len(results['results']) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/table.py b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/table.py new file mode 100644 index 0000000000000000000000000000000000000000..d02ff5c7deb4acbb53925c85ab0e489649192a4d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/table.py @@ -0,0 +1,1722 @@ +import boto +from boto.dynamodb2 import exceptions +from boto.dynamodb2.fields import (HashKey, RangeKey, + AllIndex, KeysOnlyIndex, IncludeIndex, + GlobalAllIndex, GlobalKeysOnlyIndex, + GlobalIncludeIndex) +from boto.dynamodb2.items import Item +from boto.dynamodb2.layer1 import DynamoDBConnection +from boto.dynamodb2.results import ResultSet, BatchGetResultSet +from boto.dynamodb2.types import (NonBooleanDynamizer, Dynamizer, FILTER_OPERATORS, + QUERY_OPERATORS, STRING) +from boto.exception import JSONResponseError + + +class Table(object): + """ + Interacts & models the behavior of a DynamoDB table. + + The ``Table`` object represents a set (or rough categorization) of + records within DynamoDB. The important part is that all records within the + table, while largely-schema-free, share the same schema & are essentially + namespaced for use in your application. For example, you might have a + ``users`` table or a ``forums`` table. + """ + max_batch_get = 100 + + _PROJECTION_TYPE_TO_INDEX = dict( + global_indexes=dict( + ALL=GlobalAllIndex, + KEYS_ONLY=GlobalKeysOnlyIndex, + INCLUDE=GlobalIncludeIndex, + ), local_indexes=dict( + ALL=AllIndex, + KEYS_ONLY=KeysOnlyIndex, + INCLUDE=IncludeIndex, + ) + ) + + def __init__(self, table_name, schema=None, throughput=None, indexes=None, + global_indexes=None, connection=None): + """ + Sets up a new in-memory ``Table``. + + This is useful if the table already exists within DynamoDB & you simply + want to use it for additional interactions. The only required parameter + is the ``table_name``. However, under the hood, the object will call + ``describe_table`` to determine the schema/indexes/throughput. You + can avoid this extra call by passing in ``schema`` & ``indexes``. + + **IMPORTANT** - If you're creating a new ``Table`` for the first time, + you should use the ``Table.create`` method instead, as it will + persist the table structure to DynamoDB. + + Requires a ``table_name`` parameter, which should be a simple string + of the name of the table. + + Optionally accepts a ``schema`` parameter, which should be a list of + ``BaseSchemaField`` subclasses representing the desired schema. + + Optionally accepts a ``throughput`` parameter, which should be a + dictionary. If provided, it should specify a ``read`` & ``write`` key, + both of which should have an integer value associated with them. + + Optionally accepts a ``indexes`` parameter, which should be a list of + ``BaseIndexField`` subclasses representing the desired indexes. + + Optionally accepts a ``global_indexes`` parameter, which should be a + list of ``GlobalBaseIndexField`` subclasses representing the desired + indexes. + + Optionally accepts a ``connection`` parameter, which should be a + ``DynamoDBConnection`` instance (or subclass). This is primarily useful + for specifying alternate connection parameters. 
+ + Example:: + + # The simple, it-already-exists case. + >>> conn = Table('users') + + # The full, minimum-extra-calls case. + >>> from boto import dynamodb2 + >>> users = Table('users', schema=[ + ... HashKey('username'), + ... RangeKey('date_joined', data_type=NUMBER) + ... ], throughput={ + ... 'read':20, + ... 'write': 10, + ... }, indexes=[ + ... KeysOnlyIndex('MostRecentlyJoined', parts=[ + ... HashKey('username'), + ... RangeKey('date_joined') + ... ]), + ... ], global_indexes=[ + ... GlobalAllIndex('UsersByZipcode', parts=[ + ... HashKey('zipcode'), + ... RangeKey('username'), + ... ], + ... throughput={ + ... 'read':10, + ... 'write':10, + ... }), + ... ], connection=dynamodb2.connect_to_region('us-west-2', + ... aws_access_key_id='key', + ... aws_secret_access_key='key', + ... )) + + """ + self.table_name = table_name + self.connection = connection + self.throughput = { + 'read': 5, + 'write': 5, + } + self.schema = schema + self.indexes = indexes + self.global_indexes = global_indexes + + if self.connection is None: + self.connection = DynamoDBConnection() + + if throughput is not None: + self.throughput = throughput + + self._dynamizer = NonBooleanDynamizer() + + def use_boolean(self): + # Switch to a Dynamizer that maps Python booleans to the native + # DynamoDB BOOL type instead of encoding them as numbers. + self._dynamizer = Dynamizer() + + @classmethod + def create(cls, table_name, schema, throughput=None, indexes=None, + global_indexes=None, connection=None): + """ + Creates a new table in DynamoDB & returns an in-memory ``Table`` object. + + This will set up a brand new table within DynamoDB. The ``table_name`` + must be unique for your AWS account. The ``schema`` is also required + to define the key structure of the table. + + **IMPORTANT** - You should consider the usage pattern of your table + up-front, as the schema can **NOT** be modified once the table is + created, requiring the creation of a new table & migrating the data + should you wish to revise it. + + **IMPORTANT** - If the table already exists in DynamoDB, additional + calls to this method will result in an error. If you just need + a ``Table`` object to interact with the existing table, you should + just initialize a new ``Table`` object, which requires only the + ``table_name``. + + Requires a ``table_name`` parameter, which should be a simple string + of the name of the table. + + Requires a ``schema`` parameter, which should be a list of + ``BaseSchemaField`` subclasses representing the desired schema. + + Optionally accepts a ``throughput`` parameter, which should be a + dictionary. If provided, it should specify a ``read`` & ``write`` key, + both of which should have an integer value associated with them. + + Optionally accepts an ``indexes`` parameter, which should be a list of + ``BaseIndexField`` subclasses representing the desired indexes. + + Optionally accepts a ``global_indexes`` parameter, which should be a + list of ``GlobalBaseIndexField`` subclasses representing the desired + indexes. + + Optionally accepts a ``connection`` parameter, which should be a + ``DynamoDBConnection`` instance (or subclass). This is primarily useful + for specifying alternate connection parameters. + + Example:: + + >>> users = Table.create('users', schema=[ + ... HashKey('username'), + ... RangeKey('date_joined', data_type=NUMBER) + ... ], throughput={ + ... 'read':20, + ... 'write': 10, + ... }, indexes=[ + ... KeysOnlyIndex('MostRecentlyJoined', parts=[ + ... RangeKey('date_joined') + ... ]), + ... ], global_indexes=[ + ... GlobalAllIndex('UsersByZipcode', parts=[ + ... HashKey('zipcode'), + ... RangeKey('username'), + ... ], + ... 
throughput={ + ... 'read':10, + ... 'write':10, + ... }), + ... ]) + + """ + table = cls(table_name=table_name, connection=connection) + table.schema = schema + + if throughput is not None: + table.throughput = throughput + + if indexes is not None: + table.indexes = indexes + + if global_indexes is not None: + table.global_indexes = global_indexes + + # Prep the schema. + raw_schema = [] + attr_defs = [] + seen_attrs = set() + + for field in table.schema: + raw_schema.append(field.schema()) + # Build the attributes off what we know. + seen_attrs.add(field.name) + attr_defs.append(field.definition()) + + raw_throughput = { + 'ReadCapacityUnits': int(table.throughput['read']), + 'WriteCapacityUnits': int(table.throughput['write']), + } + kwargs = {} + + kwarg_map = { + 'indexes': 'local_secondary_indexes', + 'global_indexes': 'global_secondary_indexes', + } + for index_attr in ('indexes', 'global_indexes'): + table_indexes = getattr(table, index_attr) + if table_indexes: + raw_indexes = [] + for index_field in table_indexes: + raw_indexes.append(index_field.schema()) + # Make sure all attributes specified in the indexes are + # added to the definition + for field in index_field.parts: + if field.name not in seen_attrs: + seen_attrs.add(field.name) + attr_defs.append(field.definition()) + + kwargs[kwarg_map[index_attr]] = raw_indexes + + table.connection.create_table( + table_name=table.table_name, + attribute_definitions=attr_defs, + key_schema=raw_schema, + provisioned_throughput=raw_throughput, + **kwargs + ) + return table + + def _introspect_schema(self, raw_schema, raw_attributes=None): + """ + Given a raw schema structure back from a DynamoDB response, parse + out & build the high-level Python objects that represent them. + """ + schema = [] + sane_attributes = {} + + if raw_attributes: + for field in raw_attributes: + sane_attributes[field['AttributeName']] = field['AttributeType'] + + for field in raw_schema: + data_type = sane_attributes.get(field['AttributeName'], STRING) + + if field['KeyType'] == 'HASH': + schema.append( + HashKey(field['AttributeName'], data_type=data_type) + ) + elif field['KeyType'] == 'RANGE': + schema.append( + RangeKey(field['AttributeName'], data_type=data_type) + ) + else: + raise exceptions.UnknownSchemaFieldError( + "%s was seen, but is unknown. Please report this at " + "https://github.com/boto/boto/issues." % field['KeyType'] + ) + + return schema + + def _introspect_all_indexes(self, raw_indexes, map_indexes_projection): + """ + Given a raw index/global index structure back from a DynamoDB response, + parse out & build the high-level Python objects that represent them. + """ + indexes = [] + + for field in raw_indexes: + index_klass = map_indexes_projection.get('ALL') + kwargs = { + 'parts': [] + } + + if field['Projection']['ProjectionType'] == 'ALL': + index_klass = map_indexes_projection.get('ALL') + elif field['Projection']['ProjectionType'] == 'KEYS_ONLY': + index_klass = map_indexes_projection.get('KEYS_ONLY') + elif field['Projection']['ProjectionType'] == 'INCLUDE': + index_klass = map_indexes_projection.get('INCLUDE') + kwargs['includes'] = field['Projection']['NonKeyAttributes'] + else: + raise exceptions.UnknownIndexFieldError( + "%s was seen, but is unknown. Please report this at " + "https://github.com/boto/boto/issues." 
% \
+                    field['Projection']['ProjectionType']
+                )
+
+            name = field['IndexName']
+            kwargs['parts'] = self._introspect_schema(field['KeySchema'], None)
+            indexes.append(index_klass(name, **kwargs))
+
+        return indexes
+
+    def _introspect_indexes(self, raw_indexes):
+        """
+        Given a raw index structure back from a DynamoDB response, parse
+        out & build the high-level Python objects that represent them.
+        """
+        return self._introspect_all_indexes(
+            raw_indexes, self._PROJECTION_TYPE_TO_INDEX.get('local_indexes'))
+
+    def _introspect_global_indexes(self, raw_global_indexes):
+        """
+        Given a raw global index structure back from a DynamoDB response, parse
+        out & build the high-level Python objects that represent them.
+        """
+        return self._introspect_all_indexes(
+            raw_global_indexes,
+            self._PROJECTION_TYPE_TO_INDEX.get('global_indexes'))
+
+    def describe(self):
+        """
+        Describes the current structure of the table in DynamoDB.
+
+        This information will be used to update the ``schema``, ``indexes``,
+        ``global_indexes`` and ``throughput`` information on the ``Table``. Some
+        calls, such as those involving creating keys or querying, will require
+        this information to be populated.
+
+        It also returns the full raw data structure from DynamoDB, in the
+        event you'd like to parse out additional information (such as the
+        ``ItemCount`` or usage information).
+
+        Example::
+
+            >>> users.describe()
+            {
+                # Lots of keys here...
+            }
+            >>> len(users.schema)
+            2
+
+        """
+        result = self.connection.describe_table(self.table_name)
+
+        # Blindly update throughput, since what's on DynamoDB's end is likely
+        # more correct.
+        raw_throughput = result['Table']['ProvisionedThroughput']
+        self.throughput['read'] = int(raw_throughput['ReadCapacityUnits'])
+        self.throughput['write'] = int(raw_throughput['WriteCapacityUnits'])
+
+        if not self.schema:
+            # Since we have the data, build the schema.
+            raw_schema = result['Table'].get('KeySchema', [])
+            raw_attributes = result['Table'].get('AttributeDefinitions', [])
+            self.schema = self._introspect_schema(raw_schema, raw_attributes)
+
+        if not self.indexes:
+            # Build the index information as well.
+            raw_indexes = result['Table'].get('LocalSecondaryIndexes', [])
+            self.indexes = self._introspect_indexes(raw_indexes)
+
+        # Build the global index information as well.
+        raw_global_indexes = result['Table'].get('GlobalSecondaryIndexes', [])
+        self.global_indexes = self._introspect_global_indexes(raw_global_indexes)
+
+        # This is leaky.
+        return result
+
+    def update(self, throughput=None, global_indexes=None):
+        """
+        Updates table attributes and global indexes in DynamoDB.
+
+        Optionally accepts a ``throughput`` parameter, which should be a
+        dictionary. If provided, it should specify a ``read`` & ``write`` key,
+        both of which should have an integer value associated with them.
+
+        Optionally accepts a ``global_indexes`` parameter, which should be a
+        dictionary. If provided, it should map each index name to a dict
+        containing a ``read`` & ``write`` key, both of which
+        should have an integer value associated with them. If you are writing
+        new code, please use ``Table.update_global_secondary_index``.
+
+        Returns ``True`` on success.
+
+        Example::
+
+            # For a read-heavier application...
+            >>> users.update(throughput={
+            ...     'read': 20,
+            ...     'write': 10,
+            ... })
+            True
+
+            # To also update the global indexes' throughput.
+            >>> users.update(throughput={
+            ...     'read': 20,
+            ...     'write': 10,
+            ... },
+            ... global_indexes={
+            ...     'TheIndexNameHere': {
+            ...         'read': 15,
+            ...         'write': 5,
+            ...     }
+            ... })
+            True
+        """
+
+        data = None
+
+        if throughput:
+            self.throughput = throughput
+            data = {
+                'ReadCapacityUnits': int(self.throughput['read']),
+                'WriteCapacityUnits': int(self.throughput['write']),
+            }
+
+        gsi_data = None
+
+        if global_indexes:
+            gsi_data = []
+
+            for gsi_name, gsi_throughput in global_indexes.items():
+                gsi_data.append({
+                    "Update": {
+                        "IndexName": gsi_name,
+                        "ProvisionedThroughput": {
+                            "ReadCapacityUnits": int(gsi_throughput['read']),
+                            "WriteCapacityUnits": int(gsi_throughput['write']),
+                        },
+                    },
+                })
+
+        if throughput or global_indexes:
+            self.connection.update_table(
+                self.table_name,
+                provisioned_throughput=data,
+                global_secondary_index_updates=gsi_data,
+            )
+
+            return True
+        else:
+            msg = 'You need to provide either the throughput or the ' \
+                  'global_indexes to the update method'
+            boto.log.error(msg)
+
+            return False
+
+    def create_global_secondary_index(self, global_index):
+        """
+        Creates a global index in DynamoDB after the table has been created.
+
+        Requires a ``global_index`` parameter, which should be a
+        ``GlobalBaseIndexField`` subclass representing the desired index.
+
+        To update ``global_indexes`` information on the ``Table``, you'll need
+        to call ``Table.describe``.
+
+        Returns ``True`` on success.
+
+        Example::
+
+            # To create a global index
+            >>> users.create_global_secondary_index(
+            ...     global_index=GlobalAllIndex(
+            ...         'TheIndexNameHere', parts=[
+            ...             HashKey('requiredHashkey', data_type=STRING),
+            ...             RangeKey('optionalRangeKey', data_type=STRING)
+            ...         ],
+            ...         throughput={
+            ...             'read': 2,
+            ...             'write': 1,
+            ...         })
+            ... )
+            True
+
+        """
+
+        if global_index:
+            gsi_data = []
+            gsi_data_attr_def = []
+
+            gsi_data.append({
+                "Create": global_index.schema()
+            })
+
+            for attr_def in global_index.parts:
+                gsi_data_attr_def.append(attr_def.definition())
+
+            self.connection.update_table(
+                self.table_name,
+                global_secondary_index_updates=gsi_data,
+                attribute_definitions=gsi_data_attr_def
+            )
+
+            return True
+        else:
+            msg = 'You need to provide a global_index to the ' \
+                  'create_global_secondary_index method'
+            boto.log.error(msg)
+
+            return False
+
+    def delete_global_secondary_index(self, global_index_name):
+        """
+        Deletes a global index in DynamoDB after the table has been created.
+
+        Requires a ``global_index_name`` parameter, which should be a simple
+        string of the name of the global secondary index.
+
+        To update ``global_indexes`` information on the ``Table``, you'll need
+        to call ``Table.describe``.
+
+        Returns ``True`` on success.
+
+        Example::
+
+            # To delete a global index
+            >>> users.delete_global_secondary_index('TheIndexNameHere')
+            True
+
+        """
+
+        if global_index_name:
+            gsi_data = [
+                {
+                    "Delete": {
+                        "IndexName": global_index_name
+                    }
+                }
+            ]
+
+            self.connection.update_table(
+                self.table_name,
+                global_secondary_index_updates=gsi_data,
+            )
+
+            return True
+        else:
+            msg = 'You need to provide the global index name to the ' \
+                  'delete_global_secondary_index method'
+            boto.log.error(msg)
+
+            return False
+
+    def update_global_secondary_index(self, global_indexes):
+        """
+        Updates one or more global indexes in DynamoDB after the table has
+        been created.
+
+        Requires a ``global_indexes`` parameter, which should be a
+        dictionary. If provided, it should map each index name to a dict
+        containing a ``read`` & ``write`` key, both of which
+        should have an integer value associated with them.
+
+        To update ``global_indexes`` information on the ``Table``, you'll need
+        to call ``Table.describe``.
+
+        Returns ``True`` on success.
+
+        Example::
+
+            # To update a global index
+            >>> users.update_global_secondary_index(global_indexes={
+            ...     'TheIndexNameHere': {
+            ...         'read': 15,
+            ...         'write': 5,
+            ...     }
+            ... })
+            True
+
+        """
+
+        if global_indexes:
+            gsi_data = []
+
+            for gsi_name, gsi_throughput in global_indexes.items():
+                gsi_data.append({
+                    "Update": {
+                        "IndexName": gsi_name,
+                        "ProvisionedThroughput": {
+                            "ReadCapacityUnits": int(gsi_throughput['read']),
+                            "WriteCapacityUnits": int(gsi_throughput['write']),
+                        },
+                    },
+                })
+
+            self.connection.update_table(
+                self.table_name,
+                global_secondary_index_updates=gsi_data,
+            )
+            return True
+        else:
+            msg = 'You need to provide the global indexes to the ' \
+                  'update_global_secondary_index method'
+            boto.log.error(msg)
+
+            return False
+
+    def delete(self):
+        """
+        Deletes a table in DynamoDB.
+
+        **IMPORTANT** - Be careful when using this method, there is no undo.
+
+        Returns ``True`` on success.
+
+        Example::
+
+            >>> users.delete()
+            True
+
+        """
+        self.connection.delete_table(self.table_name)
+        return True
+
+    def _encode_keys(self, keys):
+        """
+        Given a flat Python dictionary of keys/values, converts it into the
+        nested dictionary DynamoDB expects.
+
+        Converts::
+
+            {
+                'username': 'john',
+                'tags': [1, 2, 5],
+            }
+
+        ...to...::
+
+            {
+                'username': {'S': 'john'},
+                'tags': {'NS': ['1', '2', '5']},
+            }
+
+        """
+        raw_key = {}
+
+        for key, value in keys.items():
+            raw_key[key] = self._dynamizer.encode(value)
+
+        return raw_key
+
+    def get_item(self, consistent=False, attributes=None, **kwargs):
+        """
+        Fetches an item (record) from a table in DynamoDB.
+
+        To specify the key of the item you'd like to get, you can specify the
+        key attributes as kwargs.
+
+        Optionally accepts a ``consistent`` parameter, which should be a
+        boolean. If you provide ``True``, it will perform
+        a consistent (but more expensive) read from DynamoDB.
+        (Default: ``False``)
+
+        Optionally accepts an ``attributes`` parameter, which should be a
+        list of fieldnames to fetch. (Default: ``None``, which means all
+        fields should be fetched)
+
+        Returns an ``Item`` instance containing all the data for that record.
+
+        Raises an ``ItemNotFound`` exception if the item is not found.
+
+        Example::
+
+            # A simple hash key.
+            >>> john = users.get_item(username='johndoe')
+            >>> john['first_name']
+            'John'
+
+            # A complex hash+range key.
+            >>> john = users.get_item(username='johndoe', last_name='Doe')
+            >>> john['first_name']
+            'John'
+
+            # A consistent read (assuming the data might have just changed).
+            >>> john = users.get_item(username='johndoe', consistent=True)
+            >>> john['first_name']
+            'Johann'
+
+            # With a key that is an invalid variable name in Python.
+            # Also, assumes a different schema than previous examples.
+            >>> john = users.get_item(**{
+            ...     'date-joined': 127549192,
+            ... })
+            >>> john['first_name']
+            'John'
+
+        """
+        raw_key = self._encode_keys(kwargs)
+        item_data = self.connection.get_item(
+            self.table_name,
+            raw_key,
+            attributes_to_get=attributes,
+            consistent_read=consistent
+        )
+        if 'Item' not in item_data:
+            raise exceptions.ItemNotFound("Item %s couldn't be found." % kwargs)
+        item = Item(self)
+        item.load(item_data)
+        return item
+
+    def has_item(self, **kwargs):
+        """
+        Returns whether an item (record) exists within a table in DynamoDB.
+
+        To specify the key of the item you'd like to check for, you can
+        specify the key attributes as kwargs.
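+
+        Note that this is implemented as a full ``get_item`` call that
+        swallows the ``ItemNotFound`` exception, so it consumes read
+        capacity just like any other read.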
+ + Optionally accepts a ``consistent`` parameter, which should be a + boolean. If you provide ``True``, it will perform + a consistent (but more expensive) read from DynamoDB. + (Default: ``False``) + + Optionally accepts an ``attributes`` parameter, which should be a + list of fieldnames to fetch. (Default: ``None``, which means all fields + should be fetched) + + Returns ``True`` if an ``Item`` is present, ``False`` if not. + + Example:: + + # Simple, just hash-key schema. + >>> users.has_item(username='johndoe') + True + + # Complex schema, item not present. + >>> users.has_item( + ... username='johndoe', + ... date_joined='2014-01-07' + ... ) + False + + """ + try: + self.get_item(**kwargs) + except (JSONResponseError, exceptions.ItemNotFound): + return False + + return True + + def lookup(self, *args, **kwargs): + """ + Look up an entry in DynamoDB. This is mostly backwards compatible + with boto.dynamodb. Unlike get_item, it takes hash_key and range_key first, + although you may still specify keyword arguments instead. + + Also unlike the get_item command, if the returned item has no keys + (i.e., it does not exist in DynamoDB), a None result is returned, instead + of an empty key object. + + Example:: + >>> user = users.lookup(username) + >>> user = users.lookup(username, consistent=True) + >>> app = apps.lookup('my_customer_id', 'my_app_id') + + """ + if not self.schema: + self.describe() + for x, arg in enumerate(args): + kwargs[self.schema[x].name] = arg + ret = self.get_item(**kwargs) + if not ret.keys(): + return None + return ret + + def new_item(self, *args): + """ + Returns a new, blank item + + This is mostly for consistency with boto.dynamodb + """ + if not self.schema: + self.describe() + data = {} + for x, arg in enumerate(args): + data[self.schema[x].name] = arg + return Item(self, data=data) + + def put_item(self, data, overwrite=False): + """ + Saves an entire item to DynamoDB. + + By default, if any part of the ``Item``'s original data doesn't match + what's currently in DynamoDB, this request will fail. This prevents + other processes from updating the data in between when you read the + item & when your request to update the item's data is processed, which + would typically result in some data loss. + + Requires a ``data`` parameter, which should be a dictionary of the data + you'd like to store in DynamoDB. + + Optionally accepts an ``overwrite`` parameter, which should be a + boolean. If you provide ``True``, this will tell DynamoDB to blindly + overwrite whatever data is present, if any. + + Returns ``True`` on success. + + Example:: + + >>> users.put_item(data={ + ... 'username': 'jane', + ... 'first_name': 'Jane', + ... 'last_name': 'Doe', + ... 'date_joined': 126478915, + ... }) + True + + """ + item = Item(self, data=data) + return item.save(overwrite=overwrite) + + def _put_item(self, item_data, expects=None): + """ + The internal variant of ``put_item`` (full data). This is used by the + ``Item`` objects, since that operation is represented at the + table-level by the API, but conceptually maps better to telling an + individual ``Item`` to save itself. + """ + kwargs = {} + + if expects is not None: + kwargs['expected'] = expects + + self.connection.put_item(self.table_name, item_data, **kwargs) + return True + + def _update_item(self, key, item_data, expects=None): + """ + The internal variant of ``put_item`` (partial data). 
This is used by the
+        ``Item`` objects, since that operation is represented at the
+        table-level by the API, but conceptually maps better to telling an
+        individual ``Item`` to save itself.
+        """
+        raw_key = self._encode_keys(key)
+        kwargs = {}
+
+        if expects is not None:
+            kwargs['expected'] = expects
+
+        self.connection.update_item(self.table_name, raw_key, item_data, **kwargs)
+        return True
+
+    def delete_item(self, expected=None, conditional_operator=None, **kwargs):
+        """
+        Deletes a single item. You can perform a conditional delete operation
+        that deletes the item if it exists, or if it has an expected attribute
+        value.
+
+        Conditional deletes are useful for only deleting items if specific
+        conditions are met. If those conditions are met, DynamoDB performs
+        the delete. Otherwise, the item is not deleted.
+
+        To specify the expected attribute values of the item, you can pass a
+        dictionary of conditions to ``expected``. Each condition should follow
+        the pattern ``<fieldname>__<comparison_operator>=<value_to_expect>``.
+
+        **IMPORTANT** - Be careful when using this method, there is no undo.
+
+        To specify the key of the item you'd like to delete, you can specify
+        the key attributes as kwargs.
+
+        Optionally accepts an ``expected`` parameter which is a dictionary of
+        expected attribute value conditions.
+
+        Optionally accepts a ``conditional_operator`` which applies to the
+        expected attribute value conditions:
+
+        + `AND` - If all of the conditions evaluate to true (default)
+        + `OR` - True if at least one condition evaluates to true
+
+        Returns ``True`` on success, ``False`` on failed conditional delete.
+
+        Example::
+
+            # A simple hash key.
+            >>> users.delete_item(username='johndoe')
+            True
+
+            # A complex hash+range key.
+            >>> users.delete_item(username='jane', last_name='Doe')
+            True
+
+            # With a key that is an invalid variable name in Python.
+            # Also, assumes a different schema than previous examples.
+            >>> users.delete_item(**{
+            ...     'date-joined': 127549192,
+            ... })
+            True
+
+            # Conditional delete
+            >>> users.delete_item(username='johndoe',
+            ...                   expected={'balance__eq': 0})
+            True
+        """
+        expected = self._build_filters(expected, using=FILTER_OPERATORS)
+        raw_key = self._encode_keys(kwargs)
+
+        try:
+            self.connection.delete_item(self.table_name, raw_key,
+                                        expected=expected,
+                                        conditional_operator=conditional_operator)
+        except exceptions.ConditionalCheckFailedException:
+            return False
+
+        return True
+
+    def get_key_fields(self):
+        """
+        Returns the fields necessary to make a key for a table.
+
+        If the ``Table`` does not already have a populated ``schema``,
+        this will request it via a ``Table.describe`` call.
+
+        Returns a list of fieldnames (strings).
+
+        Example::
+
+            # A simple hash key.
+            >>> users.get_key_fields()
+            ['username']
+
+            # A complex hash+range key.
+            >>> users.get_key_fields()
+            ['username', 'last_name']
+
+        """
+        if not self.schema:
+            # We don't know the structure of the table. Get a description to
+            # populate the schema.
+            self.describe()
+
+        return [field.name for field in self.schema]
+
+    def batch_write(self):
+        """
+        Allows the batching of writes to DynamoDB.
+
+        Since each write/delete call to DynamoDB has a cost associated with it,
+        when loading lots of data, it makes sense to batch them, creating as
+        few calls as possible.
+
+        This returns a context manager that will transparently handle creating
+        these batches.
The object you get back lightly-resembles a ``Table`` + object, sharing just the ``put_item`` & ``delete_item`` methods + (which are all that DynamoDB can batch in terms of writing data). + + DynamoDB's maximum batch size is 25 items per request. If you attempt + to put/delete more than that, the context manager will batch as many + as it can up to that number, then flush them to DynamoDB & continue + batching as more calls come in. + + Example:: + + # Assuming a table with one record... + >>> with users.batch_write() as batch: + ... batch.put_item(data={ + ... 'username': 'johndoe', + ... 'first_name': 'John', + ... 'last_name': 'Doe', + ... 'owner': 1, + ... }) + ... # Nothing across the wire yet. + ... batch.delete_item(username='bob') + ... # Still no requests sent. + ... batch.put_item(data={ + ... 'username': 'jane', + ... 'first_name': 'Jane', + ... 'last_name': 'Doe', + ... 'date_joined': 127436192, + ... }) + ... # Nothing yet, but once we leave the context, the + ... # put/deletes will be sent. + + """ + # PHENOMENAL COSMIC DOCS!!! itty-bitty code. + return BatchTable(self) + + def _build_filters(self, filter_kwargs, using=QUERY_OPERATORS): + """ + An internal method for taking query/scan-style ``**kwargs`` & turning + them into the raw structure DynamoDB expects for filtering. + """ + if filter_kwargs is None: + return + + filters = {} + + for field_and_op, value in filter_kwargs.items(): + field_bits = field_and_op.split('__') + fieldname = '__'.join(field_bits[:-1]) + + try: + op = using[field_bits[-1]] + except KeyError: + raise exceptions.UnknownFilterTypeError( + "Operator '%s' from '%s' is not recognized." % ( + field_bits[-1], + field_and_op + ) + ) + + lookup = { + 'AttributeValueList': [], + 'ComparisonOperator': op, + } + + # Special-case the ``NULL/NOT_NULL`` case. + if field_bits[-1] == 'null': + del lookup['AttributeValueList'] + + if value is False: + lookup['ComparisonOperator'] = 'NOT_NULL' + else: + lookup['ComparisonOperator'] = 'NULL' + # Special-case the ``BETWEEN`` case. + elif field_bits[-1] == 'between': + if len(value) == 2 and isinstance(value, (list, tuple)): + lookup['AttributeValueList'].append( + self._dynamizer.encode(value[0]) + ) + lookup['AttributeValueList'].append( + self._dynamizer.encode(value[1]) + ) + # Special-case the ``IN`` case + elif field_bits[-1] == 'in': + for val in value: + lookup['AttributeValueList'].append(self._dynamizer.encode(val)) + else: + # Fix up the value for encoding, because it was built to only work + # with ``set``s. + if isinstance(value, (list, tuple)): + value = set(value) + lookup['AttributeValueList'].append( + self._dynamizer.encode(value) + ) + + # Finally, insert it into the filters. + filters[fieldname] = lookup + + return filters + + def query(self, limit=None, index=None, reverse=False, consistent=False, + attributes=None, max_page_size=None, **filter_kwargs): + """ + **WARNING:** This method is provided **strictly** for + backward-compatibility. It returns results in an incorrect order. + + If you are writing new code, please use ``Table.query_2``. + """ + reverse = not reverse + return self.query_2(limit=limit, index=index, reverse=reverse, + consistent=consistent, attributes=attributes, + max_page_size=max_page_size, **filter_kwargs) + + def query_2(self, limit=None, index=None, reverse=False, + consistent=False, attributes=None, max_page_size=None, + query_filter=None, conditional_operator=None, + **filter_kwargs): + """ + Queries for a set of matching items in a DynamoDB table. 
+
+        Queries can be performed against a hash key, a hash+range key or
+        against any data stored in your local secondary indexes. Query filters
+        can be used to filter on arbitrary fields.
+
+        **Note** - You cannot query against arbitrary fields within the data
+        stored in DynamoDB unless you specify ``query_filter`` values.
+
+        To specify the filters of the items you'd like to get, you can specify
+        the filters as kwargs. Each filter kwarg should follow the pattern
+        ``<fieldname>__<filter_operation>=<value_to_look_for>``. Query filters
+        are specified in the same way.
+
+        Optionally accepts a ``limit`` parameter, which should be an integer
+        count of the total number of items to return. (Default: ``None`` -
+        all results)
+
+        Optionally accepts an ``index`` parameter, which should be a string of
+        the name of the local secondary index you want to query against.
+        (Default: ``None``)
+
+        Optionally accepts a ``reverse`` parameter, which will present the
+        results in reverse order. (Default: ``False`` - normal order)
+
+        Optionally accepts a ``consistent`` parameter, which should be a
+        boolean. If you provide ``True``, it will force a consistent read of
+        the data (more expensive). (Default: ``False`` - use eventually
+        consistent reads)
+
+        Optionally accepts an ``attributes`` parameter, which should be a
+        tuple. If you provide any attributes, only these will be fetched
+        from DynamoDB. This uses the ``AttributesToGet`` API parameter and
+        sets ``Select`` to ``SPECIFIC_ATTRIBUTES``.
+
+        Optionally accepts a ``max_page_size`` parameter, which should be an
+        integer count of the maximum number of items to retrieve
+        **per-request**. This is useful in making faster requests &
+        preventing the scan from drowning out other queries. (Default:
+        ``None`` - fetch as many as DynamoDB will return)
+
+        Optionally accepts a ``query_filter`` which is a dictionary of filter
+        conditions against any arbitrary field in the returned data.
+
+        Optionally accepts a ``conditional_operator`` which applies to the
+        query filter conditions:
+
+        + `AND` - True if all filter conditions evaluate to true (default)
+        + `OR` - True if at least one filter condition evaluates to true
+
+        Returns a ``ResultSet``, which transparently handles the pagination of
+        results you get back.
+
+        Example::
+
+            # Look for last names equal to "Doe".
+            >>> results = users.query(last_name__eq='Doe')
+            >>> for res in results:
+            ...     print res['first_name']
+            'John'
+            'Jane'
+
+            # Look for last names beginning with "D", in reverse order, limit 3.
+            >>> results = users.query(
+            ...     last_name__beginswith='D',
+            ...     reverse=True,
+            ...     limit=3
+            ... )
+            >>> for res in results:
+            ...     print res['first_name']
+            'Alice'
+            'Jane'
+            'John'
+
+            # Use an LSI & a consistent read.
+            >>> results = users.query(
+            ...     date_joined__gte=1236451000,
+            ...     owner__eq=1,
+            ...     index='DateJoinedIndex',
+            ...     consistent=True
+            ... )
+            >>> for res in results:
+            ...     print res['first_name']
+            'Alice'
+            'Bob'
+            'John'
+            'Fred'
+
+            # Filter by non-indexed field(s)
+            >>> results = users.query(
+            ...     last_name__eq='Doe',
+            ...     reverse=True,
+            ...     query_filter={
+            ...         'first_name__beginswith': 'A'
+            ...     }
+            ... )
+            >>> for res in results:
+            ...     print res['first_name'] + ' ' + res['last_name']
+            'Alice Doe'
+
+        """
+        if self.schema:
+            if len(self.schema) == 1:
+                if len(filter_kwargs) <= 1:
+                    if not self.global_indexes or not len(self.global_indexes):
+                        # If the schema only has one field, there's <= 1 filter
+                        # param & no Global Secondary Indexes, this is user
+                        # error. Bail early.
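+                        # (For example, ``users.query_2(username__eq='johndoe')``
+                        # on a hash-only table is better served by a plain
+                        # ``get_item`` call.)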
+                        raise exceptions.QueryError(
+                            "You must specify more than one key to filter on."
+                        )
+
+        if attributes is not None:
+            select = 'SPECIFIC_ATTRIBUTES'
+        else:
+            select = None
+
+        results = ResultSet(
+            max_page_size=max_page_size
+        )
+        kwargs = filter_kwargs.copy()
+        kwargs.update({
+            'limit': limit,
+            'index': index,
+            'reverse': reverse,
+            'consistent': consistent,
+            'select': select,
+            'attributes_to_get': attributes,
+            'query_filter': query_filter,
+            'conditional_operator': conditional_operator,
+        })
+        results.to_call(self._query, **kwargs)
+        return results
+
+    def query_count(self, index=None, consistent=False, conditional_operator=None,
+                    query_filter=None, scan_index_forward=True, limit=None,
+                    exclusive_start_key=None, **filter_kwargs):
+        """
+        Queries the exact count of matching items in a DynamoDB table.
+
+        Queries can be performed against a hash key, a hash+range key or
+        against any data stored in your local secondary indexes. Query filters
+        can be used to filter on arbitrary fields.
+
+        To specify the filters of the items you'd like to get, you can specify
+        the filters as kwargs. Each filter kwarg should follow the pattern
+        ``<fieldname>__<filter_operation>=<value_to_look_for>``. Query filters
+        are specified in the same way.
+
+        Optionally accepts an ``index`` parameter, which should be a string of
+        the name of the local secondary index you want to query against.
+        (Default: ``None``)
+
+        Optionally accepts a ``consistent`` parameter, which should be a
+        boolean. If you provide ``True``, it will force a consistent read of
+        the data (more expensive). (Default: ``False`` - use eventually
+        consistent reads)
+
+        Optionally accepts a ``query_filter`` which is a dictionary of filter
+        conditions against any arbitrary field in the returned data.
+
+        Optionally accepts a ``conditional_operator`` which applies to the
+        query filter conditions:
+
+        + `AND` - True if all filter conditions evaluate to true (default)
+        + `OR` - True if at least one filter condition evaluates to true
+
+        Optionally accepts an ``exclusive_start_key`` which is used to get
+        the remaining items when a query cannot return the complete count.
+
+        Returns an integer which represents the exact number of matched
+        items.
+
+        :type scan_index_forward: boolean
+        :param scan_index_forward: Specifies ascending (true) or descending
+            (false) traversal of the index. DynamoDB returns results reflecting
+            the requested order determined by the range key. If the data type
+            is Number, the results are returned in numeric order. For String,
+            the results are returned in order of ASCII character code values.
+            For Binary, DynamoDB treats each byte of the binary data as
+            unsigned when it compares binary values.
+
+            If ScanIndexForward is not specified, the results are returned in
+            ascending order.
+
+        :type limit: integer
+        :param limit: The maximum number of items to evaluate (not necessarily
+            the number of matching items).
+
+        Example::
+
+            # Look for last names equal to "Doe".
+            >>> users.query_count(last_name__eq='Doe')
+            5
+
+            # Use an LSI & a consistent read.
+            >>> users.query_count(
+            ...     date_joined__gte=1236451000,
+            ...     owner__eq=1,
+            ...     index='DateJoinedIndex',
+            ...     consistent=True
+            ... )
+            2
+
+        """
+        key_conditions = self._build_filters(
+            filter_kwargs,
+            using=QUERY_OPERATORS
+        )
+
+        built_query_filter = self._build_filters(
+            query_filter,
+            using=FILTER_OPERATORS
+        )
+
+        count_buffer = 0
+        last_evaluated_key = exclusive_start_key
+
+        while True:
+            raw_results = self.connection.query(
+                self.table_name,
+                index_name=index,
+                consistent_read=consistent,
+                select='COUNT',
+                key_conditions=key_conditions,
+                query_filter=built_query_filter,
+                conditional_operator=conditional_operator,
+                limit=limit,
+                scan_index_forward=scan_index_forward,
+                exclusive_start_key=last_evaluated_key
+            )
+
+            count_buffer += int(raw_results.get('Count', 0))
+            last_evaluated_key = raw_results.get('LastEvaluatedKey')
+            if not last_evaluated_key or count_buffer < 1:
+                break
+
+        return count_buffer
+
+    def _query(self, limit=None, index=None, reverse=False, consistent=False,
+               exclusive_start_key=None, select=None, attributes_to_get=None,
+               query_filter=None, conditional_operator=None, **filter_kwargs):
+        """
+        The internal method that performs the actual queries. Used extensively
+        by ``ResultSet`` to perform each (paginated) request.
+        """
+        kwargs = {
+            'limit': limit,
+            'index_name': index,
+            'consistent_read': consistent,
+            'select': select,
+            'attributes_to_get': attributes_to_get,
+            'conditional_operator': conditional_operator,
+        }
+
+        if reverse:
+            kwargs['scan_index_forward'] = False
+
+        if exclusive_start_key:
+            kwargs['exclusive_start_key'] = {}
+
+            for key, value in exclusive_start_key.items():
+                kwargs['exclusive_start_key'][key] = \
+                    self._dynamizer.encode(value)
+
+        # Convert the filters into something we can actually use.
+        kwargs['key_conditions'] = self._build_filters(
+            filter_kwargs,
+            using=QUERY_OPERATORS
+        )
+
+        kwargs['query_filter'] = self._build_filters(
+            query_filter,
+            using=FILTER_OPERATORS
+        )
+
+        raw_results = self.connection.query(
+            self.table_name,
+            **kwargs
+        )
+        results = []
+        last_key = None
+
+        for raw_item in raw_results.get('Items', []):
+            item = Item(self)
+            item.load({
+                'Item': raw_item,
+            })
+            results.append(item)
+
+        if raw_results.get('LastEvaluatedKey', None):
+            last_key = {}
+
+            for key, value in raw_results['LastEvaluatedKey'].items():
+                last_key[key] = self._dynamizer.decode(value)
+
+        return {
+            'results': results,
+            'last_key': last_key,
+        }
+
+    def scan(self, limit=None, segment=None, total_segments=None,
+             max_page_size=None, attributes=None, conditional_operator=None,
+             **filter_kwargs):
+        """
+        Scans across all items within a DynamoDB table.
+
+        Scans can be performed against a hash key or a hash+range key. You can
+        additionally filter the results after the table has been read but
+        before the response is returned by using query filters.
+
+        To specify the filters of the items you'd like to get, you can specify
+        the filters as kwargs. Each filter kwarg should follow the pattern
+        ``<fieldname>__<filter_operation>=<value_to_look_for>``.
+
+        Optionally accepts a ``limit`` parameter, which should be an integer
+        count of the total number of items to return. (Default: ``None`` -
+        all results)
+
+        Optionally accepts a ``segment`` parameter, which should be the
+        integer index of the segment to retrieve. Please see the documentation
+        about Parallel Scans (Default: ``None`` - no segments)
+
+        Optionally accepts a ``total_segments`` parameter, which should be an
+        integer count of the number of segments to divide the table into.
+ Please see the documentation about Parallel Scans (Default: ``None`` - + no segments) + + Optionally accepts a ``max_page_size`` parameter, which should be an + integer count of the maximum number of items to retrieve + **per-request**. This is useful in making faster requests & prevent + the scan from drowning out other queries. (Default: ``None`` - + fetch as many as DynamoDB will return) + + Optionally accepts an ``attributes`` parameter, which should be a + tuple. If you provide any attributes only these will be fetched + from DynamoDB. This uses the ``AttributesToGet`` and set's + ``Select`` to ``SPECIFIC_ATTRIBUTES`` API. + + Returns a ``ResultSet``, which transparently handles the pagination of + results you get back. + + Example:: + + # All results. + >>> everything = users.scan() + + # Look for last names beginning with "D". + >>> results = users.scan(last_name__beginswith='D') + >>> for res in results: + ... print res['first_name'] + 'Alice' + 'John' + 'Jane' + + # Use an ``IN`` filter & limit. + >>> results = users.scan( + ... age__in=[25, 26, 27, 28, 29], + ... limit=1 + ... ) + >>> for res in results: + ... print res['first_name'] + 'Alice' + + """ + results = ResultSet( + max_page_size=max_page_size + ) + kwargs = filter_kwargs.copy() + kwargs.update({ + 'limit': limit, + 'segment': segment, + 'total_segments': total_segments, + 'attributes': attributes, + 'conditional_operator': conditional_operator, + }) + results.to_call(self._scan, **kwargs) + return results + + def _scan(self, limit=None, exclusive_start_key=None, segment=None, + total_segments=None, attributes=None, conditional_operator=None, + **filter_kwargs): + """ + The internal method that performs the actual scan. Used extensively + by ``ResultSet`` to perform each (paginated) request. + """ + kwargs = { + 'limit': limit, + 'segment': segment, + 'total_segments': total_segments, + 'attributes_to_get': attributes, + 'conditional_operator': conditional_operator, + } + + if exclusive_start_key: + kwargs['exclusive_start_key'] = {} + + for key, value in exclusive_start_key.items(): + kwargs['exclusive_start_key'][key] = \ + self._dynamizer.encode(value) + + # Convert the filters into something we can actually use. + kwargs['scan_filter'] = self._build_filters( + filter_kwargs, + using=FILTER_OPERATORS + ) + + raw_results = self.connection.scan( + self.table_name, + **kwargs + ) + results = [] + last_key = None + + for raw_item in raw_results.get('Items', []): + item = Item(self) + item.load({ + 'Item': raw_item, + }) + results.append(item) + + if raw_results.get('LastEvaluatedKey', None): + last_key = {} + + for key, value in raw_results['LastEvaluatedKey'].items(): + last_key[key] = self._dynamizer.decode(value) + + return { + 'results': results, + 'last_key': last_key, + } + + def batch_get(self, keys, consistent=False, attributes=None): + """ + Fetches many specific items in batch from a table. + + Requires a ``keys`` parameter, which should be a list of dictionaries. + Each dictionary should consist of the keys values to specify. + + Optionally accepts a ``consistent`` parameter, which should be a + boolean. If you provide ``True``, a strongly consistent read will be + used. (Default: False) + + Optionally accepts an ``attributes`` parameter, which should be a + tuple. If you provide any attributes only these will be fetched + from DynamoDB. + + Returns a ``ResultSet``, which transparently handles the pagination of + results you get back. + + Example:: + + >>> results = users.batch_get(keys=[ + ... { + ... 
+            ...     'username': 'johndoe',
+            ... },
+            ... {
+            ...     'username': 'jane',
+            ... },
+            ... {
+            ...     'username': 'fred',
+            ... },
+            ... ])
+            >>> for res in results:
+            ...     print res['first_name']
+            'John'
+            'Jane'
+            'Fred'
+
+        """
+        # We pass the keys to the constructor instead, so it can maintain its
+        # own internal state as to what keys have been processed.
+        results = BatchGetResultSet(keys=keys, max_batch_get=self.max_batch_get)
+        results.to_call(self._batch_get, consistent=consistent, attributes=attributes)
+        return results
+
+    def _batch_get(self, keys, consistent=False, attributes=None):
+        """
+        The internal method that performs the actual batch get. Used extensively
+        by ``BatchGetResultSet`` to perform each (paginated) request.
+        """
+        items = {
+            self.table_name: {
+                'Keys': [],
+            },
+        }
+
+        if consistent:
+            items[self.table_name]['ConsistentRead'] = True
+
+        if attributes is not None:
+            items[self.table_name]['AttributesToGet'] = attributes
+
+        for key_data in keys:
+            raw_key = {}
+
+            for key, value in key_data.items():
+                raw_key[key] = self._dynamizer.encode(value)
+
+            items[self.table_name]['Keys'].append(raw_key)
+
+        raw_results = self.connection.batch_get_item(request_items=items)
+        results = []
+        unprocessed_keys = []
+
+        for raw_item in raw_results['Responses'].get(self.table_name, []):
+            item = Item(self)
+            item.load({
+                'Item': raw_item,
+            })
+            results.append(item)
+
+        raw_unprocessed = raw_results.get('UnprocessedKeys', {})
+
+        for raw_key in raw_unprocessed.get('Keys', []):
+            py_key = {}
+
+            for key, value in raw_key.items():
+                py_key[key] = self._dynamizer.decode(value)
+
+            unprocessed_keys.append(py_key)
+
+        return {
+            'results': results,
+            # NEVER return a ``last_key``. Just in case any part of
+            # ``ResultSet`` peeks through, since much of the
+            # original underlying implementation is based on this key.
+            'last_key': None,
+            'unprocessed_keys': unprocessed_keys,
+        }
+
+    def count(self):
+        """
+        Returns a (very) eventually consistent count of the number of items
+        in a table.
+
+        Lag time is about 6 hours, so don't expect a high degree of accuracy.
+
+        Example::
+
+            >>> users.count()
+            6
+
+        """
+        info = self.describe()
+        return info['Table'].get('ItemCount', 0)
+
+
+class BatchTable(object):
+    """
+    Used by ``Table`` as the context manager for batch writes.
+
+    You likely don't want to try to use this object directly.
+    """
+    def __init__(self, table):
+        self.table = table
+        self._to_put = []
+        self._to_delete = []
+        self._unprocessed = []
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        if self._to_put or self._to_delete:
+            # Flush anything that's left.
+            self.flush()
+
+        if self._unprocessed:
+            # Finally, handle anything that wasn't processed.
+            self.resend_unprocessed()
+
+    def put_item(self, data, overwrite=False):
+        self._to_put.append(data)
+
+        if self.should_flush():
+            self.flush()
+
+    def delete_item(self, **kwargs):
+        self._to_delete.append(kwargs)
+
+        if self.should_flush():
+            self.flush()
+
+    def should_flush(self):
+        if len(self._to_put) + len(self._to_delete) == 25:
+            return True
+
+        return False
+
+    def flush(self):
+        batch_data = {
+            self.table.table_name: [
+                # We'll insert data here shortly.
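+                # Each entry will become either a ``{'PutRequest': ...}`` or
+                # a ``{'DeleteRequest': ...}`` dict, matching the shape the
+                # ``BatchWriteItem`` API expects.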
+ ], + } + + for put in self._to_put: + item = Item(self.table, data=put) + batch_data[self.table.table_name].append({ + 'PutRequest': { + 'Item': item.prepare_full(), + } + }) + + for delete in self._to_delete: + batch_data[self.table.table_name].append({ + 'DeleteRequest': { + 'Key': self.table._encode_keys(delete), + } + }) + + resp = self.table.connection.batch_write_item(batch_data) + self.handle_unprocessed(resp) + + self._to_put = [] + self._to_delete = [] + return True + + def handle_unprocessed(self, resp): + if len(resp.get('UnprocessedItems', [])): + table_name = self.table.table_name + unprocessed = resp['UnprocessedItems'].get(table_name, []) + + # Some items have not been processed. Stow them for now & + # re-attempt processing on ``__exit__``. + msg = "%s items were unprocessed. Storing for later." + boto.log.info(msg % len(unprocessed)) + self._unprocessed.extend(unprocessed) + + def resend_unprocessed(self): + # If there are unprocessed records (for instance, the user was over + # their throughput limitations), iterate over them & send until they're + # all there. + boto.log.info( + "Re-sending %s unprocessed items." % len(self._unprocessed) + ) + + while len(self._unprocessed): + # Again, do 25 at a time. + to_resend = self._unprocessed[:25] + # Remove them from the list. + self._unprocessed = self._unprocessed[25:] + batch_data = { + self.table.table_name: to_resend + } + boto.log.info("Sending %s items" % len(to_resend)) + resp = self.table.connection.batch_write_item(batch_data) + self.handle_unprocessed(resp) + boto.log.info( + "%s unprocessed items left" % len(self._unprocessed) + ) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/types.py b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/types.py new file mode 100644 index 0000000000000000000000000000000000000000..1216621ac5b9f1ba69628e6f57d13d191e06437e --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/dynamodb2/types.py @@ -0,0 +1,44 @@ +# Shadow the DynamoDB v1 bits. +# This way, no end user should have to cross-import between versions & we +# reserve the namespace to extend v2 if it's ever needed. +from boto.dynamodb.types import NonBooleanDynamizer, Dynamizer + + +# Some constants for our use. +STRING = 'S' +NUMBER = 'N' +BINARY = 'B' +STRING_SET = 'SS' +NUMBER_SET = 'NS' +BINARY_SET = 'BS' +NULL = 'NULL' +BOOLEAN = 'BOOL' +MAP = 'M' +LIST = 'L' + +QUERY_OPERATORS = { + 'eq': 'EQ', + 'lte': 'LE', + 'lt': 'LT', + 'gte': 'GE', + 'gt': 'GT', + 'beginswith': 'BEGINS_WITH', + 'between': 'BETWEEN', +} + +FILTER_OPERATORS = { + 'eq': 'EQ', + 'ne': 'NE', + 'lte': 'LE', + 'lt': 'LT', + 'gte': 'GE', + 'gt': 'GT', + # FIXME: Is this necessary? i.e. 
``whatever__null=False`` + 'nnull': 'NOT_NULL', + 'null': 'NULL', + 'contains': 'CONTAINS', + 'ncontains': 'NOT_CONTAINS', + 'beginswith': 'BEGINS_WITH', + 'in': 'IN', + 'between': 'BETWEEN', +} diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c3976da19f54d8f099fd5ec18633a99e69fa928a --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/__init__.py @@ -0,0 +1,86 @@ +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +""" +This module provides an interface to the Elastic Compute Cloud (EC2) +service from AWS. +""" +from boto.ec2.connection import EC2Connection +from boto.regioninfo import RegionInfo, get_regions, load_regions + + +RegionData = load_regions().get('ec2', {}) + + +def regions(**kw_params): + """ + Get all available regions for the EC2 service. + You may pass any of the arguments accepted by the EC2Connection + object's constructor as keyword arguments and they will be + passed along to the EC2Connection object. + + :rtype: list + :return: A list of :class:`boto.ec2.regioninfo.RegionInfo` + """ + return get_regions('ec2', connection_cls=EC2Connection) + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.ec2.connection.EC2Connection`. + Any additional parameters after the region_name are passed on to + the connect method of the region object. + + :type: str + :param region_name: The name of the region to connect to. + + :rtype: :class:`boto.ec2.connection.EC2Connection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + if 'region' in kw_params and isinstance(kw_params['region'], RegionInfo)\ + and region_name == kw_params['region'].name: + return EC2Connection(**kw_params) + + for region in regions(**kw_params): + if region.name == region_name: + return region.connect(**kw_params) + + return None + + +def get_region(region_name, **kw_params): + """ + Find and return a :class:`boto.ec2.regioninfo.RegionInfo` object + given a region name. + + :type: str + :param: The name of the region. + + :rtype: :class:`boto.ec2.regioninfo.RegionInfo` + :return: The RegionInfo object for the given region or None if + an invalid region name is provided. 
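+
+    Example (a sketch; assumes standard AWS credentials are available to
+    boto)::
+
+        >>> import boto.ec2
+        >>> region = boto.ec2.get_region('us-west-2')
+        >>> conn = region.connect()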
+ """ + for region in regions(**kw_params): + if region.name == region_name: + return region + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/address.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/address.py new file mode 100644 index 0000000000000000000000000000000000000000..807406f78b1efa647984ecb4d9bd685e1b253a9f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/address.py @@ -0,0 +1,130 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +from boto.ec2.ec2object import EC2Object + + +class Address(EC2Object): + """ + Represents an EC2 Elastic IP Address + + :ivar public_ip: The Elastic IP address. + :ivar instance_id: The instance the address is associated with (if any). + :ivar domain: Indicates whether the address is a EC2 address or a VPC address (standard|vpc). + :ivar allocation_id: The allocation ID for the address (VPC addresses only). + :ivar association_id: The association ID for the address (VPC addresses only). + :ivar network_interface_id: The network interface (if any) that the address is associated with (VPC addresses only). + :ivar network_interface_owner_id: The owner IID (VPC addresses only). + :ivar private_ip_address: The private IP address associated with the Elastic IP address (VPC addresses only). + """ + + def __init__(self, connection=None, public_ip=None, instance_id=None): + super(Address, self).__init__(connection) + self.connection = connection + self.public_ip = public_ip + self.instance_id = instance_id + self.domain = None + self.allocation_id = None + self.association_id = None + self.network_interface_id = None + self.network_interface_owner_id = None + self.private_ip_address = None + + def __repr__(self): + return 'Address:%s' % self.public_ip + + def endElement(self, name, value, connection): + if name == 'publicIp': + self.public_ip = value + elif name == 'instanceId': + self.instance_id = value + elif name == 'domain': + self.domain = value + elif name == 'allocationId': + self.allocation_id = value + elif name == 'associationId': + self.association_id = value + elif name == 'networkInterfaceId': + self.network_interface_id = value + elif name == 'networkInterfaceOwnerId': + self.network_interface_owner_id = value + elif name == 'privateIpAddress': + self.private_ip_address = value + else: + setattr(self, name, value) + + def release(self, dry_run=False): + """ + Free up this Elastic IP address. 
+ :see: :meth:`boto.ec2.connection.EC2Connection.release_address` + """ + if self.allocation_id: + return self.connection.release_address( + allocation_id=self.allocation_id, + dry_run=dry_run) + else: + return self.connection.release_address( + public_ip=self.public_ip, + dry_run=dry_run + ) + + delete = release + + def associate(self, instance_id=None, network_interface_id=None, private_ip_address=None, allow_reassociation=False, dry_run=False): + """ + Associate this Elastic IP address with a currently running instance. + :see: :meth:`boto.ec2.connection.EC2Connection.associate_address` + """ + if self.allocation_id: + return self.connection.associate_address( + instance_id=instance_id, + public_ip=self.public_ip, + allocation_id=self.allocation_id, + network_interface_id=network_interface_id, + private_ip_address=private_ip_address, + allow_reassociation=allow_reassociation, + dry_run=dry_run + ) + return self.connection.associate_address( + instance_id=instance_id, + public_ip=self.public_ip, + network_interface_id=network_interface_id, + private_ip_address=private_ip_address, + allow_reassociation=allow_reassociation, + dry_run=dry_run + ) + + def disassociate(self, dry_run=False): + """ + Disassociate this Elastic IP address from a currently running instance. + :see: :meth:`boto.ec2.connection.EC2Connection.disassociate_address` + """ + if self.association_id: + return self.connection.disassociate_address( + association_id=self.association_id, + dry_run=dry_run + ) + else: + return self.connection.disassociate_address( + public_ip=self.public_ip, + dry_run=dry_run + ) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/attributes.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/attributes.py new file mode 100644 index 0000000000000000000000000000000000000000..d76e5c5428f6a31ed5038fc18603ec40d240ef27 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/attributes.py @@ -0,0 +1,71 @@ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ + +class AccountAttribute(object): + def __init__(self, connection=None): + self.connection = connection + self.attribute_name = None + self.attribute_values = None + + def startElement(self, name, attrs, connection): + if name == 'attributeValueSet': + self.attribute_values = AttributeValues() + return self.attribute_values + + def endElement(self, name, value, connection): + if name == 'attributeName': + self.attribute_name = value + + +class AttributeValues(list): + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'attributeValue': + self.append(value) + + +class VPCAttribute(object): + def __init__(self, connection=None): + self.connection = connection + self.vpc_id = None + self.enable_dns_hostnames = None + self.enable_dns_support = None + self._current_attr = None + + def startElement(self, name, attrs, connection): + if name in ('enableDnsHostnames', 'enableDnsSupport'): + self._current_attr = name + + def endElement(self, name, value, connection): + if name == 'vpcId': + self.vpc_id = value + elif name == 'value': + if value == 'true': + value = True + else: + value = False + if self._current_attr == 'enableDnsHostnames': + self.enable_dns_hostnames = value + elif self._current_attr == 'enableDnsSupport': + self.enable_dns_support = value diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..02413d9b659723cc5d04627a721dd8d5a18e75c1 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/__init__.py @@ -0,0 +1,895 @@ +# Copyright (c) 2009-2011 Reza Lotun http://reza.lotun.name/ +# Copyright (c) 2011 Jann Kleen +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +This module provides an interface to the Elastic Compute Cloud (EC2) +Auto Scaling service. 
+""" + +import base64 + +import boto +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo, get_regions, load_regions +from boto.ec2.autoscale.request import Request +from boto.ec2.autoscale.launchconfig import LaunchConfiguration +from boto.ec2.autoscale.group import AutoScalingGroup +from boto.ec2.autoscale.group import ProcessType +from boto.ec2.autoscale.activity import Activity +from boto.ec2.autoscale.policy import AdjustmentType +from boto.ec2.autoscale.policy import MetricCollectionTypes +from boto.ec2.autoscale.policy import ScalingPolicy +from boto.ec2.autoscale.policy import TerminationPolicies +from boto.ec2.autoscale.instance import Instance +from boto.ec2.autoscale.scheduled import ScheduledUpdateGroupAction +from boto.ec2.autoscale.tag import Tag +from boto.ec2.autoscale.limits import AccountLimits +from boto.compat import six + +RegionData = load_regions().get('autoscaling', {}) + + +def regions(): + """ + Get all available regions for the Auto Scaling service. + + :rtype: list + :return: A list of :class:`boto.RegionInfo` instances + """ + return get_regions('autoscaling', connection_cls=AutoScaleConnection) + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.ec2.autoscale.AutoScaleConnection`. + + :param str region_name: The name of the region to connect to. + + :rtype: :class:`boto.ec2.AutoScaleConnection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None + + +class AutoScaleConnection(AWSQueryConnection): + APIVersion = boto.config.get('Boto', 'autoscale_version', '2011-01-01') + DefaultRegionEndpoint = boto.config.get('Boto', 'autoscale_endpoint', + 'autoscaling.us-east-1.amazonaws.com') + DefaultRegionName = boto.config.get('Boto', 'autoscale_region_name', + 'us-east-1') + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + security_token=None, validate_certs=True, profile_name=None, + use_block_device_types=False): + """ + Init method to create a new connection to the AutoScaling service. + + B{Note:} The host argument is overridden by the host specified in the + boto configuration file. + + + """ + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint, + AutoScaleConnection) + self.region = region + self.use_block_device_types = use_block_device_types + super(AutoScaleConnection, self).__init__(aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, + self.region.endpoint, debug, + https_connection_factory, path=path, + security_token=security_token, + validate_certs=validate_certs, + profile_name=profile_name) + + def _required_auth_capability(self): + return ['hmac-v4'] + + def build_list_params(self, params, items, label): + """ + Items is a list of dictionaries or strings:: + + [ + { + 'Protocol' : 'HTTP', + 'LoadBalancerPort' : '80', + 'InstancePort' : '80' + }, + .. + ] etc. + + or:: + + ['us-east-1b',...] 
+ """ + # different from EC2 list params + for i in range(1, len(items) + 1): + if isinstance(items[i - 1], dict): + for k, v in six.iteritems(items[i - 1]): + if isinstance(v, dict): + for kk, vv in six.iteritems(v): + params['%s.member.%d.%s.%s' % (label, i, k, kk)] = vv + else: + params['%s.member.%d.%s' % (label, i, k)] = v + elif isinstance(items[i - 1], six.string_types): + params['%s.member.%d' % (label, i)] = items[i - 1] + + def _update_group(self, op, as_group): + params = {'AutoScalingGroupName': as_group.name, + 'LaunchConfigurationName': as_group.launch_config_name, + 'MinSize': as_group.min_size, + 'MaxSize': as_group.max_size} + # get availability zone information (required param) + zones = as_group.availability_zones + self.build_list_params(params, zones, 'AvailabilityZones') + if as_group.desired_capacity is not None: + params['DesiredCapacity'] = as_group.desired_capacity + if as_group.vpc_zone_identifier: + params['VPCZoneIdentifier'] = as_group.vpc_zone_identifier + if as_group.health_check_period: + params['HealthCheckGracePeriod'] = as_group.health_check_period + if as_group.health_check_type: + params['HealthCheckType'] = as_group.health_check_type + if as_group.default_cooldown: + params['DefaultCooldown'] = as_group.default_cooldown + if as_group.placement_group: + params['PlacementGroup'] = as_group.placement_group + if as_group.instance_id: + params['InstanceId'] = as_group.instance_id + if as_group.termination_policies: + self.build_list_params(params, as_group.termination_policies, + 'TerminationPolicies') + if op.startswith('Create'): + # you can only associate load balancers with an autoscale + # group at creation time + if as_group.load_balancers: + self.build_list_params(params, as_group.load_balancers, + 'LoadBalancerNames') + if as_group.tags: + for i, tag in enumerate(as_group.tags): + tag.build_params(params, i + 1) + return self.get_object(op, params, Request) + + def attach_instances(self, name, instance_ids): + """ + Attach instances to an autoscaling group. + """ + params = { + 'AutoScalingGroupName': name, + } + self.build_list_params(params, instance_ids, 'InstanceIds') + return self.get_status('AttachInstances', params) + + def detach_instances(self, name, instance_ids, decrement_capacity=True): + """ + Detach instances from an Auto Scaling group. + + :type name: str + :param name: The name of the Auto Scaling group from which to detach instances. + + :type instance_ids: list + :param instance_ids: Instance ids to be detached from the Auto Scaling group. + + :type decrement_capacity: bool + :param decrement_capacity: Whether to decrement the size of the + Auto Scaling group or not. + """ + + params = {'AutoScalingGroupName': name} + params['ShouldDecrementDesiredCapacity'] = 'true' if decrement_capacity else 'false' + + self.build_list_params(params, instance_ids, 'InstanceIds') + return self.get_status('DetachInstances', params) + + def create_auto_scaling_group(self, as_group): + """ + Create auto scaling group. + """ + return self._update_group('CreateAutoScalingGroup', as_group) + + def delete_auto_scaling_group(self, name, force_delete=False): + """ + Deletes the specified auto scaling group if the group has no instances + and no scaling activities in progress. 
+ """ + if(force_delete): + params = {'AutoScalingGroupName': name, 'ForceDelete': 'true'} + else: + params = {'AutoScalingGroupName': name} + return self.get_object('DeleteAutoScalingGroup', params, Request) + + def create_launch_configuration(self, launch_config): + """ + Creates a new Launch Configuration. + + :type launch_config: :class:`boto.ec2.autoscale.launchconfig.LaunchConfiguration` + :param launch_config: LaunchConfiguration object. + """ + params = {'ImageId': launch_config.image_id, + 'LaunchConfigurationName': launch_config.name, + 'InstanceType': launch_config.instance_type} + if launch_config.key_name: + params['KeyName'] = launch_config.key_name + if launch_config.user_data: + user_data = launch_config.user_data + if isinstance(user_data, six.text_type): + user_data = user_data.encode('utf-8') + params['UserData'] = base64.b64encode(user_data).decode('utf-8') + if launch_config.kernel_id: + params['KernelId'] = launch_config.kernel_id + if launch_config.ramdisk_id: + params['RamdiskId'] = launch_config.ramdisk_id + if launch_config.block_device_mappings: + [x.autoscale_build_list_params(params) for x in launch_config.block_device_mappings] + if launch_config.security_groups: + self.build_list_params(params, launch_config.security_groups, + 'SecurityGroups') + if launch_config.instance_monitoring: + params['InstanceMonitoring.Enabled'] = 'true' + else: + params['InstanceMonitoring.Enabled'] = 'false' + if launch_config.spot_price is not None: + params['SpotPrice'] = str(launch_config.spot_price) + if launch_config.instance_profile_name is not None: + params['IamInstanceProfile'] = launch_config.instance_profile_name + if launch_config.ebs_optimized: + params['EbsOptimized'] = 'true' + else: + params['EbsOptimized'] = 'false' + if launch_config.associate_public_ip_address is True: + params['AssociatePublicIpAddress'] = 'true' + elif launch_config.associate_public_ip_address is False: + params['AssociatePublicIpAddress'] = 'false' + if launch_config.volume_type: + params['VolumeType'] = launch_config.volume_type + if launch_config.delete_on_termination: + params['DeleteOnTermination'] = 'true' + else: + params['DeleteOnTermination'] = 'false' + if launch_config.iops: + params['Iops'] = launch_config.iops + if launch_config.classic_link_vpc_id: + params['ClassicLinkVPCId'] = launch_config.classic_link_vpc_id + if launch_config.classic_link_vpc_security_groups: + self.build_list_params( + params, + launch_config.classic_link_vpc_security_groups, + 'ClassicLinkVPCSecurityGroups' + ) + return self.get_object('CreateLaunchConfiguration', params, + Request, verb='POST') + + def get_account_limits(self): + """ + Returns the limits for the Auto Scaling resources currently granted for + your AWS account. + """ + params = {} + return self.get_object('DescribeAccountLimits', params, AccountLimits) + + def create_scaling_policy(self, scaling_policy): + """ + Creates a new Scaling Policy. + + :type scaling_policy: :class:`boto.ec2.autoscale.policy.ScalingPolicy` + :param scaling_policy: ScalingPolicy object. 
+ """ + params = {'AdjustmentType': scaling_policy.adjustment_type, + 'AutoScalingGroupName': scaling_policy.as_name, + 'PolicyName': scaling_policy.name, + 'ScalingAdjustment': scaling_policy.scaling_adjustment} + + if scaling_policy.adjustment_type == "PercentChangeInCapacity" and \ + scaling_policy.min_adjustment_step is not None: + params['MinAdjustmentStep'] = scaling_policy.min_adjustment_step + + if scaling_policy.cooldown is not None: + params['Cooldown'] = scaling_policy.cooldown + + return self.get_object('PutScalingPolicy', params, Request) + + def delete_launch_configuration(self, launch_config_name): + """ + Deletes the specified LaunchConfiguration. + + The specified launch configuration must not be attached to an Auto + Scaling group. Once this call completes, the launch configuration is no + longer available for use. + """ + params = {'LaunchConfigurationName': launch_config_name} + return self.get_object('DeleteLaunchConfiguration', params, Request) + + def get_all_groups(self, names=None, max_records=None, next_token=None): + """ + Returns a full description of each Auto Scaling group in the given + list. This includes all Amazon EC2 instances that are members of the + group. If a list of names is not provided, the service returns the full + details of all Auto Scaling groups. + + This action supports pagination by returning a token if there are more + pages to retrieve. To get the next page, call this action again with + the returned token as the NextToken parameter. + + :type names: list + :param names: List of group names which should be searched for. + + :type max_records: int + :param max_records: Maximum amount of groups to return. + + :rtype: list + :returns: List of :class:`boto.ec2.autoscale.group.AutoScalingGroup` + instances. + """ + params = {} + if max_records: + params['MaxRecords'] = max_records + if next_token: + params['NextToken'] = next_token + if names: + self.build_list_params(params, names, 'AutoScalingGroupNames') + return self.get_list('DescribeAutoScalingGroups', params, + [('member', AutoScalingGroup)]) + + def get_all_launch_configurations(self, **kwargs): + """ + Returns a full description of the launch configurations given the + specified names. + + If no names are specified, then the full details of all launch + configurations are returned. + + :type names: list + :param names: List of configuration names which should be searched for. + + :type max_records: int + :param max_records: Maximum amount of configurations to return. + + :type next_token: str + :param next_token: If you have more results than can be returned + at once, pass in this parameter to page through all results. + + :rtype: list + :returns: List of + :class:`boto.ec2.autoscale.launchconfig.LaunchConfiguration` + instances. + """ + params = {} + max_records = kwargs.get('max_records', None) + names = kwargs.get('names', None) + if max_records is not None: + params['MaxRecords'] = max_records + if names: + self.build_list_params(params, names, 'LaunchConfigurationNames') + next_token = kwargs.get('next_token') + if next_token: + params['NextToken'] = next_token + return self.get_list('DescribeLaunchConfigurations', params, + [('member', LaunchConfiguration)]) + + def get_all_activities(self, autoscale_group, activity_ids=None, + max_records=None, next_token=None): + """ + Get all activities for the given autoscaling group. + + This action supports pagination by returning a token if there are more + pages to retrieve. 
To get the next page, call this action again with
+        the returned token as the NextToken parameter.
+
+        :type autoscale_group: str or
+            :class:`boto.ec2.autoscale.group.AutoScalingGroup` object
+        :param autoscale_group: The auto scaling group to get activities on.
+
+        :type max_records: int
+        :param max_records: Maximum number of activities to return.
+
+        :rtype: list
+        :returns: List of
+            :class:`boto.ec2.autoscale.activity.Activity` instances.
+        """
+        name = autoscale_group
+        if isinstance(autoscale_group, AutoScalingGroup):
+            name = autoscale_group.name
+        params = {'AutoScalingGroupName': name}
+        if max_records:
+            params['MaxRecords'] = max_records
+        if next_token:
+            params['NextToken'] = next_token
+        if activity_ids:
+            self.build_list_params(params, activity_ids, 'ActivityIds')
+        return self.get_list('DescribeScalingActivities',
+                             params, [('member', Activity)])
+
+    def get_termination_policies(self):
+        """Gets all valid termination policies.
+
+        These values can then be used as the termination_policies arg
+        when creating and updating autoscale groups.
+        """
+        return self.get_object('DescribeTerminationPolicyTypes',
+                               {}, TerminationPolicies)
+
+    def delete_scheduled_action(self, scheduled_action_name,
+                                autoscale_group=None):
+        """
+        Deletes a previously scheduled action.
+
+        :type scheduled_action_name: str
+        :param scheduled_action_name: The name of the action you want
+            to delete.
+
+        :type autoscale_group: str
+        :param autoscale_group: The name of the autoscale group.
+        """
+        params = {'ScheduledActionName': scheduled_action_name}
+        if autoscale_group:
+            params['AutoScalingGroupName'] = autoscale_group
+        return self.get_status('DeleteScheduledAction', params)
+
+    def terminate_instance(self, instance_id, decrement_capacity=True):
+        """
+        Terminates the specified instance. The desired capacity of the
+        group can also be adjusted, if necessary.
+
+        :type instance_id: str
+        :param instance_id: The ID of the instance to be terminated.
+
+        :type decrement_capacity: bool
+        :param decrement_capacity: Whether to decrement the size of the
+            autoscaling group or not.
+        """
+        params = {'InstanceId': instance_id}
+        if decrement_capacity:
+            params['ShouldDecrementDesiredCapacity'] = 'true'
+        else:
+            params['ShouldDecrementDesiredCapacity'] = 'false'
+        return self.get_object('TerminateInstanceInAutoScalingGroup', params,
+                               Activity)
+
+    def delete_policy(self, policy_name, autoscale_group=None):
+        """
+        Delete a policy.
+
+        :type policy_name: str
+        :param policy_name: The name or ARN of the policy to delete.
+
+        :type autoscale_group: str
+        :param autoscale_group: The name of the autoscale group.
+        """
+        params = {'PolicyName': policy_name}
+        if autoscale_group:
+            params['AutoScalingGroupName'] = autoscale_group
+        return self.get_status('DeletePolicy', params)
+
+    def get_all_adjustment_types(self):
+        """Returns the scaling adjustment types supported by the service."""
+        return self.get_list('DescribeAdjustmentTypes', {},
+                             [('member', AdjustmentType)])
+
+    def get_all_autoscaling_instances(self, instance_ids=None,
+                                      max_records=None, next_token=None):
+        """
+        Returns a description of each Auto Scaling instance in the
+        instance_ids list. If a list is not provided, the service returns
+        the full details of all instances up to a maximum of fifty.
+
+        This action supports pagination by returning a token if there are
+        more pages to retrieve. To get the next page, call this action again
+        with the returned token as the NextToken parameter.
+
+        :type instance_ids: list
+        :param instance_ids: List of Autoscaling Instance IDs which should be
+            searched for.
+ + :type max_records: int + :param max_records: Maximum number of results to return. + + :rtype: list + :returns: List of + :class:`boto.ec2.autoscale.instance.Instance` objects. + """ + params = {} + if instance_ids: + self.build_list_params(params, instance_ids, 'InstanceIds') + if max_records: + params['MaxRecords'] = max_records + if next_token: + params['NextToken'] = next_token + return self.get_list('DescribeAutoScalingInstances', + params, [('member', Instance)]) + + def get_all_metric_collection_types(self): + """ + Returns a list of metrics and a corresponding list of granularities + for each metric. + """ + return self.get_object('DescribeMetricCollectionTypes', + {}, MetricCollectionTypes) + + def get_all_policies(self, as_group=None, policy_names=None, + max_records=None, next_token=None): + """ + Returns descriptions of what each policy does. This action supports + pagination. If the response includes a token, there are more records + available. To get the additional records, repeat the request with the + response token as the NextToken parameter. + + If no group name or list of policy names are provided, all + available policies are returned. + + :type as_group: str + :param as_group: The name of the + :class:`boto.ec2.autoscale.group.AutoScalingGroup` to filter for. + + :type policy_names: list + :param policy_names: List of policy names which should be searched for. + + :type max_records: int + :param max_records: Maximum amount of groups to return. + + :type next_token: str + :param next_token: If you have more results than can be returned + at once, pass in this parameter to page through all results. + """ + params = {} + if as_group: + params['AutoScalingGroupName'] = as_group + if policy_names: + self.build_list_params(params, policy_names, 'PolicyNames') + if max_records: + params['MaxRecords'] = max_records + if next_token: + params['NextToken'] = next_token + return self.get_list('DescribePolicies', params, + [('member', ScalingPolicy)]) + + def get_all_scaling_process_types(self): + """ + Returns scaling process types for use in the ResumeProcesses and + SuspendProcesses actions. + """ + return self.get_list('DescribeScalingProcessTypes', {}, + [('member', ProcessType)]) + + def suspend_processes(self, as_group, scaling_processes=None): + """ + Suspends Auto Scaling processes for an Auto Scaling group. + + :type as_group: string + :param as_group: The auto scaling group to suspend processes on. + + :type scaling_processes: list + :param scaling_processes: Processes you want to suspend. If omitted, + all processes will be suspended. + """ + params = {'AutoScalingGroupName': as_group} + if scaling_processes: + self.build_list_params(params, scaling_processes, + 'ScalingProcesses') + return self.get_status('SuspendProcesses', params) + + def resume_processes(self, as_group, scaling_processes=None): + """ + Resumes Auto Scaling processes for an Auto Scaling group. + + :type as_group: string + :param as_group: The auto scaling group to resume processes on. + + :type scaling_processes: list + :param scaling_processes: Processes you want to resume. If omitted, all + processes will be resumed. 
+ """ + params = {'AutoScalingGroupName': as_group} + + if scaling_processes: + self.build_list_params(params, scaling_processes, + 'ScalingProcesses') + return self.get_status('ResumeProcesses', params) + + def create_scheduled_group_action(self, as_group, name, time=None, + desired_capacity=None, + min_size=None, max_size=None, + start_time=None, end_time=None, + recurrence=None): + """ + Creates a scheduled scaling action for a Auto Scaling group. If you + leave a parameter unspecified, the corresponding value remains + unchanged in the affected Auto Scaling group. + + :type as_group: string + :param as_group: The auto scaling group to get activities on. + + :type name: string + :param name: Scheduled action name. + + :type time: datetime.datetime + :param time: The time for this action to start. (Depracated) + + :type desired_capacity: int + :param desired_capacity: The number of EC2 instances that should + be running in this group. + + :type min_size: int + :param min_size: The minimum size for the new auto scaling group. + + :type max_size: int + :param max_size: The minimum size for the new auto scaling group. + + :type start_time: datetime.datetime + :param start_time: The time for this action to start. When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action will start and stop. + + :type end_time: datetime.datetime + :param end_time: The time for this action to end. When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action will start and stop. + + :type recurrence: string + :param recurrence: The time when recurring future actions will start. Start time is specified by the user following the Unix cron syntax format. EXAMPLE: '0 10 * * *' + """ + params = {'AutoScalingGroupName': as_group, + 'ScheduledActionName': name} + if start_time is not None: + params['StartTime'] = start_time.isoformat() + if end_time is not None: + params['EndTime'] = end_time.isoformat() + if recurrence is not None: + params['Recurrence'] = recurrence + if time: + params['Time'] = time.isoformat() + if desired_capacity is not None: + params['DesiredCapacity'] = desired_capacity + if min_size is not None: + params['MinSize'] = min_size + if max_size is not None: + params['MaxSize'] = max_size + return self.get_status('PutScheduledUpdateGroupAction', params) + + def get_all_scheduled_actions(self, as_group=None, start_time=None, + end_time=None, scheduled_actions=None, + max_records=None, next_token=None): + params = {} + if as_group: + params['AutoScalingGroupName'] = as_group + if scheduled_actions: + self.build_list_params(params, scheduled_actions, + 'ScheduledActionNames') + if max_records: + params['MaxRecords'] = max_records + if next_token: + params['NextToken'] = next_token + return self.get_list('DescribeScheduledActions', params, + [('member', ScheduledUpdateGroupAction)]) + + def disable_metrics_collection(self, as_group, metrics=None): + """ + Disables monitoring of group metrics for the Auto Scaling group + specified in AutoScalingGroupName. You can specify the list of affected + metrics with the Metrics parameter. + """ + params = {'AutoScalingGroupName': as_group} + + if metrics: + self.build_list_params(params, metrics, 'Metrics') + return self.get_status('DisableMetricsCollection', params) + + def enable_metrics_collection(self, as_group, granularity, metrics=None): + """ + Enables monitoring of group metrics for the Auto Scaling group + specified in AutoScalingGroupName. 
+    def execute_policy(self, policy_name, as_group=None, honor_cooldown=None):
+        """
+        Runs the named scaling policy, optionally honoring the cooldown
+        period of the associated Auto Scaling group.
+        """
+        params = {'PolicyName': policy_name}
+        if as_group:
+            params['AutoScalingGroupName'] = as_group
+        if honor_cooldown:
+            params['HonorCooldown'] = honor_cooldown
+        return self.get_status('ExecutePolicy', params)
+
+    def put_notification_configuration(self, autoscale_group, topic, notification_types):
+        """
+        Configures an Auto Scaling group to send notifications when
+        specified events take place.
+
+        :type autoscale_group: str or
+            :class:`boto.ec2.autoscale.group.AutoScalingGroup` object
+        :param autoscale_group: The Auto Scaling group to put notification
+            configuration on.
+
+        :type topic: str
+        :param topic: The Amazon Resource Name (ARN) of the Amazon Simple
+            Notification Service (SNS) topic.
+
+        :type notification_types: list
+        :param notification_types: The type of events that will trigger
+            the notification. Valid types are:
+            'autoscaling:EC2_INSTANCE_LAUNCH',
+            'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
+            'autoscaling:EC2_INSTANCE_TERMINATE',
+            'autoscaling:EC2_INSTANCE_TERMINATE_ERROR',
+            'autoscaling:TEST_NOTIFICATION'
+        """
+        name = autoscale_group
+        if isinstance(autoscale_group, AutoScalingGroup):
+            name = autoscale_group.name
+
+        params = {'AutoScalingGroupName': name,
+                  'TopicARN': topic}
+        self.build_list_params(params, notification_types, 'NotificationTypes')
+        return self.get_status('PutNotificationConfiguration', params)
+
+    def delete_notification_configuration(self, autoscale_group, topic):
+        """
+        Deletes notifications created by put_notification_configuration.
+
+        :type autoscale_group: str or
+            :class:`boto.ec2.autoscale.group.AutoScalingGroup` object
+        :param autoscale_group: The Auto Scaling group to delete notification
+            configuration from.
+
+        :type topic: str
+        :param topic: The Amazon Resource Name (ARN) of the Amazon Simple
+            Notification Service (SNS) topic.
+        """
+        name = autoscale_group
+        if isinstance(autoscale_group, AutoScalingGroup):
+            name = autoscale_group.name
+
+        params = {'AutoScalingGroupName': name,
+                  'TopicARN': topic}
+
+        return self.get_status('DeleteNotificationConfiguration', params)
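A usage sketch for the notification calls above; it is not part of the patch, and the group name and SNS topic ARN are hypothetical:

    import boto.ec2.autoscale

    conn = boto.ec2.autoscale.connect_to_region('us-east-1')
    topic = 'arn:aws:sns:us-east-1:123456789012:my-topic'
    conn.put_notification_configuration(
        'my-group', topic,
        ['autoscaling:EC2_INSTANCE_LAUNCH', 'autoscaling:EC2_INSTANCE_TERMINATE'])
    conn.delete_notification_configuration('my-group', topic)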
+    def set_instance_health(self, instance_id, health_status,
+                            should_respect_grace_period=True):
+        """
+        Explicitly set the health status of an instance.
+
+        :type instance_id: str
+        :param instance_id: The identifier of the EC2 instance.
+
+        :type health_status: str
+        :param health_status: The health status of the instance. "Healthy"
+            means that the instance is healthy and should remain in service.
+            "Unhealthy" means that the instance is unhealthy and Auto
+            Scaling should terminate and replace it.
+
+        :type should_respect_grace_period: bool
+        :param should_respect_grace_period: If True, this call should
+            respect the grace period associated with the group.
+        """
+        params = {'InstanceId': instance_id,
+                  'HealthStatus': health_status}
+        if should_respect_grace_period:
+            params['ShouldRespectGracePeriod'] = 'true'
+        else:
+            params['ShouldRespectGracePeriod'] = 'false'
+        return self.get_status('SetInstanceHealth', params)
+
+    def set_desired_capacity(self, group_name, desired_capacity, honor_cooldown=False):
+        """
+        Adjusts the desired size of the AutoScalingGroup by initiating scaling
+        activities. When reducing the size of the group, it is not possible to define
+        which Amazon EC2 instances will be terminated. This applies to any Auto Scaling
+        decisions that might result in terminating instances.
+
+        :type group_name: string
+        :param group_name: name of the auto scaling group
+
+        :type desired_capacity: integer
+        :param desired_capacity: new capacity setting for auto scaling group
+
+        :type honor_cooldown: boolean
+        :param honor_cooldown: If True, respect the cooldown period
+            associated with the group; by default any cooldown period is
+            overridden.
+        """
+        params = {'AutoScalingGroupName': group_name,
+                  'DesiredCapacity': desired_capacity}
+        if honor_cooldown:
+            params['HonorCooldown'] = 'true'
+
+        return self.get_status('SetDesiredCapacity', params)
+
+    # Tag methods
+
+    def get_all_tags(self, filters=None, max_records=None, next_token=None):
+        """
+        Lists the Auto Scaling group tags.
+
+        This action supports pagination by returning a token if there
+        are more pages to retrieve. To get the next page, call this
+        action again with the returned token as the NextToken
+        parameter.
+
+        :type filters: dict
+        :param filters: The value of the filter type used to identify
+            the tags to be returned. NOT IMPLEMENTED YET.
+
+        :type max_records: int
+        :param max_records: Maximum number of tags to return.
+
+        :rtype: list
+        :returns: List of :class:`boto.ec2.autoscale.tag.Tag`
+            instances.
+        """
+        params = {}
+        if max_records:
+            params['MaxRecords'] = max_records
+        if next_token:
+            params['NextToken'] = next_token
+        return self.get_list('DescribeTags', params,
+                             [('member', Tag)])
+
+    def create_or_update_tags(self, tags):
+        """
+        Creates new tags or updates existing tags for an Auto Scaling group.
+
+        :type tags: List of :class:`boto.ec2.autoscale.tag.Tag`
+        :param tags: The new or updated tags.
+        """
+        params = {}
+        for i, tag in enumerate(tags):
+            tag.build_params(params, i + 1)
+        return self.get_status('CreateOrUpdateTags', params, verb='POST')
+
+    def delete_tags(self, tags):
+        """
+        Deletes existing tags for an Auto Scaling group.
+
+        :type tags: List of :class:`boto.ec2.autoscale.tag.Tag`
+        :param tags: The tags to delete.
+        """
+        params = {}
+        for i, tag in enumerate(tags):
+            tag.build_params(params, i + 1)
+        return self.get_status('DeleteTags', params, verb='POST')
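A usage sketch for the tag methods above; it is not part of the patch, and the key, value and group name are hypothetical:

    import boto.ec2.autoscale
    from boto.ec2.autoscale.tag import Tag

    conn = boto.ec2.autoscale.connect_to_region('us-east-1')
    tag = Tag(key='env', value='staging', propagate_at_launch=True,
              resource_id='my-group')
    conn.create_or_update_tags([tag])
    conn.delete_tags([tag])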
+ """ + params = {} + for i, tag in enumerate(tags): + tag.build_params(params, i + 1) + return self.get_status('DeleteTags', params, verb='POST') diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/activity.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/activity.py new file mode 100644 index 0000000000000000000000000000000000000000..bfe32f436d41de21806493470f2ba7e16f693e06 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/activity.py @@ -0,0 +1,73 @@ +# Copyright (c) 2009-2011 Reza Lotun http://reza.lotun.name/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from datetime import datetime + + +class Activity(object): + def __init__(self, connection=None): + self.connection = connection + self.start_time = None + self.end_time = None + self.activity_id = None + self.progress = None + self.status_code = None + self.cause = None + self.description = None + self.status_message = None + self.group_name = None + + def __repr__(self): + return 'Activity<%s>: For group:%s, progress:%s, cause:%s' % (self.activity_id, + self.group_name, + self.status_message, + self.cause) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'ActivityId': + self.activity_id = value + elif name == 'AutoScalingGroupName': + self.group_name = value + elif name == 'StartTime': + try: + self.start_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ') + except ValueError: + self.start_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') + elif name == 'EndTime': + try: + self.end_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ') + except ValueError: + self.end_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') + elif name == 'Progress': + self.progress = value + elif name == 'Cause': + self.cause = value + elif name == 'Description': + self.description = value + elif name == 'StatusMessage': + self.status_message = value + elif name == 'StatusCode': + self.status_code = value + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/group.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/group.py new file mode 100644 index 0000000000000000000000000000000000000000..c3c041275def0a447226b5918103f45dca5fd937 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/group.py @@ -0,0 +1,361 @@ +# Copyright (c) 2009-2011 Reza Lotun http://reza.lotun.name/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy 
of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.ec2.elb.listelement import ListElement +from boto.resultset import ResultSet +from boto.ec2.autoscale.launchconfig import LaunchConfiguration +from boto.ec2.autoscale.request import Request +from boto.ec2.autoscale.instance import Instance +from boto.ec2.autoscale.tag import Tag + + +class ProcessType(object): + def __init__(self, connection=None): + self.connection = connection + self.process_name = None + + def __repr__(self): + return 'ProcessType(%s)' % self.process_name + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'ProcessName': + self.process_name = value + + +class SuspendedProcess(object): + def __init__(self, connection=None): + self.connection = connection + self.process_name = None + self.reason = None + + def __repr__(self): + return 'SuspendedProcess(%s, %s)' % (self.process_name, self.reason) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'ProcessName': + self.process_name = value + elif name == 'SuspensionReason': + self.reason = value + + +class EnabledMetric(object): + def __init__(self, connection=None, metric=None, granularity=None): + self.connection = connection + self.metric = metric + self.granularity = granularity + + def __repr__(self): + return 'EnabledMetric(%s, %s)' % (self.metric, self.granularity) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Granularity': + self.granularity = value + elif name == 'Metric': + self.metric = value + + +class TerminationPolicies(list): + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'member': + self.append(value) + + +class AutoScalingGroup(object): + def __init__(self, connection=None, name=None, + launch_config=None, availability_zones=None, + load_balancers=None, default_cooldown=None, + health_check_type=None, health_check_period=None, + placement_group=None, vpc_zone_identifier=None, + desired_capacity=None, min_size=None, max_size=None, + tags=None, termination_policies=None, instance_id=None, + **kwargs): + """ + Creates a new AutoScalingGroup with the specified name. + + You must not have already used up your entire quota of + AutoScalingGroups in order for this call to be successful. Once the + creation request is completed, the AutoScalingGroup is ready to be + used in other calls. 
+ + :type name: str + :param name: Name of autoscaling group (required). + + :type availability_zones: list + :param availability_zones: List of availability zones (required). + + :type default_cooldown: int + :param default_cooldown: Number of seconds after a Scaling Activity + completes before any further scaling activities can start. + + :type desired_capacity: int + :param desired_capacity: The desired capacity for the group. + + :type health_check_period: str + :param health_check_period: Length of time in seconds after a new + EC2 instance comes into service that Auto Scaling starts + checking its health. + + :type health_check_type: str + :param health_check_type: The service you want the health status from, + Amazon EC2 or Elastic Load Balancer. + + :type launch_config: str or LaunchConfiguration + :param launch_config: Name of launch configuration (required). + + :type load_balancers: list + :param load_balancers: List of load balancers. + + :type max_size: int + :param max_size: Maximum size of group (required). + + :type min_size: int + :param min_size: Minimum size of group (required). + + :type placement_group: str + :param placement_group: Physical location of your cluster placement + group created in Amazon EC2. + + :type vpc_zone_identifier: str or list + :param vpc_zone_identifier: A comma-separated string or python list of + the subnet identifiers of the Virtual Private Cloud. + + :type tags: list + :param tags: List of :class:`boto.ec2.autoscale.tag.Tag`s + + :type termination_policies: list + :param termination_policies: A list of termination policies. Valid values + are: "OldestInstance", "NewestInstance", "OldestLaunchConfiguration", + "ClosestToNextInstanceHour", "Default". If no value is specified, + the "Default" value is used. + + :type instance_id: str + :param instance_id: The ID of the Amazon EC2 instance you want to use + to create the Auto Scaling group. + + :rtype: :class:`boto.ec2.autoscale.group.AutoScalingGroup` + :return: An autoscale group. 
+ """ + self.name = name or kwargs.get('group_name') # backwards compat + self.connection = connection + self.min_size = int(min_size) if min_size is not None else None + self.max_size = int(max_size) if max_size is not None else None + self.created_time = None + # backwards compatibility + default_cooldown = default_cooldown or kwargs.get('cooldown') + if default_cooldown is not None: + default_cooldown = int(default_cooldown) + self.default_cooldown = default_cooldown + self.launch_config_name = launch_config + if launch_config and isinstance(launch_config, LaunchConfiguration): + self.launch_config_name = launch_config.name + self.desired_capacity = desired_capacity + lbs = load_balancers or [] + self.load_balancers = ListElement(lbs) + zones = availability_zones or [] + self.availability_zones = ListElement(zones) + self.health_check_period = health_check_period + self.health_check_type = health_check_type + self.placement_group = placement_group + self.autoscaling_group_arn = None + if type(vpc_zone_identifier) is list: + vpc_zone_identifier = ','.join(vpc_zone_identifier) + self.vpc_zone_identifier = vpc_zone_identifier + self.instances = None + self.tags = tags or None + termination_policies = termination_policies or [] + self.termination_policies = ListElement(termination_policies) + self.instance_id = instance_id + + # backwards compatible access to 'cooldown' param + def _get_cooldown(self): + return self.default_cooldown + + def _set_cooldown(self, val): + self.default_cooldown = val + + cooldown = property(_get_cooldown, _set_cooldown) + + def __repr__(self): + return 'AutoScaleGroup<%s>' % self.name + + def startElement(self, name, attrs, connection): + if name == 'Instances': + self.instances = ResultSet([('member', Instance)]) + return self.instances + elif name == 'LoadBalancerNames': + return self.load_balancers + elif name == 'AvailabilityZones': + return self.availability_zones + elif name == 'EnabledMetrics': + self.enabled_metrics = ResultSet([('member', EnabledMetric)]) + return self.enabled_metrics + elif name == 'SuspendedProcesses': + self.suspended_processes = ResultSet([('member', SuspendedProcess)]) + return self.suspended_processes + elif name == 'Tags': + self.tags = ResultSet([('member', Tag)]) + return self.tags + elif name == 'TerminationPolicies': + return self.termination_policies + else: + return + + def endElement(self, name, value, connection): + if name == 'MinSize': + self.min_size = int(value) + elif name == 'AutoScalingGroupARN': + self.autoscaling_group_arn = value + elif name == 'CreatedTime': + self.created_time = value + elif name == 'DefaultCooldown': + self.default_cooldown = int(value) + elif name == 'LaunchConfigurationName': + self.launch_config_name = value + elif name == 'DesiredCapacity': + self.desired_capacity = int(value) + elif name == 'MaxSize': + self.max_size = int(value) + elif name == 'AutoScalingGroupName': + self.name = value + elif name == 'PlacementGroup': + self.placement_group = value + elif name == 'HealthCheckGracePeriod': + try: + self.health_check_period = int(value) + except ValueError: + self.health_check_period = None + elif name == 'HealthCheckType': + self.health_check_type = value + elif name == 'VPCZoneIdentifier': + self.vpc_zone_identifier = value + elif name == 'InstanceId': + self.instance_id = value + else: + setattr(self, name, value) + + def set_capacity(self, capacity): + """ + Set the desired capacity for the group. 
+ """ + params = {'AutoScalingGroupName': self.name, + 'DesiredCapacity': capacity} + req = self.connection.get_object('SetDesiredCapacity', params, + Request) + self.connection.last_request = req + return req + + def update(self): + """ + Sync local changes with AutoScaling group. + """ + return self.connection._update_group('UpdateAutoScalingGroup', self) + + def shutdown_instances(self): + """ + Convenience method which shuts down all instances associated with + this group. + """ + self.min_size = 0 + self.max_size = 0 + self.desired_capacity = 0 + self.update() + + def delete(self, force_delete=False): + """ + Delete this auto-scaling group if no instances attached or no + scaling activities in progress. + """ + return self.connection.delete_auto_scaling_group(self.name, + force_delete) + + def get_activities(self, activity_ids=None, max_records=50): + """ + Get all activies for this group. + """ + return self.connection.get_all_activities(self, activity_ids, + max_records) + + def put_notification_configuration(self, topic, notification_types): + """ + Configures an Auto Scaling group to send notifications when + specified events take place. Valid notification types are: + 'autoscaling:EC2_INSTANCE_LAUNCH', + 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', + 'autoscaling:EC2_INSTANCE_TERMINATE', + 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR', + 'autoscaling:TEST_NOTIFICATION' + """ + return self.connection.put_notification_configuration(self, + topic, + notification_types) + + def delete_notification_configuration(self, topic): + """ + Deletes notifications created by put_notification_configuration. + """ + return self.connection.delete_notification_configuration(self, topic) + + def suspend_processes(self, scaling_processes=None): + """ + Suspends Auto Scaling processes for an Auto Scaling group. + """ + return self.connection.suspend_processes(self.name, scaling_processes) + + def resume_processes(self, scaling_processes=None): + """ + Resumes Auto Scaling processes for an Auto Scaling group. 
+ """ + return self.connection.resume_processes(self.name, scaling_processes) + + +class AutoScalingGroupMetric(object): + def __init__(self, connection=None): + + self.connection = connection + self.metric = None + self.granularity = None + + def __repr__(self): + return 'AutoScalingGroupMetric:%s' % self.metric + + def startElement(self, name, attrs, connection): + return + + def endElement(self, name, value, connection): + if name == 'Metric': + self.metric = value + elif name == 'Granularity': + self.granularity = value + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/instance.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/instance.py new file mode 100644 index 0000000000000000000000000000000000000000..6095c17be5a4d7957a1f3465cee1a828a92f23b8 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/instance.py @@ -0,0 +1,59 @@ +# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class Instance(object): + def __init__(self, connection=None): + self.connection = connection + self.instance_id = None + self.health_status = None + self.launch_config_name = None + self.lifecycle_state = None + self.availability_zone = None + self.group_name = None + + def __repr__(self): + r = 'Instance' % (self.metrics, self.granularities) + + def startElement(self, name, attrs, connection): + if name == 'Granularities': + self.granularities = ResultSet([('member', self.Granularity)]) + return self.granularities + elif name == 'Metrics': + self.metrics = ResultSet([('member', self.Metric)]) + return self.metrics + + def endElement(self, name, value, connection): + return + + +class ScalingPolicy(object): + def __init__(self, connection=None, **kwargs): + """ + Scaling Policy + + :type name: str + :param name: Name of scaling policy. + + :type adjustment_type: str + :param adjustment_type: Specifies the type of adjustment. Valid values are `ChangeInCapacity`, `ExactCapacity` and `PercentChangeInCapacity`. + + :type as_name: str or int + :param as_name: Name or ARN of the Auto Scaling Group. + + :type scaling_adjustment: int + :param scaling_adjustment: Value of adjustment (type specified in `adjustment_type`). + + :type min_adjustment_step: int + :param min_adjustment_step: Value of min adjustment step required to + apply the scaling policy (only make sense when use `PercentChangeInCapacity` as adjustment_type.). 
+ + :type cooldown: int + :param cooldown: Time (in seconds) before Alarm related Scaling Activities can start after the previous Scaling Activity ends. + + """ + self.name = kwargs.get('name', None) + self.adjustment_type = kwargs.get('adjustment_type', None) + self.as_name = kwargs.get('as_name', None) + self.scaling_adjustment = kwargs.get('scaling_adjustment', None) + self.cooldown = kwargs.get('cooldown', None) + self.connection = connection + self.min_adjustment_step = kwargs.get('min_adjustment_step', None) + + def __repr__(self): + return 'ScalingPolicy(%s group:%s adjustment:%s)' % (self.name, + self.as_name, + self.adjustment_type) + + def startElement(self, name, attrs, connection): + if name == 'Alarms': + self.alarms = ResultSet([('member', Alarm)]) + return self.alarms + + def endElement(self, name, value, connection): + if name == 'PolicyName': + self.name = value + elif name == 'AutoScalingGroupName': + self.as_name = value + elif name == 'PolicyARN': + self.policy_arn = value + elif name == 'ScalingAdjustment': + self.scaling_adjustment = int(value) + elif name == 'Cooldown': + self.cooldown = int(value) + elif name == 'AdjustmentType': + self.adjustment_type = value + elif name == 'MinAdjustmentStep': + self.min_adjustment_step = int(value) + + def delete(self): + return self.connection.delete_policy(self.name, self.as_name) + + +class TerminationPolicies(list): + def __init__(self, connection=None, **kwargs): + pass + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'member': + self.append(value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/request.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/request.py new file mode 100644 index 0000000000000000000000000000000000000000..b17b534fedc4bd3f556001638a245c1ddeea056b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/request.py @@ -0,0 +1,38 @@ +# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
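The ScalingPolicy defined above is normally created and triggered through AutoScaleConnection; a minimal sketch, not part of the patch, with hypothetical policy and group names:

    import boto.ec2.autoscale
    from boto.ec2.autoscale import ScalingPolicy

    conn = boto.ec2.autoscale.connect_to_region('us-east-1')
    policy = ScalingPolicy(name='scale-up', adjustment_type='ChangeInCapacity',
                           as_name='my-group', scaling_adjustment=2, cooldown=300)
    conn.create_scaling_policy(policy)
    conn.execute_policy('scale-up', as_group='my-group')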
+ + +class Request(object): + def __init__(self, connection=None): + self.connection = connection + self.request_id = '' + + def __repr__(self): + return 'Request:%s' % self.request_id + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'RequestId': + self.request_id = value + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/scheduled.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/scheduled.py new file mode 100644 index 0000000000000000000000000000000000000000..8d2eda407dbc2625f88a47e911bc50a84aac58ec --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/scheduled.py @@ -0,0 +1,77 @@ +# Copyright (c) 2009-2010 Reza Lotun http://reza.lotun.name/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
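Before the parser class below, a sketch of how scheduled actions of this kind are created and listed through the connection; it is not part of the patch, and the names and dates are hypothetical:

    from datetime import datetime
    import boto.ec2.autoscale

    conn = boto.ec2.autoscale.connect_to_region('us-east-1')
    conn.create_scheduled_group_action('my-group', 'scale-up-mornings',
                                       desired_capacity=4,
                                       start_time=datetime(2015, 9, 1, 10, 0),
                                       end_time=datetime(2015, 12, 31, 10, 0),
                                       recurrence='0 10 * * *')
    for action in conn.get_all_scheduled_actions(as_group='my-group'):
        print(action.name, action.recurrence)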
+ + +from datetime import datetime + + +class ScheduledUpdateGroupAction(object): + def __init__(self, connection=None): + self.connection = connection + self.name = None + self.action_arn = None + self.as_group = None + self.time = None + self.start_time = None + self.end_time = None + self.recurrence = None + self.desired_capacity = None + self.max_size = None + self.min_size = None + + def __repr__(self): + return 'ScheduledUpdateGroupAction:%s' % self.name + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'DesiredCapacity': + self.desired_capacity = value + elif name == 'ScheduledActionName': + self.name = value + elif name == 'AutoScalingGroupName': + self.as_group = value + elif name == 'MaxSize': + self.max_size = int(value) + elif name == 'MinSize': + self.min_size = int(value) + elif name == 'ScheduledActionARN': + self.action_arn = value + elif name == 'Recurrence': + self.recurrence = value + elif name == 'Time': + try: + self.time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ') + except ValueError: + self.time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') + elif name == 'StartTime': + try: + self.start_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ') + except ValueError: + self.start_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') + elif name == 'EndTime': + try: + self.end_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ') + except ValueError: + self.end_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/tag.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/tag.py new file mode 100644 index 0000000000000000000000000000000000000000..a783edf0965600ed19a8cda7d5ee68b4202bc949 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/autoscale/tag.py @@ -0,0 +1,84 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class Tag(object): + """ + A name/value tag on an AutoScalingGroup resource. + + :ivar key: The key of the tag. + :ivar value: The value of the tag. + :ivar propagate_at_launch: Boolean value which specifies whether the + new tag will be applied to instances launched after the tag is created. + :ivar resource_id: The name of the autoscaling group. 
+ :ivar resource_type: The only supported resource type at this time + is "auto-scaling-group". + """ + + def __init__(self, connection=None, key=None, value=None, + propagate_at_launch=False, resource_id=None, + resource_type='auto-scaling-group'): + self.connection = connection + self.key = key + self.value = value + self.propagate_at_launch = propagate_at_launch + self.resource_id = resource_id + self.resource_type = resource_type + + def __repr__(self): + return 'Tag(%s=%s)' % (self.key, self.value) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Key': + self.key = value + elif name == 'Value': + self.value = value + elif name == 'PropagateAtLaunch': + if value.lower() == 'true': + self.propagate_at_launch = True + else: + self.propagate_at_launch = False + elif name == 'ResourceId': + self.resource_id = value + elif name == 'ResourceType': + self.resource_type = value + + def build_params(self, params, i): + """ + Populates a dictionary with the name/value pairs necessary + to identify this Tag in a request. + """ + prefix = 'Tags.member.%d.' % i + params[prefix + 'ResourceId'] = self.resource_id + params[prefix + 'ResourceType'] = self.resource_type + params[prefix + 'Key'] = self.key + params[prefix + 'Value'] = self.value + if self.propagate_at_launch: + params[prefix + 'PropagateAtLaunch'] = 'true' + else: + params[prefix + 'PropagateAtLaunch'] = 'false' + + def delete(self): + return self.connection.delete_tags([self]) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/blockdevicemapping.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/blockdevicemapping.py new file mode 100644 index 0000000000000000000000000000000000000000..2f4e1faf4b41b269fd4b28c9b3eb35132573656a --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/blockdevicemapping.py @@ -0,0 +1,165 @@ +# Copyright (c) 2009-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + +class BlockDeviceType(object): + """ + Represents parameters for a block device. 
+ """ + + def __init__(self, + connection=None, + ephemeral_name=None, + no_device=False, + volume_id=None, + snapshot_id=None, + status=None, + attach_time=None, + delete_on_termination=False, + size=None, + volume_type=None, + iops=None, + encrypted=None): + self.connection = connection + self.ephemeral_name = ephemeral_name + self.no_device = no_device + self.volume_id = volume_id + self.snapshot_id = snapshot_id + self.status = status + self.attach_time = attach_time + self.delete_on_termination = delete_on_termination + self.size = size + self.volume_type = volume_type + self.iops = iops + self.encrypted = encrypted + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + lname = name.lower() + if name == 'volumeId': + self.volume_id = value + elif lname == 'virtualname': + self.ephemeral_name = value + elif lname == 'nodevice': + self.no_device = (value == 'true') + elif lname == 'snapshotid': + self.snapshot_id = value + elif lname == 'volumesize': + self.size = int(value) + elif lname == 'status': + self.status = value + elif lname == 'attachtime': + self.attach_time = value + elif lname == 'deleteontermination': + self.delete_on_termination = (value == 'true') + elif lname == 'volumetype': + self.volume_type = value + elif lname == 'iops': + self.iops = int(value) + elif lname == 'encrypted': + self.encrypted = (value == 'true') + else: + setattr(self, name, value) + +# for backwards compatibility +EBSBlockDeviceType = BlockDeviceType + + +class BlockDeviceMapping(dict): + """ + Represents a collection of BlockDeviceTypes when creating ec2 instances. + + Example: + dev_sda1 = BlockDeviceType() + dev_sda1.size = 100 # change root volume to 100GB instead of default + bdm = BlockDeviceMapping() + bdm['/dev/sda1'] = dev_sda1 + reservation = image.run(..., block_device_map=bdm, ...) + """ + + def __init__(self, connection=None): + """ + :type connection: :class:`boto.ec2.EC2Connection` + :param connection: Optional connection. 
+ """ + dict.__init__(self) + self.connection = connection + self.current_name = None + self.current_value = None + + def startElement(self, name, attrs, connection): + lname = name.lower() + if lname in ['ebs', 'virtualname']: + self.current_value = BlockDeviceType(self) + return self.current_value + + def endElement(self, name, value, connection): + lname = name.lower() + if lname in ['device', 'devicename']: + self.current_name = value + elif lname in ['item', 'member']: + self[self.current_name] = self.current_value + + def ec2_build_list_params(self, params, prefix=''): + pre = '%sBlockDeviceMapping' % prefix + return self._build_list_params(params, prefix=pre) + + def autoscale_build_list_params(self, params, prefix=''): + pre = '%sBlockDeviceMappings.member' % prefix + return self._build_list_params(params, prefix=pre) + + def _build_list_params(self, params, prefix=''): + i = 1 + for dev_name in self: + pre = '%s.%d' % (prefix, i) + params['%s.DeviceName' % pre] = dev_name + block_dev = self[dev_name] + if block_dev.ephemeral_name: + params['%s.VirtualName' % pre] = block_dev.ephemeral_name + else: + if block_dev.no_device: + params['%s.NoDevice' % pre] = '' + else: + if block_dev.snapshot_id: + params['%s.Ebs.SnapshotId' % pre] = block_dev.snapshot_id + if block_dev.size: + params['%s.Ebs.VolumeSize' % pre] = block_dev.size + if block_dev.delete_on_termination: + params['%s.Ebs.DeleteOnTermination' % pre] = 'true' + else: + params['%s.Ebs.DeleteOnTermination' % pre] = 'false' + if block_dev.volume_type: + params['%s.Ebs.VolumeType' % pre] = block_dev.volume_type + if block_dev.iops is not None: + params['%s.Ebs.Iops' % pre] = block_dev.iops + # The encrypted flag (even if False) cannot be specified for the root EBS + # volume. + if block_dev.encrypted is not None: + if block_dev.encrypted: + params['%s.Ebs.Encrypted' % pre] = 'true' + else: + params['%s.Ebs.Encrypted' % pre] = 'false' + + i += 1 diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/bundleinstance.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/bundleinstance.py new file mode 100644 index 0000000000000000000000000000000000000000..e241da9adf8be3061995c81a8ded949e016a5b61 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/bundleinstance.py @@ -0,0 +1,78 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Represents an EC2 Bundle Task +""" + +from boto.ec2.ec2object import EC2Object + + +class BundleInstanceTask(EC2Object): + + def __init__(self, connection=None): + super(BundleInstanceTask, self).__init__(connection) + self.id = None + self.instance_id = None + self.progress = None + self.start_time = None + self.state = None + self.bucket = None + self.prefix = None + self.upload_policy = None + self.upload_policy_signature = None + self.update_time = None + self.code = None + self.message = None + + def __repr__(self): + return 'BundleInstanceTask:%s' % self.id + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'bundleId': + self.id = value + elif name == 'instanceId': + self.instance_id = value + elif name == 'progress': + self.progress = value + elif name == 'startTime': + self.start_time = value + elif name == 'state': + self.state = value + elif name == 'bucket': + self.bucket = value + elif name == 'prefix': + self.prefix = value + elif name == 'uploadPolicy': + self.upload_policy = value + elif name == 'uploadPolicySignature': + self.upload_policy_signature = value + elif name == 'updateTime': + self.update_time = value + elif name == 'code': + self.code = value + elif name == 'message': + self.message = value + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/buyreservation.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/buyreservation.py new file mode 100644 index 0000000000000000000000000000000000000000..786d0fed282da2ddf27d438468a02ddb07ee2110 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/buyreservation.py @@ -0,0 +1,85 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
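+# A hedged usage sketch (not part of boto): BundleInstanceTask objects from
+# bundleinstance.py above are normally obtained through
+# EC2Connection.bundle_instance(); the instance id, bucket, prefix and
+# policy below are placeholders.
+#
+#     task = conn.bundle_instance('i-12345678', 'my-bucket', 'my-prefix',
+#                                 upload_policy)
+#     print(task.state, task.progress)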
+import boto.ec2 +from boto.sdb.db.property import StringProperty, IntegerProperty +from boto.manage import propget +from boto.compat import six + +InstanceTypes = ['m1.small', 'm1.large', 'm1.xlarge', + 'c1.medium', 'c1.xlarge', 'm2.xlarge', + 'm2.2xlarge', 'm2.4xlarge', 'cc1.4xlarge', + 't1.micro'] + + +class BuyReservation(object): + + def get_region(self, params): + if not params.get('region', None): + prop = StringProperty(name='region', verbose_name='EC2 Region', + choices=boto.ec2.regions) + params['region'] = propget.get(prop, choices=boto.ec2.regions) + + def get_instance_type(self, params): + if not params.get('instance_type', None): + prop = StringProperty(name='instance_type', verbose_name='Instance Type', + choices=InstanceTypes) + params['instance_type'] = propget.get(prop) + + def get_quantity(self, params): + if not params.get('quantity', None): + prop = IntegerProperty(name='quantity', verbose_name='Number of Instances') + params['quantity'] = propget.get(prop) + + def get_zone(self, params): + if not params.get('zone', None): + prop = StringProperty(name='zone', verbose_name='EC2 Availability Zone', + choices=self.ec2.get_all_zones) + params['zone'] = propget.get(prop) + + def get(self, params): + self.get_region(params) + self.ec2 = params['region'].connect() + self.get_instance_type(params) + self.get_zone(params) + self.get_quantity(params) + +if __name__ == "__main__": + obj = BuyReservation() + params = {} + obj.get(params) + offerings = obj.ec2.get_all_reserved_instances_offerings(instance_type=params['instance_type'], + availability_zone=params['zone'].name) + print('\nThe following Reserved Instances Offerings are available:\n') + for offering in offerings: + offering.describe() + prop = StringProperty(name='offering', verbose_name='Offering', + choices=offerings) + offering = propget.get(prop) + print('\nYou have chosen this offering:') + offering.describe() + unit_price = float(offering.fixed_price) + total_price = unit_price * params['quantity'] + print('!!! You are about to purchase %d of these offerings for a total of $%.2f !!!' % (params['quantity'], total_price)) + answer = six.moves.input('Are you sure you want to do this? If so, enter YES: ') + if answer.strip().lower() == 'yes': + offering.purchase(params['quantity']) + else: + print('Purchase cancelled') diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/cloudwatch/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/cloudwatch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9b150b4e8f27b5a0a4251ba32764cc9d50aaa0f8 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/cloudwatch/__init__.py @@ -0,0 +1,593 @@ +# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +""" +This module provides an interface to the Elastic Compute Cloud (EC2) +CloudWatch service from AWS. +""" +from boto.compat import json, map, six, zip +from boto.connection import AWSQueryConnection +from boto.ec2.cloudwatch.metric import Metric +from boto.ec2.cloudwatch.alarm import MetricAlarm, MetricAlarms, AlarmHistoryItem +from boto.ec2.cloudwatch.datapoint import Datapoint +from boto.regioninfo import RegionInfo, get_regions, load_regions +import boto + +RegionData = load_regions().get('cloudwatch', {}) + + +def regions(): + """ + Get all available regions for the CloudWatch service. + + :rtype: list + :return: A list of :class:`boto.RegionInfo` instances + """ + return get_regions('cloudwatch', connection_cls=CloudWatchConnection) + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.ec2.cloudwatch.CloudWatchConnection`. + + :param str region_name: The name of the region to connect to. + + :rtype: :class:`boto.ec2.CloudWatchConnection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None + + +class CloudWatchConnection(AWSQueryConnection): + + APIVersion = boto.config.get('Boto', 'cloudwatch_version', '2010-08-01') + DefaultRegionName = boto.config.get('Boto', 'cloudwatch_region_name', + 'us-east-1') + DefaultRegionEndpoint = boto.config.get('Boto', + 'cloudwatch_region_endpoint', + 'monitoring.us-east-1.amazonaws.com') + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + security_token=None, validate_certs=True, profile_name=None): + """ + Init method to create a new connection to EC2 Monitoring Service. + + B{Note:} The host argument is overridden by the host specified in the + boto configuration file. 
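+
+        Example (a hedged sketch, not from the original docs; the region
+        name is illustrative)::
+
+            import boto.ec2.cloudwatch
+            conn = boto.ec2.cloudwatch.connect_to_region('us-east-1')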
+ """ + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + self.region = region + + # Ugly hack to get around both a bug in Python and a + # misconfigured SSL cert for the eu-west-1 endpoint + if self.region.name == 'eu-west-1': + validate_certs = False + + super(CloudWatchConnection, self).__init__(aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, + self.region.endpoint, debug, + https_connection_factory, path, + security_token, + validate_certs=validate_certs, + profile_name=profile_name) + + def _required_auth_capability(self): + return ['hmac-v4'] + + def build_dimension_param(self, dimension, params): + prefix = 'Dimensions.member' + i = 0 + for dim_name in dimension: + dim_value = dimension[dim_name] + if dim_value: + if isinstance(dim_value, six.string_types): + dim_value = [dim_value] + for value in dim_value: + params['%s.%d.Name' % (prefix, i + 1)] = dim_name + params['%s.%d.Value' % (prefix, i + 1)] = value + i += 1 + else: + params['%s.%d.Name' % (prefix, i + 1)] = dim_name + i += 1 + + def build_list_params(self, params, items, label): + if isinstance(items, six.string_types): + items = [items] + for index, item in enumerate(items): + i = index + 1 + if isinstance(item, dict): + for k, v in six.iteritems(item): + params[label % (i, 'Name')] = k + if v is not None: + params[label % (i, 'Value')] = v + else: + params[label % i] = item + + def build_put_params(self, params, name, value=None, timestamp=None, + unit=None, dimensions=None, statistics=None): + args = (name, value, unit, dimensions, statistics, timestamp) + length = max(map(lambda a: len(a) if isinstance(a, list) else 1, args)) + + def aslist(a): + if isinstance(a, list): + if len(a) != length: + raise Exception('Must specify equal number of elements; expected %d.' % length) + return a + return [a] * length + + for index, (n, v, u, d, s, t) in enumerate(zip(*map(aslist, args))): + metric_data = {'MetricName': n} + + if timestamp: + metric_data['Timestamp'] = t.isoformat() + + if unit: + metric_data['Unit'] = u + + if dimensions: + self.build_dimension_param(d, metric_data) + + if statistics: + metric_data['StatisticValues.Maximum'] = s['maximum'] + metric_data['StatisticValues.Minimum'] = s['minimum'] + metric_data['StatisticValues.SampleCount'] = s['samplecount'] + metric_data['StatisticValues.Sum'] = s['sum'] + if value is not None: + msg = 'You supplied a value and statistics for a ' + \ + 'metric.Posting statistics and not value.' + boto.log.warn(msg) + elif value is not None: + metric_data['Value'] = v + else: + raise Exception('Must specify a value or statistics to put.') + + for key, val in six.iteritems(metric_data): + params['MetricData.member.%d.%s' % (index + 1, key)] = val + + def get_metric_statistics(self, period, start_time, end_time, metric_name, + namespace, statistics, dimensions=None, + unit=None): + """ + Get time-series data for one or more statistics of a given metric. + + :type period: integer + :param period: The granularity, in seconds, of the returned datapoints. + Period must be at least 60 seconds and must be a multiple + of 60. The default value is 60. + + :type start_time: datetime + :param start_time: The time stamp to use for determining the + first datapoint to return. The value specified is + inclusive; results include datapoints with the time stamp + specified. + + :type end_time: datetime + :param end_time: The time stamp to use for determining the + last datapoint to return. 
The value specified is + exclusive; results will include datapoints up to the time + stamp specified. + + :type metric_name: string + :param metric_name: The metric name. + + :type namespace: string + :param namespace: The metric's namespace. + + :type statistics: list + :param statistics: A list of statistics names Valid values: + Average | Sum | SampleCount | Maximum | Minimum + + :type dimensions: dict + :param dimensions: A dictionary of dimension key/values where + the key is the dimension name and the value + is either a scalar value or an iterator + of values to be associated with that + dimension. + + :type unit: string + :param unit: The unit for the metric. Value values are: + Seconds | Microseconds | Milliseconds | Bytes | Kilobytes | + Megabytes | Gigabytes | Terabytes | Bits | Kilobits | + Megabits | Gigabits | Terabits | Percent | Count | + Bytes/Second | Kilobytes/Second | Megabytes/Second | + Gigabytes/Second | Terabytes/Second | Bits/Second | + Kilobits/Second | Megabits/Second | Gigabits/Second | + Terabits/Second | Count/Second | None + + :rtype: list + """ + params = {'Period': period, + 'MetricName': metric_name, + 'Namespace': namespace, + 'StartTime': start_time.isoformat(), + 'EndTime': end_time.isoformat()} + self.build_list_params(params, statistics, 'Statistics.member.%d') + if dimensions: + self.build_dimension_param(dimensions, params) + if unit: + params['Unit'] = unit + return self.get_list('GetMetricStatistics', params, + [('member', Datapoint)]) + + def list_metrics(self, next_token=None, dimensions=None, + metric_name=None, namespace=None): + """ + Returns a list of the valid metrics for which there is recorded + data available. + + :type next_token: str + :param next_token: A maximum of 500 metrics will be returned + at one time. If more results are available, the ResultSet + returned will contain a non-Null next_token attribute. + Passing that token as a parameter to list_metrics will + retrieve the next page of metrics. + + :type dimensions: dict + :param dimensions: A dictionary containing name/value + pairs that will be used to filter the results. The key in + the dictionary is the name of a Dimension. The value in + the dictionary is either a scalar value of that Dimension + name that you want to filter on or None if you want all + metrics with that Dimension name. To be included in the + result a metric must contain all specified dimensions, + although the metric may contain additional dimensions beyond + the requested metrics. The Dimension names, and values must + be strings between 1 and 250 characters long. A maximum of + 10 dimensions are allowed. + + :type metric_name: str + :param metric_name: The name of the Metric to filter against. If None, + all Metric names will be returned. + + :type namespace: str + :param namespace: A Metric namespace to filter against (e.g. AWS/EC2). + If None, Metrics from all namespaces will be returned. + """ + params = {} + if next_token: + params['NextToken'] = next_token + if dimensions: + self.build_dimension_param(dimensions, params) + if metric_name: + params['MetricName'] = metric_name + if namespace: + params['Namespace'] = namespace + + return self.get_list('ListMetrics', params, [('member', Metric)]) + + def put_metric_data(self, namespace, name, value=None, timestamp=None, + unit=None, dimensions=None, statistics=None): + """ + Publishes metric data points to Amazon CloudWatch. Amazon Cloudwatch + associates the data points with the specified metric. 
If the specified + metric does not exist, Amazon CloudWatch creates the metric. If a list + is specified for some, but not all, of the arguments, the remaining + arguments are repeated a corresponding number of times. + + :type namespace: str + :param namespace: The namespace of the metric. + + :type name: str or list + :param name: The name of the metric. + + :type value: float or list + :param value: The value for the metric. + + :type timestamp: datetime or list + :param timestamp: The time stamp used for the metric. If not specified, + the default value is set to the time the metric data was received. + + :type unit: string or list + :param unit: The unit of the metric. Valid Values: Seconds | + Microseconds | Milliseconds | Bytes | Kilobytes | + Megabytes | Gigabytes | Terabytes | Bits | Kilobits | + Megabits | Gigabits | Terabits | Percent | Count | + Bytes/Second | Kilobytes/Second | Megabytes/Second | + Gigabytes/Second | Terabytes/Second | Bits/Second | + Kilobits/Second | Megabits/Second | Gigabits/Second | + Terabits/Second | Count/Second | None + + :type dimensions: dict + :param dimensions: Add extra name value pairs to associate + with the metric, i.e.: + {'name1': value1, 'name2': (value2, value3)} + + :type statistics: dict or list + :param statistics: Use a statistic set instead of a value, for example:: + + {'maximum': 30, 'minimum': 1, 'samplecount': 100, 'sum': 10000} + """ + params = {'Namespace': namespace} + self.build_put_params(params, name, value=value, timestamp=timestamp, + unit=unit, dimensions=dimensions, statistics=statistics) + + return self.get_status('PutMetricData', params, verb="POST") + + def describe_alarms(self, action_prefix=None, alarm_name_prefix=None, + alarm_names=None, max_records=None, state_value=None, + next_token=None): + """ + Retrieves alarms with the specified names. If no name is specified, all + alarms for the user are returned. Alarms can be retrieved by using only + a prefix for the alarm name, the alarm state, or a prefix for any + action. + + :type action_prefix: string + :param action_name: The action name prefix. + + :type alarm_name_prefix: string + :param alarm_name_prefix: The alarm name prefix. AlarmNames cannot + be specified if this parameter is specified. + + :type alarm_names: list + :param alarm_names: A list of alarm names to retrieve information for. + + :type max_records: int + :param max_records: The maximum number of alarm descriptions + to retrieve. + + :type state_value: string + :param state_value: The state value to be used in matching alarms. + + :type next_token: string + :param next_token: The token returned by a previous call to + indicate that there is more data. + + :rtype list + """ + params = {} + if action_prefix: + params['ActionPrefix'] = action_prefix + if alarm_name_prefix: + params['AlarmNamePrefix'] = alarm_name_prefix + elif alarm_names: + self.build_list_params(params, alarm_names, 'AlarmNames.member.%s') + if max_records: + params['MaxRecords'] = max_records + if next_token: + params['NextToken'] = next_token + if state_value: + params['StateValue'] = state_value + + result = self.get_list('DescribeAlarms', params, + [('MetricAlarms', MetricAlarms)]) + ret = result[0] + ret.next_token = result.next_token + return ret + + def describe_alarm_history(self, alarm_name=None, + start_date=None, end_date=None, + max_records=None, history_item_type=None, + next_token=None): + """ + Retrieves history for the specified alarm. Filter alarms by date range + or item type. 
If an alarm name is not specified, Amazon CloudWatch
+        returns histories for all of the owner's alarms.
+
+        Amazon CloudWatch retains the history of deleted alarms for a period of
+        six weeks. If an alarm has been deleted, its history can still be
+        queried.
+
+        :type alarm_name: string
+        :param alarm_name: The name of the alarm.
+
+        :type start_date: datetime
+        :param start_date: The starting date to retrieve alarm history.
+
+        :type end_date: datetime
+        :param end_date: The ending date to retrieve alarm history.
+
+        :type history_item_type: string
+        :param history_item_type: The type of alarm histories to retrieve
+            (ConfigurationUpdate | StateUpdate | Action)
+
+        :type max_records: int
+        :param max_records: The maximum number of alarm descriptions
+            to retrieve.
+
+        :type next_token: string
+        :param next_token: The token returned by a previous call to indicate
+            that there is more data.
+
+        :rtype: list
+        """
+        params = {}
+        if alarm_name:
+            params['AlarmName'] = alarm_name
+        if start_date:
+            params['StartDate'] = start_date.isoformat()
+        if end_date:
+            params['EndDate'] = end_date.isoformat()
+        if history_item_type:
+            params['HistoryItemType'] = history_item_type
+        if max_records:
+            params['MaxRecords'] = max_records
+        if next_token:
+            params['NextToken'] = next_token
+        return self.get_list('DescribeAlarmHistory', params,
+                             [('member', AlarmHistoryItem)])
+
+    def describe_alarms_for_metric(self, metric_name, namespace, period=None,
+                                   statistic=None, dimensions=None, unit=None):
+        """
+        Retrieves all alarms for a single metric. Specify a statistic, period,
+        or unit to filter the set of alarms further.
+
+        :type metric_name: string
+        :param metric_name: The name of the metric
+
+        :type namespace: string
+        :param namespace: The namespace of the metric.
+
+        :type period: int
+        :param period: The period in seconds over which the statistic
+            is applied.
+
+        :type statistic: string
+        :param statistic: The statistic for the metric.
+
+        :type dimensions: dict
+        :param dimensions: A dictionary containing name/value
+            pairs that will be used to filter the results. The key in
+            the dictionary is the name of a Dimension. The value in
+            the dictionary is either a scalar value of that Dimension
+            name that you want to filter on, a list of values to
+            filter on or None if you want all metrics with that
+            Dimension name.
+
+        :type unit: string
+
+        :rtype: list
+        """
+        params = {'MetricName': metric_name,
+                  'Namespace': namespace}
+        if period:
+            params['Period'] = period
+        if statistic:
+            params['Statistic'] = statistic
+        if dimensions:
+            self.build_dimension_param(dimensions, params)
+        if unit:
+            params['Unit'] = unit
+        return self.get_list('DescribeAlarmsForMetric', params,
+                             [('member', MetricAlarm)])
+
+    def put_metric_alarm(self, alarm):
+        """
+        Creates or updates an alarm and associates it with the specified Amazon
+        CloudWatch metric. Optionally, this operation can associate one or more
+        Amazon Simple Notification Service resources with the alarm.
+
+        When this operation creates an alarm, the alarm state is immediately
+        set to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is
+        set appropriately. Any actions associated with the StateValue are then
+        executed.
+
+        When updating an existing alarm, its StateValue is left unchanged.
+
+        :type alarm: boto.ec2.cloudwatch.alarm.MetricAlarm
+        :param alarm: MetricAlarm object.
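+
+        Example (a hedged sketch, not from the original docs; the alarm
+        name, namespace and threshold are illustrative only)::
+
+            from boto.ec2.cloudwatch.alarm import MetricAlarm
+            alarm = MetricAlarm(name='cpu-high', metric='CPUUtilization',
+                                namespace='AWS/EC2', statistic='Average',
+                                comparison='>=', threshold=90.0,
+                                period=300, evaluation_periods=2)
+            conn.put_metric_alarm(alarm)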
+ """ + params = { + 'AlarmName': alarm.name, + 'MetricName': alarm.metric, + 'Namespace': alarm.namespace, + 'Statistic': alarm.statistic, + 'ComparisonOperator': alarm.comparison, + 'Threshold': alarm.threshold, + 'EvaluationPeriods': alarm.evaluation_periods, + 'Period': alarm.period, + } + if alarm.actions_enabled is not None: + params['ActionsEnabled'] = alarm.actions_enabled + if alarm.alarm_actions: + self.build_list_params(params, alarm.alarm_actions, + 'AlarmActions.member.%s') + if alarm.description: + params['AlarmDescription'] = alarm.description + if alarm.dimensions: + self.build_dimension_param(alarm.dimensions, params) + if alarm.insufficient_data_actions: + self.build_list_params(params, alarm.insufficient_data_actions, + 'InsufficientDataActions.member.%s') + if alarm.ok_actions: + self.build_list_params(params, alarm.ok_actions, + 'OKActions.member.%s') + if alarm.unit: + params['Unit'] = alarm.unit + alarm.connection = self + return self.get_status('PutMetricAlarm', params) + create_alarm = put_metric_alarm + update_alarm = put_metric_alarm + + def delete_alarms(self, alarms): + """ + Deletes all specified alarms. In the event of an error, no + alarms are deleted. + + :type alarms: list + :param alarms: List of alarm names. + """ + params = {} + self.build_list_params(params, alarms, 'AlarmNames.member.%s') + return self.get_status('DeleteAlarms', params) + + def set_alarm_state(self, alarm_name, state_reason, state_value, + state_reason_data=None): + """ + Temporarily sets the state of an alarm. When the updated StateValue + differs from the previous value, the action configured for the + appropriate state is invoked. This is not a permanent change. The next + periodic alarm check (in about a minute) will set the alarm to its + actual state. + + :type alarm_name: string + :param alarm_name: Descriptive name for alarm. + + :type state_reason: string + :param state_reason: Human readable reason. + + :type state_value: string + :param state_value: OK | ALARM | INSUFFICIENT_DATA + + :type state_reason_data: string + :param state_reason_data: Reason string (will be jsonified). + """ + params = {'AlarmName': alarm_name, + 'StateReason': state_reason, + 'StateValue': state_value} + if state_reason_data: + params['StateReasonData'] = json.dumps(state_reason_data) + + return self.get_status('SetAlarmState', params) + + def enable_alarm_actions(self, alarm_names): + """ + Enables actions for the specified alarms. + + :type alarms: list + :param alarms: List of alarm names. + """ + params = {} + self.build_list_params(params, alarm_names, 'AlarmNames.member.%s') + return self.get_status('EnableAlarmActions', params) + + def disable_alarm_actions(self, alarm_names): + """ + Disables actions for the specified alarms. + + :type alarms: list + :param alarms: List of alarm names. 
+ """ + params = {} + self.build_list_params(params, alarm_names, 'AlarmNames.member.%s') + return self.get_status('DisableAlarmActions', params) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/cloudwatch/alarm.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/cloudwatch/alarm.py new file mode 100644 index 0000000000000000000000000000000000000000..c267ab0332a7d544dddd5df1d72dac0801af6bdf --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/cloudwatch/alarm.py @@ -0,0 +1,323 @@ +# Copyright (c) 2010 Reza Lotun http://reza.lotun.name +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from datetime import datetime +from boto.ec2.cloudwatch.listelement import ListElement +from boto.ec2.cloudwatch.dimension import Dimension +from boto.compat import json +from boto.compat import six + + +class MetricAlarms(list): + def __init__(self, connection=None): + """ + Parses a list of MetricAlarms. + """ + list.__init__(self) + self.connection = connection + + def startElement(self, name, attrs, connection): + if name == 'member': + metric_alarm = MetricAlarm(connection) + self.append(metric_alarm) + return metric_alarm + + def endElement(self, name, value, connection): + pass + + +class MetricAlarm(object): + + OK = 'OK' + ALARM = 'ALARM' + INSUFFICIENT_DATA = 'INSUFFICIENT_DATA' + + _cmp_map = { + '>=': 'GreaterThanOrEqualToThreshold', + '>': 'GreaterThanThreshold', + '<': 'LessThanThreshold', + '<=': 'LessThanOrEqualToThreshold', + } + _rev_cmp_map = dict((v, k) for (k, v) in six.iteritems(_cmp_map)) + + def __init__(self, connection=None, name=None, metric=None, + namespace=None, statistic=None, comparison=None, + threshold=None, period=None, evaluation_periods=None, + unit=None, description='', dimensions=None, + alarm_actions=None, insufficient_data_actions=None, + ok_actions=None): + """ + Creates a new Alarm. + + :type name: str + :param name: Name of alarm. + + :type metric: str + :param metric: Name of alarm's associated metric. + + :type namespace: str + :param namespace: The namespace for the alarm's metric. + + :type statistic: str + :param statistic: The statistic to apply to the alarm's associated + metric. + Valid values: SampleCount|Average|Sum|Minimum|Maximum + + :type comparison: str + :param comparison: Comparison used to compare statistic with threshold. + Valid values: >= | > | < | <= + + :type threshold: float + :param threshold: The value against which the specified statistic + is compared. 
+ + :type period: int + :param period: The period in seconds over which the specified + statistic is applied. + + :type evaluation_periods: int + :param evaluation_periods: The number of periods over which data is + compared to the specified threshold. + + :type unit: str + :param unit: Allowed Values are: + Seconds|Microseconds|Milliseconds, + Bytes|Kilobytes|Megabytes|Gigabytes|Terabytes, + Bits|Kilobits|Megabits|Gigabits|Terabits, + Percent|Count| + Bytes/Second|Kilobytes/Second|Megabytes/Second| + Gigabytes/Second|Terabytes/Second, + Bits/Second|Kilobits/Second|Megabits/Second, + Gigabits/Second|Terabits/Second|Count/Second|None + + :type description: str + :param description: Description of MetricAlarm + + :type dimensions: dict + :param dimensions: A dictionary of dimension key/values where + the key is the dimension name and the value + is either a scalar value or an iterator + of values to be associated with that + dimension. + Example: { + 'InstanceId': ['i-0123456', 'i-0123457'], + 'LoadBalancerName': 'test-lb' + } + + :type alarm_actions: list of strs + :param alarm_actions: A list of the ARNs of the actions to take in + ALARM state + + :type insufficient_data_actions: list of strs + :param insufficient_data_actions: A list of the ARNs of the actions to + take in INSUFFICIENT_DATA state + + :type ok_actions: list of strs + :param ok_actions: A list of the ARNs of the actions to take in OK state + """ + self.name = name + self.connection = connection + self.metric = metric + self.namespace = namespace + self.statistic = statistic + if threshold is not None: + self.threshold = float(threshold) + else: + self.threshold = None + self.comparison = self._cmp_map.get(comparison) + if period is not None: + self.period = int(period) + else: + self.period = None + if evaluation_periods is not None: + self.evaluation_periods = int(evaluation_periods) + else: + self.evaluation_periods = None + self.actions_enabled = None + self.alarm_arn = None + self.last_updated = None + self.description = description + self.dimensions = dimensions + self.state_reason = None + self.state_value = None + self.unit = unit + self.alarm_actions = alarm_actions + self.insufficient_data_actions = insufficient_data_actions + self.ok_actions = ok_actions + + def __repr__(self): + return 'MetricAlarm:%s[%s(%s) %s %s]' % (self.name, self.metric, + self.statistic, + self.comparison, + self.threshold) + + def startElement(self, name, attrs, connection): + if name == 'AlarmActions': + self.alarm_actions = ListElement() + return self.alarm_actions + elif name == 'InsufficientDataActions': + self.insufficient_data_actions = ListElement() + return self.insufficient_data_actions + elif name == 'OKActions': + self.ok_actions = ListElement() + return self.ok_actions + elif name == 'Dimensions': + self.dimensions = Dimension() + return self.dimensions + else: + pass + + def endElement(self, name, value, connection): + if name == 'ActionsEnabled': + self.actions_enabled = value + elif name == 'AlarmArn': + self.alarm_arn = value + elif name == 'AlarmConfigurationUpdatedTimestamp': + self.last_updated = value + elif name == 'AlarmDescription': + self.description = value + elif name == 'AlarmName': + self.name = value + elif name == 'ComparisonOperator': + setattr(self, 'comparison', self._rev_cmp_map[value]) + elif name == 'EvaluationPeriods': + self.evaluation_periods = int(value) + elif name == 'MetricName': + self.metric = value + elif name == 'Namespace': + self.namespace = value + elif name == 'Period': + self.period = 
int(value)
+        elif name == 'StateReason':
+            self.state_reason = value
+        elif name == 'StateValue':
+            self.state_value = value
+        elif name == 'Statistic':
+            self.statistic = value
+        elif name == 'Threshold':
+            self.threshold = float(value)
+        elif name == 'Unit':
+            self.unit = value
+        else:
+            setattr(self, name, value)
+
+    def set_state(self, value, reason, data=None):
+        """ Temporarily sets the state of an alarm.
+
+        :type value: str
+        :param value: OK | ALARM | INSUFFICIENT_DATA
+
+        :type reason: str
+        :param reason: Reason alarm set (human readable).
+
+        :type data: str
+        :param data: Reason data (will be jsonified).
+        """
+        return self.connection.set_alarm_state(self.name, reason, value, data)
+
+    def update(self):
+        return self.connection.update_alarm(self)
+
+    def enable_actions(self):
+        return self.connection.enable_alarm_actions([self.name])
+
+    def disable_actions(self):
+        return self.connection.disable_alarm_actions([self.name])
+
+    def describe_history(self, start_date=None, end_date=None, max_records=None,
+                         history_item_type=None, next_token=None):
+        return self.connection.describe_alarm_history(self.name, start_date,
+                                                      end_date, max_records,
+                                                      history_item_type,
+                                                      next_token)
+
+    def add_alarm_action(self, action_arn=None):
+        """
+        Adds an alarm action, represented as an SNS topic, to this alarm.
+        What to do when the alarm is triggered.
+
+        :type action_arn: str
+        :param action_arn: SNS topics to which notification should be
+            sent if the alarm goes to state ALARM.
+        """
+        if not action_arn:
+            return # Raise exception instead?
+        self.actions_enabled = 'true'
+        self.alarm_actions.append(action_arn)
+
+    def add_insufficient_data_action(self, action_arn=None):
+        """
+        Adds an insufficient_data action, represented as an SNS topic, to
+        this alarm. What to do when the insufficient_data state is reached.
+
+        :type action_arn: str
+        :param action_arn: SNS topics to which notification should be
+            sent if the alarm goes to state INSUFFICIENT_DATA.
+        """
+        if not action_arn:
+            return
+        self.actions_enabled = 'true'
+        self.insufficient_data_actions.append(action_arn)
+
+    def add_ok_action(self, action_arn=None):
+        """
+        Adds an ok action, represented as an SNS topic, to this alarm. What
+        to do when the ok state is reached.
+
+        :type action_arn: str
+        :param action_arn: SNS topics to which notification should be
+            sent if the alarm goes to state OK.
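+
+        Example (a hedged sketch; the topic ARN is a placeholder)::
+
+            alarm.add_ok_action('arn:aws:sns:us-east-1:123456789012:ops-topic')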
+ """ + if not action_arn: + return + self.actions_enabled = 'true' + self.ok_actions.append(action_arn) + + def delete(self): + self.connection.delete_alarms([self.name]) + + +class AlarmHistoryItem(object): + def __init__(self, connection=None): + self.connection = connection + + def __repr__(self): + return 'AlarmHistory:%s[%s at %s]' % (self.name, self.summary, self.timestamp) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'AlarmName': + self.name = value + elif name == 'HistoryData': + self.data = json.loads(value) + elif name == 'HistoryItemType': + self.tem_type = value + elif name == 'HistorySummary': + self.summary = value + elif name == 'Timestamp': + try: + self.timestamp = datetime.strptime(value, + '%Y-%m-%dT%H:%M:%S.%fZ') + except ValueError: + self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/cloudwatch/datapoint.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/cloudwatch/datapoint.py new file mode 100644 index 0000000000000000000000000000000000000000..94955acd564605f72bff552a6a9d8fcf5a219ab0 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/cloudwatch/datapoint.py @@ -0,0 +1,40 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# +from datetime import datetime + + +class Datapoint(dict): + + def __init__(self, connection=None): + dict.__init__(self) + self.connection = connection + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name in ['Average', 'Maximum', 'Minimum', 'Sum', 'SampleCount']: + self[name] = float(value) + elif name == 'Timestamp': + self[name] = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') + elif name != 'member': + self[name] = value diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/cloudwatch/dimension.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/cloudwatch/dimension.py new file mode 100644 index 0000000000000000000000000000000000000000..9ff4fb1160a1c4ea87787e16eb9e0e8896393b59 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/cloudwatch/dimension.py @@ -0,0 +1,38 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + +class Dimension(dict): + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Name': + self._name = value + elif name == 'Value': + if self._name in self: + self[self._name].append(value) + else: + self[self._name] = [value] + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/cloudwatch/listelement.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/cloudwatch/listelement.py new file mode 100644 index 0000000000000000000000000000000000000000..6a2904181bccf43bd9a479a436390391fb633f69 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/cloudwatch/listelement.py @@ -0,0 +1,30 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class ListElement(list): + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'member': + self.append(value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/cloudwatch/metric.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/cloudwatch/metric.py new file mode 100644 index 0000000000000000000000000000000000000000..f92f282a6598e56dedfd79f4fc1b1a4d32ce7047 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/cloudwatch/metric.py @@ -0,0 +1,168 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.ec2.cloudwatch.alarm import MetricAlarm +from boto.ec2.cloudwatch.dimension import Dimension + + +class Metric(object): + + Statistics = ['Minimum', 'Maximum', 'Sum', 'Average', 'SampleCount'] + Units = ['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', + 'Megabytes', 'Gigabytes', 'Terabytes', 'Bits', 'Kilobits', + 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', + 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', + 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', + 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', + 'Terabits/Second', 'Count/Second', None] + + def __init__(self, connection=None): + self.connection = connection + self.name = None + self.namespace = None + self.dimensions = None + + def __repr__(self): + return 'Metric:%s' % self.name + + def startElement(self, name, attrs, connection): + if name == 'Dimensions': + self.dimensions = Dimension() + return self.dimensions + + def endElement(self, name, value, connection): + if name == 'MetricName': + self.name = value + elif name == 'Namespace': + self.namespace = value + else: + setattr(self, name, value) + + def query(self, start_time, end_time, statistics, unit=None, period=60): + """ + :type start_time: datetime + :param start_time: The time stamp to use for determining the + first datapoint to return. 
The value specified is + inclusive; results include datapoints with the time stamp + specified. + + :type end_time: datetime + :param end_time: The time stamp to use for determining the + last datapoint to return. The value specified is + exclusive; results will include datapoints up to the time + stamp specified. + + :type statistics: list + :param statistics: A list of statistics names Valid values: + Average | Sum | SampleCount | Maximum | Minimum + + :type unit: string + :param unit: The unit for the metric. Value values are: + Seconds | Microseconds | Milliseconds | Bytes | Kilobytes | + Megabytes | Gigabytes | Terabytes | Bits | Kilobits | + Megabits | Gigabits | Terabits | Percent | Count | + Bytes/Second | Kilobytes/Second | Megabytes/Second | + Gigabytes/Second | Terabytes/Second | Bits/Second | + Kilobits/Second | Megabits/Second | Gigabits/Second | + Terabits/Second | Count/Second | None + + :type period: integer + :param period: The granularity, in seconds, of the returned datapoints. + Period must be at least 60 seconds and must be a multiple + of 60. The default value is 60. + + """ + if not isinstance(statistics, list): + statistics = [statistics] + return self.connection.get_metric_statistics(period, + start_time, + end_time, + self.name, + self.namespace, + statistics, + self.dimensions, + unit) + + def create_alarm(self, name, comparison, threshold, + period, evaluation_periods, + statistic, enabled=True, description=None, + dimensions=None, alarm_actions=None, ok_actions=None, + insufficient_data_actions=None, unit=None): + """ + Creates or updates an alarm and associates it with this metric. + Optionally, this operation can associate one or more + Amazon Simple Notification Service resources with the alarm. + + When this operation creates an alarm, the alarm state is immediately + set to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is + set appropriately. Any actions associated with the StateValue is then + executed. + + When updating an existing alarm, its StateValue is left unchanged. + + :type alarm: boto.ec2.cloudwatch.alarm.MetricAlarm + :param alarm: MetricAlarm object. + """ + if not dimensions: + dimensions = self.dimensions + alarm = MetricAlarm(self.connection, name, self.name, + self.namespace, statistic, comparison, + threshold, period, evaluation_periods, + unit, description, dimensions, + alarm_actions, insufficient_data_actions, + ok_actions) + if self.connection.put_metric_alarm(alarm): + return alarm + + def describe_alarms(self, period=None, statistic=None, + dimensions=None, unit=None): + """ + Retrieves all alarms for this metric. Specify a statistic, period, + or unit to filter the set of alarms further. + + :type period: int + :param period: The period in seconds over which the statistic + is applied. + + :type statistic: string + :param statistic: The statistic for the metric. + + :param dimension_filters: A dictionary containing name/value + pairs that will be used to filter the results. The key in + the dictionary is the name of a Dimension. The value in + the dictionary is either a scalar value of that Dimension + name that you want to filter on, a list of values to + filter on or None if you want all metrics with that + Dimension name. 
+
+        :type unit: string
+
+        :rtype: list
+        """
+        return self.connection.describe_alarms_for_metric(self.name,
+                                                          self.namespace,
+                                                          period,
+                                                          statistic,
+                                                          dimensions,
+                                                          unit)
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/connection.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/connection.py
new file mode 100644
index 0000000000000000000000000000000000000000..210de7bbdede419247a3257cb0cb466e27ec380a
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/connection.py
@@ -0,0 +1,4506 @@
+# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents a connection to the EC2 service.
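+
+A hedged usage sketch (the region name and AMI id are placeholders)::
+
+    import boto.ec2
+    conn = boto.ec2.connect_to_region('us-east-1')
+    image = conn.get_image('ami-12345678')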
+""" + +import base64 +import warnings +from datetime import datetime +from datetime import timedelta + +import boto +from boto.auth import detect_potential_sigv4 +from boto.connection import AWSQueryConnection +from boto.resultset import ResultSet +from boto.ec2.image import Image, ImageAttribute, CopyImage +from boto.ec2.instance import Reservation, Instance +from boto.ec2.instance import ConsoleOutput, InstanceAttribute +from boto.ec2.keypair import KeyPair +from boto.ec2.address import Address +from boto.ec2.volume import Volume, VolumeAttribute +from boto.ec2.snapshot import Snapshot +from boto.ec2.snapshot import SnapshotAttribute +from boto.ec2.zone import Zone +from boto.ec2.securitygroup import SecurityGroup +from boto.ec2.regioninfo import RegionInfo +from boto.ec2.instanceinfo import InstanceInfo +from boto.ec2.reservedinstance import ReservedInstancesOffering +from boto.ec2.reservedinstance import ReservedInstance +from boto.ec2.reservedinstance import ReservedInstanceListing +from boto.ec2.reservedinstance import ReservedInstancesConfiguration +from boto.ec2.reservedinstance import ModifyReservedInstancesResult +from boto.ec2.reservedinstance import ReservedInstancesModification +from boto.ec2.spotinstancerequest import SpotInstanceRequest +from boto.ec2.spotpricehistory import SpotPriceHistory +from boto.ec2.spotdatafeedsubscription import SpotDatafeedSubscription +from boto.ec2.bundleinstance import BundleInstanceTask +from boto.ec2.placementgroup import PlacementGroup +from boto.ec2.tag import Tag +from boto.ec2.instancetype import InstanceType +from boto.ec2.instancestatus import InstanceStatusSet +from boto.ec2.volumestatus import VolumeStatusSet +from boto.ec2.networkinterface import NetworkInterface +from boto.ec2.attributes import AccountAttribute, VPCAttribute +from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType +from boto.exception import EC2ResponseError +from boto.compat import six + +#boto.set_stream_logger('ec2') + + +class EC2Connection(AWSQueryConnection): + + APIVersion = boto.config.get('Boto', 'ec2_version', '2014-10-01') + DefaultRegionName = boto.config.get('Boto', 'ec2_region_name', 'us-east-1') + DefaultRegionEndpoint = boto.config.get('Boto', 'ec2_region_endpoint', + 'ec2.us-east-1.amazonaws.com') + ResponseError = EC2ResponseError + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, host=None, port=None, + proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + api_version=None, security_token=None, + validate_certs=True, profile_name=None): + """ + Init method to create a new connection to EC2. + """ + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + self.region = region + super(EC2Connection, self).__init__(aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, + self.region.endpoint, debug, + https_connection_factory, path, + security_token, + validate_certs=validate_certs, + profile_name=profile_name) + if api_version: + self.APIVersion = api_version + + def _required_auth_capability(self): + return ['hmac-v4'] + + def get_params(self): + """ + Returns a dictionary containing the value of all of the keyword + arguments passed when constructing this connection. 
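+
+        Example (a hedged sketch): the returned dict can seed a second,
+        similarly configured connection::
+
+            params = conn.get_params()
+            other = EC2Connection(**params)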
+ """ + param_names = ['aws_access_key_id', 'aws_secret_access_key', + 'is_secure', 'port', 'proxy', 'proxy_port', + 'proxy_user', 'proxy_pass', + 'debug', 'https_connection_factory'] + params = {} + for name in param_names: + params[name] = getattr(self, name) + return params + + def build_filter_params(self, params, filters): + if not isinstance(filters, dict): + filters = dict(filters) + + i = 1 + for name in filters: + aws_name = name + if not aws_name.startswith('tag:'): + aws_name = name.replace('_', '-') + params['Filter.%d.Name' % i] = aws_name + value = filters[name] + if not isinstance(value, list): + value = [value] + j = 1 + for v in value: + params['Filter.%d.Value.%d' % (i, j)] = v + j += 1 + i += 1 + + # Image methods + + def get_all_images(self, image_ids=None, owners=None, + executable_by=None, filters=None, dry_run=False): + """ + Retrieve all the EC2 images available on your account. + + :type image_ids: list + :param image_ids: A list of strings with the image IDs wanted + + :type owners: list + :param owners: A list of owner IDs, the special strings 'self', + 'amazon', and 'aws-marketplace', may be used to describe + images owned by you, Amazon or AWS Marketplace + respectively + + :type executable_by: list + :param executable_by: Returns AMIs for which the specified + user ID has explicit launch permissions + + :type filters: dict + :param filters: Optional filters that can be used to limit the + results returned. Filters are provided in the form of a + dictionary consisting of filter names as the key and + filter values as the value. The set of allowable filter + names/values is dependent on the request being performed. + Check the EC2 API guide for details. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.ec2.image.Image` + """ + params = {} + if image_ids: + self.build_list_params(params, image_ids, 'ImageId') + if owners: + self.build_list_params(params, owners, 'Owner') + if executable_by: + self.build_list_params(params, executable_by, 'ExecutableBy') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeImages', params, + [('item', Image)], verb='POST') + + def get_all_kernels(self, kernel_ids=None, owners=None, dry_run=False): + """ + Retrieve all the EC2 kernels available on your account. + Constructs a filter to allow the processing to happen server side. + + :type kernel_ids: list + :param kernel_ids: A list of strings with the image IDs wanted + + :type owners: list + :param owners: A list of owner IDs + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.ec2.image.Image` + """ + params = {} + if kernel_ids: + self.build_list_params(params, kernel_ids, 'ImageId') + if owners: + self.build_list_params(params, owners, 'Owner') + filter = {'image-type': 'kernel'} + self.build_filter_params(params, filter) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeImages', params, + [('item', Image)], verb='POST') + + def get_all_ramdisks(self, ramdisk_ids=None, owners=None, dry_run=False): + """ + Retrieve all the EC2 ramdisks available on your account. + Constructs a filter to allow the processing to happen server side. 
+ + :type ramdisk_ids: list + :param ramdisk_ids: A list of strings with the image IDs wanted + + :type owners: list + :param owners: A list of owner IDs + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.ec2.image.Image` + """ + params = {} + if ramdisk_ids: + self.build_list_params(params, ramdisk_ids, 'ImageId') + if owners: + self.build_list_params(params, owners, 'Owner') + filter = {'image-type': 'ramdisk'} + self.build_filter_params(params, filter) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeImages', params, + [('item', Image)], verb='POST') + + def get_image(self, image_id, dry_run=False): + """ + Shortcut method to retrieve a specific image (AMI). + + :type image_id: string + :param image_id: the ID of the Image to retrieve + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: :class:`boto.ec2.image.Image` + :return: The EC2 Image specified or None if the image is not found + """ + try: + return self.get_all_images(image_ids=[image_id], dry_run=dry_run)[0] + except IndexError: # None of those images available + return None + + def register_image(self, name=None, description=None, image_location=None, + architecture=None, kernel_id=None, ramdisk_id=None, + root_device_name=None, block_device_map=None, + dry_run=False, virtualization_type=None, + sriov_net_support=None, + snapshot_id=None, + delete_root_volume_on_termination=False): + """ + Register an image. + + :type name: string + :param name: The name of the AMI. Valid only for EBS-based images. + + :type description: string + :param description: The description of the AMI. + + :type image_location: string + :param image_location: Full path to your AMI manifest in + Amazon S3 storage. Only used for S3-based AMI's. + + :type architecture: string + :param architecture: The architecture of the AMI. Valid choices are: + * i386 + * x86_64 + + :type kernel_id: string + :param kernel_id: The ID of the kernel with which to launch + the instances + + :type root_device_name: string + :param root_device_name: The root device name (e.g. /dev/sdh) + + :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping` + :param block_device_map: A BlockDeviceMapping data structure + describing the EBS volumes associated with the Image. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :type virtualization_type: string + :param virtualization_type: The virutalization_type of the image. + Valid choices are: + * paravirtual + * hvm + + :type sriov_net_support: string + :param sriov_net_support: Advanced networking support. + Valid choices are: + * simple + + :type snapshot_id: string + :param snapshot_id: A snapshot ID for the snapshot to be used + as root device for the image. Mutually exclusive with + block_device_map, requires root_device_name + + :type delete_root_volume_on_termination: bool + :param delete_root_volume_on_termination: Whether to delete the root + volume of the image after instance termination. Only applies when + creating image from snapshot_id. Defaults to False. Note that + leaving volumes behind after instance termination is not free. 
+
+        :rtype: string
+        :return: The new image id
+        """
+        params = {}
+        if name:
+            params['Name'] = name
+        if description:
+            params['Description'] = description
+        if architecture:
+            params['Architecture'] = architecture
+        if kernel_id:
+            params['KernelId'] = kernel_id
+        if ramdisk_id:
+            params['RamdiskId'] = ramdisk_id
+        if image_location:
+            params['ImageLocation'] = image_location
+        if root_device_name:
+            params['RootDeviceName'] = root_device_name
+        if snapshot_id:
+            root_vol = BlockDeviceType(snapshot_id=snapshot_id,
+                                       delete_on_termination=delete_root_volume_on_termination)
+            block_device_map = BlockDeviceMapping()
+            block_device_map[root_device_name] = root_vol
+        if block_device_map:
+            block_device_map.ec2_build_list_params(params)
+        if dry_run:
+            params['DryRun'] = 'true'
+        if virtualization_type:
+            params['VirtualizationType'] = virtualization_type
+        if sriov_net_support:
+            params['SriovNetSupport'] = sriov_net_support
+
+        rs = self.get_object('RegisterImage', params, ResultSet, verb='POST')
+        image_id = getattr(rs, 'imageId', None)
+        return image_id
+
+    def deregister_image(self, image_id, delete_snapshot=False, dry_run=False):
+        """
+        Unregister an AMI.
+
+        :type image_id: string
+        :param image_id: the ID of the Image to unregister
+
+        :type delete_snapshot: bool
+        :param delete_snapshot: Set to True if we should delete the
+            snapshot associated with an EBS volume mounted at /dev/sda1
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        snapshot_id = None
+        if delete_snapshot:
+            image = self.get_image(image_id)
+            for key in image.block_device_mapping:
+                if key == "/dev/sda1":
+                    snapshot_id = image.block_device_mapping[key].snapshot_id
+                    break
+        params = {
+            'ImageId': image_id,
+        }
+        if dry_run:
+            params['DryRun'] = 'true'
+        result = self.get_status('DeregisterImage',
+                                 params, verb='POST')
+        if result and snapshot_id:
+            return result and self.delete_snapshot(snapshot_id)
+        return result
+
+    def create_image(self, instance_id, name,
+                     description=None, no_reboot=False,
+                     block_device_mapping=None, dry_run=False):
+        """
+        Create an AMI from an instance in the running or stopped
+        state.
+
+        :type instance_id: string
+        :param instance_id: the ID of the instance to image.
+
+        :type name: string
+        :param name: The name of the new image
+
+        :type description: string
+        :param description: An optional human-readable string describing
+            the contents and purpose of the AMI.
+
+        :type no_reboot: bool
+        :param no_reboot: An optional flag indicating that the
+            bundling process should not attempt to shut down the
+            instance before bundling. If this flag is True, the
+            responsibility of maintaining file system integrity is
+            left to the owner of the instance.
+
+        :type block_device_mapping: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
+        :param block_device_mapping: A BlockDeviceMapping data structure
+            describing the EBS volumes associated with the Image.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
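+
+        Example (illustrative only; the instance ID is hypothetical)::
+
+            ami_id = conn.create_image('i-12345678', 'my-backup',
+                                       no_reboot=True)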
+
+        :rtype: string
+        :return: The new image id
+        """
+        params = {'InstanceId': instance_id,
+                  'Name': name}
+        if description:
+            params['Description'] = description
+        if no_reboot:
+            params['NoReboot'] = 'true'
+        if block_device_mapping:
+            block_device_mapping.ec2_build_list_params(params)
+        if dry_run:
+            params['DryRun'] = 'true'
+        img = self.get_object('CreateImage', params, Image, verb='POST')
+        return img.id
+
+    # ImageAttribute methods
+
+    def get_image_attribute(self, image_id, attribute='launchPermission',
+                            dry_run=False):
+        """
+        Gets an attribute from an image.
+
+        :type image_id: string
+        :param image_id: The Amazon image ID you want information about
+
+        :type attribute: string
+        :param attribute: The attribute you need information about.
+            Valid choices are:
+            * launchPermission
+            * productCodes
+            * blockDeviceMapping
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: :class:`boto.ec2.image.ImageAttribute`
+        :return: An ImageAttribute object representing the value of the
+            attribute requested
+        """
+        params = {'ImageId': image_id,
+                  'Attribute': attribute}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_object('DescribeImageAttribute', params,
+                               ImageAttribute, verb='POST')
+
+    def modify_image_attribute(self, image_id, attribute='launchPermission',
+                               operation='add', user_ids=None, groups=None,
+                               product_codes=None, dry_run=False):
+        """
+        Changes an attribute of an image.
+
+        :type image_id: string
+        :param image_id: The image id you wish to change
+
+        :type attribute: string
+        :param attribute: The attribute you wish to change
+
+        :type operation: string
+        :param operation: Either add or remove (this is required for changing
+            launchPermissions)
+
+        :type user_ids: list
+        :param user_ids: The Amazon IDs of users for which to add/remove
+            attributes
+
+        :type groups: list
+        :param groups: The groups for which to add/remove attributes
+
+        :type product_codes: list
+        :param product_codes: Amazon DevPay product code. Currently only one
+            product code can be associated with an AMI. Once
+            set, the product code cannot be changed or reset.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        """
+        params = {'ImageId': image_id,
+                  'Attribute': attribute,
+                  'OperationType': operation}
+        if user_ids:
+            self.build_list_params(params, user_ids, 'UserId')
+        if groups:
+            self.build_list_params(params, groups, 'UserGroup')
+        if product_codes:
+            self.build_list_params(params, product_codes, 'ProductCode')
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('ModifyImageAttribute', params, verb='POST')
+
+    def reset_image_attribute(self, image_id, attribute='launchPermission',
+                              dry_run=False):
+        """
+        Resets an attribute of an AMI to its default value.
+
+        :type image_id: string
+        :param image_id: ID of the AMI for which the attribute will be reset
+
+        :type attribute: string
+        :param attribute: The attribute to reset
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: Whether the operation succeeded or not
+        """
+        params = {'ImageId': image_id,
+                  'Attribute': attribute}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('ResetImageAttribute', params, verb='POST')
+
+    # Instance methods
+
+    def get_all_instances(self, instance_ids=None, filters=None, dry_run=False,
+                          max_results=None):
+        """
+        Retrieve all the instance reservations associated with your account.
+
+        .. note::
+            This method's current behavior is deprecated in favor of
+            :meth:`get_all_reservations`. A future major release will change
+            :meth:`get_all_instances` to return a list of
+            :class:`boto.ec2.instance.Instance` objects as its name suggests.
+            To obtain that behavior today, use :meth:`get_only_instances`.
+
+        :type instance_ids: list
+        :param instance_ids: A list of strings of instance IDs
+
+        :type filters: dict
+        :param filters: Optional filters that can be used to limit the
+            results returned. Filters are provided in the form of a
+            dictionary consisting of filter names as the key and
+            filter values as the value. The set of allowable filter
+            names/values is dependent on the request being performed.
+            Check the EC2 API guide for details.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :type max_results: int
+        :param max_results: The maximum number of paginated instance
+            items per response.
+
+        :rtype: list
+        :return: A list of :class:`boto.ec2.instance.Reservation`
+
+        """
+        warnings.warn(('The current get_all_instances implementation will be '
+                       'replaced with get_all_reservations.'),
+                      PendingDeprecationWarning)
+        return self.get_all_reservations(instance_ids=instance_ids,
+                                         filters=filters, dry_run=dry_run,
+                                         max_results=max_results)
+
+    def get_only_instances(self, instance_ids=None, filters=None,
+                           dry_run=False, max_results=None):
+        # A future release should rename this method to get_all_instances
+        # and make get_only_instances an alias for that.
+        """
+        Retrieve all the instances associated with your account.
+
+        :type instance_ids: list
+        :param instance_ids: A list of strings of instance IDs
+
+        :type filters: dict
+        :param filters: Optional filters that can be used to limit the
+            results returned. Filters are provided in the form of a
+            dictionary consisting of filter names as the key and
+            filter values as the value. The set of allowable filter
+            names/values is dependent on the request being performed.
+            Check the EC2 API guide for details.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :type max_results: int
+        :param max_results: The maximum number of paginated instance
+            items per response.
+
+        :rtype: list
+        :return: A list of :class:`boto.ec2.instance.Instance`
+        """
+        next_token = None
+        retval = []
+        while True:
+            reservations = self.get_all_reservations(instance_ids=instance_ids,
+                                                     filters=filters,
+                                                     dry_run=dry_run,
+                                                     max_results=max_results,
+                                                     next_token=next_token)
+            retval.extend([instance for reservation in reservations for
+                           instance in reservation.instances])
+            next_token = reservations.next_token
+            if not next_token:
+                break
+
+        return retval
+
+    def get_all_reservations(self, instance_ids=None, filters=None,
+                             dry_run=False, max_results=None, next_token=None):
+        """
+        Retrieve all the instance reservations associated with your account.
+
+        :type instance_ids: list
+        :param instance_ids: A list of strings of instance IDs
+
+        :type filters: dict
+        :param filters: Optional filters that can be used to limit the
+            results returned. Filters are provided in the form of a
+            dictionary consisting of filter names as the key and
+            filter values as the value. The set of allowable filter
+            names/values is dependent on the request being performed.
+            Check the EC2 API guide for details.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
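+
+        Example (an illustrative sketch; assumes ``conn`` is an
+        existing EC2Connection)::
+
+            reservations = conn.get_all_reservations(
+                filters={'instance-state-name': 'running'})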
+
+        :type max_results: int
+        :param max_results: The maximum number of paginated instance
+            items per response.
+
+        :type next_token: str
+        :param next_token: A string specifying the next paginated set
+            of results to return.
+
+        :rtype: list
+        :return: A list of :class:`boto.ec2.instance.Reservation`
+        """
+        params = {}
+        if instance_ids:
+            self.build_list_params(params, instance_ids, 'InstanceId')
+        if filters:
+            if 'group-id' in filters:
+                gid = filters.get('group-id')
+                if not gid.startswith('sg-') or len(gid) != 11:
+                    warnings.warn(
+                        "The group-id filter now requires a security group "
+                        "identifier (sg-*) instead of a group name. To filter "
+                        "by group name use the 'group-name' filter instead.",
+                        UserWarning)
+            self.build_filter_params(params, filters)
+        if dry_run:
+            params['DryRun'] = 'true'
+        if max_results is not None:
+            params['MaxResults'] = max_results
+        if next_token:
+            params['NextToken'] = next_token
+        return self.get_list('DescribeInstances', params,
+                             [('item', Reservation)], verb='POST')
+
+    def get_all_instance_status(self, instance_ids=None,
+                                max_results=None, next_token=None,
+                                filters=None, dry_run=False,
+                                include_all_instances=False):
+        """
+        Retrieve the status of one or more instances in your account,
+        including any scheduled maintenance events.
+
+        :type instance_ids: list
+        :param instance_ids: A list of strings of instance IDs
+
+        :type max_results: int
+        :param max_results: The maximum number of paginated instance
+            items per response.
+
+        :type next_token: str
+        :param next_token: A string specifying the next paginated set
+            of results to return.
+
+        :type filters: dict
+        :param filters: Optional filters that can be used to limit
+            the results returned. Filters are provided
+            in the form of a dictionary consisting of
+            filter names as the key and filter values
+            as the value. The set of allowable filter
+            names/values is dependent on the request
+            being performed. Check the EC2 API guide
+            for details.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :type include_all_instances: bool
+        :param include_all_instances: Set to True if all
+            instances should be returned. (Only running
+            instances are included by default.)
+
+        :rtype: list
+        :return: A list of instance status records, including any
+            scheduled maintenance events.
+        """
+        params = {}
+        if instance_ids:
+            self.build_list_params(params, instance_ids, 'InstanceId')
+        if max_results:
+            params['MaxResults'] = max_results
+        if next_token:
+            params['NextToken'] = next_token
+        if filters:
+            self.build_filter_params(params, filters)
+        if dry_run:
+            params['DryRun'] = 'true'
+        if include_all_instances:
+            params['IncludeAllInstances'] = 'true'
+        return self.get_object('DescribeInstanceStatus', params,
+                               InstanceStatusSet, verb='POST')
+
+    def run_instances(self, image_id, min_count=1, max_count=1,
+                      key_name=None, security_groups=None,
+                      user_data=None, addressing_type=None,
+                      instance_type='m1.small', placement=None,
+                      kernel_id=None, ramdisk_id=None,
+                      monitoring_enabled=False, subnet_id=None,
+                      block_device_map=None,
+                      disable_api_termination=False,
+                      instance_initiated_shutdown_behavior=None,
+                      private_ip_address=None,
+                      placement_group=None, client_token=None,
+                      security_group_ids=None,
+                      additional_info=None, instance_profile_name=None,
+                      instance_profile_arn=None, tenancy=None,
+                      ebs_optimized=False, network_interfaces=None,
+                      dry_run=False):
+        """
+        Runs an image on EC2.
+
+        :type image_id: string
+        :param image_id: The ID of the image to run.
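+
+        Example (illustrative only; the AMI ID and key pair name are
+        hypothetical)::
+
+            reservation = conn.run_instances('ami-12345678',
+                                             key_name='my-key',
+                                             instance_type='m1.small')
+            instance = reservation.instances[0]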
+ + :type min_count: int + :param min_count: The minimum number of instances to launch. + + :type max_count: int + :param max_count: The maximum number of instances to launch. + + :type key_name: string + :param key_name: The name of the key pair with which to + launch instances. + + :type security_groups: list of strings + :param security_groups: The names of the EC2 classic security groups + with which to associate instances + + :type user_data: string + :param user_data: The Base64-encoded MIME user data to be made + available to the instance(s) in this reservation. + + :type instance_type: string + :param instance_type: The type of instance to run: + + * t1.micro + * m1.small + * m1.medium + * m1.large + * m1.xlarge + * m3.medium + * m3.large + * m3.xlarge + * m3.2xlarge + * c1.medium + * c1.xlarge + * m2.xlarge + * m2.2xlarge + * m2.4xlarge + * cr1.8xlarge + * hi1.4xlarge + * hs1.8xlarge + * cc1.4xlarge + * cg1.4xlarge + * cc2.8xlarge + * g2.2xlarge + * c3.large + * c3.xlarge + * c3.2xlarge + * c3.4xlarge + * c3.8xlarge + * i2.xlarge + * i2.2xlarge + * i2.4xlarge + * i2.8xlarge + * t2.micro + * t2.small + * t2.medium + + :type placement: string + :param placement: The Availability Zone to launch the instance into. + + :type kernel_id: string + :param kernel_id: The ID of the kernel with which to launch the + instances. + + :type ramdisk_id: string + :param ramdisk_id: The ID of the RAM disk with which to launch the + instances. + + :type monitoring_enabled: bool + :param monitoring_enabled: Enable detailed CloudWatch monitoring on + the instance. + + :type subnet_id: string + :param subnet_id: The subnet ID within which to launch the instances + for VPC. + + :type private_ip_address: string + :param private_ip_address: If you're using VPC, you can + optionally use this parameter to assign the instance a + specific available IP address from the subnet (e.g., + 10.0.0.25). + + :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping` + :param block_device_map: A BlockDeviceMapping data structure + describing the EBS volumes associated with the Image. + + :type disable_api_termination: bool + :param disable_api_termination: If True, the instances will be locked + and will not be able to be terminated via the API. + + :type instance_initiated_shutdown_behavior: string + :param instance_initiated_shutdown_behavior: Specifies whether the + instance stops or terminates on instance-initiated shutdown. + Valid values are: + + * stop + * terminate + + :type placement_group: string + :param placement_group: If specified, this is the name of the placement + group in which the instance(s) will be launched. + + :type client_token: string + :param client_token: Unique, case-sensitive identifier you provide + to ensure idempotency of the request. Maximum 64 ASCII characters. + + :type security_group_ids: list of strings + :param security_group_ids: The ID of the VPC security groups with + which to associate instances. + + :type additional_info: string + :param additional_info: Specifies additional information to make + available to the instance(s). + + :type tenancy: string + :param tenancy: The tenancy of the instance you want to + launch. An instance with a tenancy of 'dedicated' runs on + single-tenant hardware and can only be launched into a + VPC. Valid values are:"default" or "dedicated". + NOTE: To use dedicated tenancy you MUST specify a VPC + subnet-ID as well. 
+ + :type instance_profile_arn: string + :param instance_profile_arn: The Amazon resource name (ARN) of + the IAM Instance Profile (IIP) to associate with the instances. + + :type instance_profile_name: string + :param instance_profile_name: The name of + the IAM Instance Profile (IIP) to associate with the instances. + + :type ebs_optimized: bool + :param ebs_optimized: Whether the instance is optimized for + EBS I/O. This optimization provides dedicated throughput + to Amazon EBS and an optimized configuration stack to + provide optimal EBS I/O performance. This optimization + isn't available with all instance types. + + :type network_interfaces: :class:`boto.ec2.networkinterface.NetworkInterfaceCollection` + :param network_interfaces: A NetworkInterfaceCollection data + structure containing the ENI specifications for the instance. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: Reservation + :return: The :class:`boto.ec2.instance.Reservation` associated with + the request for machines + """ + params = {'ImageId': image_id, + 'MinCount': min_count, + 'MaxCount': max_count} + if key_name: + params['KeyName'] = key_name + if security_group_ids: + l = [] + for group in security_group_ids: + if isinstance(group, SecurityGroup): + l.append(group.id) + else: + l.append(group) + self.build_list_params(params, l, 'SecurityGroupId') + if security_groups: + l = [] + for group in security_groups: + if isinstance(group, SecurityGroup): + l.append(group.name) + else: + l.append(group) + self.build_list_params(params, l, 'SecurityGroup') + if user_data: + if isinstance(user_data, six.text_type): + user_data = user_data.encode('utf-8') + params['UserData'] = base64.b64encode(user_data).decode('utf-8') + if addressing_type: + params['AddressingType'] = addressing_type + if instance_type: + params['InstanceType'] = instance_type + if placement: + params['Placement.AvailabilityZone'] = placement + if placement_group: + params['Placement.GroupName'] = placement_group + if tenancy: + params['Placement.Tenancy'] = tenancy + if kernel_id: + params['KernelId'] = kernel_id + if ramdisk_id: + params['RamdiskId'] = ramdisk_id + if monitoring_enabled: + params['Monitoring.Enabled'] = 'true' + if subnet_id: + params['SubnetId'] = subnet_id + if private_ip_address: + params['PrivateIpAddress'] = private_ip_address + if block_device_map: + block_device_map.ec2_build_list_params(params) + if disable_api_termination: + params['DisableApiTermination'] = 'true' + if instance_initiated_shutdown_behavior: + val = instance_initiated_shutdown_behavior + params['InstanceInitiatedShutdownBehavior'] = val + if client_token: + params['ClientToken'] = client_token + if additional_info: + params['AdditionalInfo'] = additional_info + if instance_profile_name: + params['IamInstanceProfile.Name'] = instance_profile_name + if instance_profile_arn: + params['IamInstanceProfile.Arn'] = instance_profile_arn + if ebs_optimized: + params['EbsOptimized'] = 'true' + if network_interfaces: + network_interfaces.build_list_params(params) + if dry_run: + params['DryRun'] = 'true' + return self.get_object('RunInstances', params, Reservation, + verb='POST') + + def terminate_instances(self, instance_ids=None, dry_run=False): + """ + Terminate the instances specified + + :type instance_ids: list + :param instance_ids: A list of strings of the Instance IDs to terminate + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. 
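+
+        Example (illustrative only; the instance ID is hypothetical)::
+
+            terminated = conn.terminate_instances(['i-12345678'])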
+
+        :rtype: list
+        :return: A list of the instances terminated
+        """
+        params = {}
+        if instance_ids:
+            self.build_list_params(params, instance_ids, 'InstanceId')
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('TerminateInstances', params,
+                             [('item', Instance)], verb='POST')
+
+    def stop_instances(self, instance_ids=None, force=False, dry_run=False):
+        """
+        Stop the instances specified
+
+        :type instance_ids: list
+        :param instance_ids: A list of strings of the Instance IDs to stop
+
+        :type force: bool
+        :param force: Forces the instance to stop
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of the instances stopped
+        """
+        params = {}
+        if force:
+            params['Force'] = 'true'
+        if instance_ids:
+            self.build_list_params(params, instance_ids, 'InstanceId')
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('StopInstances', params,
+                             [('item', Instance)], verb='POST')
+
+    def start_instances(self, instance_ids=None, dry_run=False):
+        """
+        Start the instances specified
+
+        :type instance_ids: list
+        :param instance_ids: A list of strings of the Instance IDs to start
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of the instances started
+        """
+        params = {}
+        if instance_ids:
+            self.build_list_params(params, instance_ids, 'InstanceId')
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('StartInstances', params,
+                             [('item', Instance)], verb='POST')
+
+    def get_console_output(self, instance_id, dry_run=False):
+        """
+        Retrieves the console output for the specified instance.
+
+        :type instance_id: string
+        :param instance_id: The instance ID of a running instance on the cloud.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: :class:`boto.ec2.instance.ConsoleOutput`
+        :return: The console output as a ConsoleOutput object
+        """
+        params = {}
+        self.build_list_params(params, [instance_id], 'InstanceId')
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_object('GetConsoleOutput', params,
+                               ConsoleOutput, verb='POST')
+
+    def reboot_instances(self, instance_ids=None, dry_run=False):
+        """
+        Reboot the specified instances.
+
+        :type instance_ids: list
+        :param instance_ids: The instances to reboot
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        """
+        params = {}
+        if instance_ids:
+            self.build_list_params(params, instance_ids, 'InstanceId')
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('RebootInstances', params)
+
+    def confirm_product_instance(self, product_code, instance_id,
+                                 dry_run=False):
+        """
+        Confirm that the specified product code is associated with the
+        specified instance.
+
+        :type product_code: string
+        :param product_code: The product code to confirm.
+
+        :type instance_id: string
+        :param instance_id: The ID of the instance to check.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        """
+        params = {'ProductCode': product_code,
+                  'InstanceId': instance_id}
+        if dry_run:
+            params['DryRun'] = 'true'
+        rs = self.get_object('ConfirmProductInstance', params,
+                             ResultSet, verb='POST')
+        return (rs.status, rs.ownerId)
+
+    # InstanceAttribute methods
+
+    def get_instance_attribute(self, instance_id, attribute, dry_run=False):
+        """
+        Gets an attribute from an instance.
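+
+        Example (illustrative only; the instance ID is hypothetical)::
+
+            attr = conn.get_instance_attribute('i-12345678', 'instanceType')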
+
+        :type instance_id: string
+        :param instance_id: The Amazon ID of the instance
+
+        :type attribute: string
+        :param attribute: The attribute you need information about.
+            Valid choices are:
+
+            * instanceType
+            * kernel
+            * ramdisk
+            * userData
+            * disableApiTermination
+            * instanceInitiatedShutdownBehavior
+            * rootDeviceName
+            * blockDeviceMapping
+            * productCodes
+            * sourceDestCheck
+            * groupSet
+            * ebsOptimized
+            * sriovNetSupport
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: :class:`boto.ec2.instance.InstanceAttribute`
+        :return: An InstanceAttribute object representing the value of the
+            attribute requested
+        """
+        params = {'InstanceId': instance_id}
+        if attribute:
+            params['Attribute'] = attribute
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_object('DescribeInstanceAttribute', params,
+                               InstanceAttribute, verb='POST')
+
+    def modify_network_interface_attribute(self, interface_id, attr, value,
+                                           attachment_id=None, dry_run=False):
+        """
+        Changes an attribute of a network interface.
+
+        :type interface_id: string
+        :param interface_id: The interface id. Looks like 'eni-xxxxxxxx'
+
+        :type attr: string
+        :param attr: The attribute you wish to change.
+
+            Learn more at http://docs.aws.amazon.com/AWSEC2/latest/API\
+            Reference/ApiReference-query-ModifyNetworkInterfaceAttribute.html
+
+            * description - Textual description of interface
+            * groupSet - List of security group ids or group objects
+            * sourceDestCheck - Boolean
+            * deleteOnTermination - Boolean. Must also specify attachment_id
+
+        :type value: string
+        :param value: The new value for the attribute
+
+        :type attachment_id: string
+        :param attachment_id: If you're modifying DeleteOnTermination you must
+            specify the attachment_id.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: Whether the operation succeeded or not
+        """
+        bool_reqs = (
+            'deleteontermination',
+            'sourcedestcheck',
+        )
+        if attr.lower() in bool_reqs:
+            if isinstance(value, bool):
+                if value:
+                    value = 'true'
+                else:
+                    value = 'false'
+            elif value not in ['true', 'false']:
+                raise ValueError('%s must be a boolean, "true", or "false"!'
+                                 % attr)
+
+        params = {'NetworkInterfaceId': interface_id}
+
+        # groupSet is handled differently from other arguments
+        if attr.lower() == 'groupset':
+            for idx, sg in enumerate(value):
+                if isinstance(sg, SecurityGroup):
+                    sg = sg.id
+                params['SecurityGroupId.%s' % (idx + 1)] = sg
+        elif attr.lower() == 'description':
+            params['Description.Value'] = value
+        elif attr.lower() == 'sourcedestcheck':
+            params['SourceDestCheck.Value'] = value
+        elif attr.lower() == 'deleteontermination':
+            params['Attachment.DeleteOnTermination'] = value
+            if not attachment_id:
+                raise ValueError('You must also specify an attachment_id')
+            params['Attachment.AttachmentId'] = attachment_id
+        else:
+            raise ValueError('Unknown attribute "%s"' % (attr,))
+
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status(
+            'ModifyNetworkInterfaceAttribute', params, verb='POST')
+
+    def modify_instance_attribute(self, instance_id, attribute, value,
+                                  dry_run=False):
+        """
+        Changes an attribute of an instance
+
+        :type instance_id: string
+        :param instance_id: The instance id you wish to change
+
+        :type attribute: string
+        :param attribute: The attribute you wish to change.
+
+            * instanceType - A valid instance type (m1.small)
+            * kernel - Kernel ID (None)
+            * ramdisk - Ramdisk ID (None)
+            * userData - Base64 encoded String (None)
+            * disableApiTermination - Boolean (true)
+            * instanceInitiatedShutdownBehavior - stop|terminate
+            * blockDeviceMapping - List of strings - ie: ['/dev/sda=false']
+            * sourceDestCheck - Boolean (true)
+            * groupSet - Set of Security Groups or IDs
+            * ebsOptimized - Boolean (false)
+            * sriovNetSupport - String - ie: 'simple'
+
+        :type value: string
+        :param value: The new value for the attribute
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: Whether the operation succeeded or not
+        """
+        # Allow a bool to be passed in for value of disableApiTermination
+        bool_reqs = ('disableapitermination',
+                     'sourcedestcheck',
+                     'ebsoptimized')
+        if attribute.lower() in bool_reqs:
+            if isinstance(value, bool):
+                if value:
+                    value = 'true'
+                else:
+                    value = 'false'
+
+        params = {'InstanceId': instance_id}
+
+        # groupSet is handled differently from other arguments
+        if attribute.lower() == 'groupset':
+            for idx, sg in enumerate(value):
+                if isinstance(sg, SecurityGroup):
+                    sg = sg.id
+                params['GroupId.%s' % (idx + 1)] = sg
+        elif attribute.lower() == 'blockdevicemapping':
+            for idx, kv in enumerate(value):
+                dev_name, _, flag = kv.partition('=')
+                pre = 'BlockDeviceMapping.%d' % (idx + 1)
+                params['%s.DeviceName' % pre] = dev_name
+                params['%s.Ebs.DeleteOnTermination' % pre] = flag or 'true'
+        else:
+            # for backwards compatibility handle lowercase first letter
+            attribute = attribute[0].upper() + attribute[1:]
+            params['%s.Value' % attribute] = value
+
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('ModifyInstanceAttribute', params, verb='POST')
+
+    def reset_instance_attribute(self, instance_id, attribute, dry_run=False):
+        """
+        Resets an attribute of an instance to its default value.
+
+        :type instance_id: string
+        :param instance_id: ID of the instance
+
+        :type attribute: string
+        :param attribute: The attribute to reset. Valid values are:
+            kernel|ramdisk
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: Whether the operation succeeded or not
+        """
+        params = {'InstanceId': instance_id,
+                  'Attribute': attribute}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('ResetInstanceAttribute', params, verb='POST')
+
+    # Spot Instances
+
+    def get_all_spot_instance_requests(self, request_ids=None,
+                                       filters=None, dry_run=False):
+        """
+        Retrieve all the spot instance requests associated with your account.
+
+        :type request_ids: list
+        :param request_ids: A list of strings of spot instance request IDs
+
+        :type filters: dict
+        :param filters: Optional filters that can be used to limit the
+            results returned. Filters are provided in the form of a
+            dictionary consisting of filter names as the key and
+            filter values as the value. The set of allowable filter
+            names/values is dependent on the request being performed.
+            Check the EC2 API guide for details.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
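+
+        Example (an illustrative sketch; assumes ``conn`` is an
+        existing EC2Connection)::
+
+            requests = conn.get_all_spot_instance_requests(
+                filters={'state': 'active'})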
+
+        :rtype: list
+        :return: A list of
+            :class:`boto.ec2.spotinstancerequest.SpotInstanceRequest`
+        """
+        params = {}
+        if request_ids:
+            self.build_list_params(params, request_ids, 'SpotInstanceRequestId')
+        if filters:
+            if 'launch.group-id' in filters:
+                lgid = filters.get('launch.group-id')
+                if not lgid.startswith('sg-') or len(lgid) != 11:
+                    warnings.warn(
+                        "The 'launch.group-id' filter now requires a security "
+                        "group id (sg-*) and no longer supports filtering by "
+                        "group name. Please update your filters accordingly.",
+                        UserWarning)
+            self.build_filter_params(params, filters)
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('DescribeSpotInstanceRequests', params,
+                             [('item', SpotInstanceRequest)], verb='POST')
+
+    def get_spot_price_history(self, start_time=None, end_time=None,
+                               instance_type=None, product_description=None,
+                               availability_zone=None, dry_run=False,
+                               max_results=None, next_token=None,
+                               filters=None):
+        """
+        Retrieve the recent history of spot instances pricing.
+
+        :type start_time: str
+        :param start_time: An indication of how far back to provide price
+            changes for. An ISO8601 DateTime string.
+
+        :type end_time: str
+        :param end_time: An indication of how far forward to provide price
+            changes for. An ISO8601 DateTime string.
+
+        :type instance_type: str
+        :param instance_type: Filter responses to a particular instance type.
+
+        :type product_description: str
+        :param product_description: Filter responses to a particular platform.
+            Valid values are currently:
+
+            * Linux/UNIX
+            * SUSE Linux
+            * Windows
+            * Linux/UNIX (Amazon VPC)
+            * SUSE Linux (Amazon VPC)
+            * Windows (Amazon VPC)
+
+        :type availability_zone: str
+        :param availability_zone: The availability zone for which prices
+            should be returned. If not specified, data for all
+            availability zones will be returned.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :type max_results: int
+        :param max_results: The maximum number of paginated items
+            per response.
+
+        :type next_token: str
+        :param next_token: The next set of rows to return. This should
+            be the value of the ``next_token`` attribute from a previous
+            call to ``get_spot_price_history``.
+
+        :type filters: dict
+        :param filters: Optional filters that can be used to limit the
+            results returned. Filters are provided in the form of a
+            dictionary consisting of filter names as the key and
+            filter values as the value. The set of allowable filter
+            names/values is dependent on the request being performed.
+            Check the EC2 API guide for details.
+
+        :rtype: list
+        :return: A list of :class:`boto.ec2.spotpricehistory.SpotPriceHistory`
+            objects, each carrying a price and timestamp.
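+
+        Example (an illustrative sketch; assumes ``conn`` is an
+        existing EC2Connection)::
+
+            history = conn.get_spot_price_history(
+                instance_type='m1.small',
+                product_description='Linux/UNIX')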
+ """ + params = {} + if start_time: + params['StartTime'] = start_time + if end_time: + params['EndTime'] = end_time + if instance_type: + params['InstanceType'] = instance_type + if product_description: + params['ProductDescription'] = product_description + if availability_zone: + params['AvailabilityZone'] = availability_zone + if dry_run: + params['DryRun'] = 'true' + if max_results is not None: + params['MaxResults'] = max_results + if next_token: + params['NextToken'] = next_token + if filters: + self.build_filter_params(params, filters) + return self.get_list('DescribeSpotPriceHistory', params, + [('item', SpotPriceHistory)], verb='POST') + + def request_spot_instances(self, price, image_id, count=1, type='one-time', + valid_from=None, valid_until=None, + launch_group=None, availability_zone_group=None, + key_name=None, security_groups=None, + user_data=None, addressing_type=None, + instance_type='m1.small', placement=None, + kernel_id=None, ramdisk_id=None, + monitoring_enabled=False, subnet_id=None, + placement_group=None, + block_device_map=None, + instance_profile_arn=None, + instance_profile_name=None, + security_group_ids=None, + ebs_optimized=False, + network_interfaces=None, dry_run=False): + """ + Request instances on the spot market at a particular price. + + :type price: str + :param price: The maximum price of your bid + + :type image_id: string + :param image_id: The ID of the image to run + + :type count: int + :param count: The of instances to requested + + :type type: str + :param type: Type of request. Can be 'one-time' or 'persistent'. + Default is one-time. + + :type valid_from: str + :param valid_from: Start date of the request. An ISO8601 time string. + + :type valid_until: str + :param valid_until: End date of the request. An ISO8601 time string. + + :type launch_group: str + :param launch_group: If supplied, all requests will be fulfilled + as a group. + + :type availability_zone_group: str + :param availability_zone_group: If supplied, all requests will be + fulfilled within a single availability zone. + + :type key_name: string + :param key_name: The name of the key pair with which to + launch instances + + :type security_groups: list of strings + :param security_groups: The names of the security groups with which to + associate instances + + :type user_data: string + :param user_data: The user data passed to the launched instances + + :type instance_type: string + :param instance_type: The type of instance to run: + + * t1.micro + * m1.small + * m1.medium + * m1.large + * m1.xlarge + * m3.medium + * m3.large + * m3.xlarge + * m3.2xlarge + * c1.medium + * c1.xlarge + * m2.xlarge + * m2.2xlarge + * m2.4xlarge + * cr1.8xlarge + * hi1.4xlarge + * hs1.8xlarge + * cc1.4xlarge + * cg1.4xlarge + * cc2.8xlarge + * g2.2xlarge + * c3.large + * c3.xlarge + * c3.2xlarge + * c3.4xlarge + * c3.8xlarge + * i2.xlarge + * i2.2xlarge + * i2.4xlarge + * i2.8xlarge + * t2.micro + * t2.small + * t2.medium + + :type placement: string + :param placement: The availability zone in which to launch + the instances + + :type kernel_id: string + :param kernel_id: The ID of the kernel with which to launch the + instances + + :type ramdisk_id: string + :param ramdisk_id: The ID of the RAM disk with which to launch the + instances + + :type monitoring_enabled: bool + :param monitoring_enabled: Enable detailed CloudWatch monitoring on + the instance. + + :type subnet_id: string + :param subnet_id: The subnet ID within which to launch the instances + for VPC. 
+
+        :type placement_group: string
+        :param placement_group: If specified, this is the name of the placement
+            group in which the instance(s) will be launched.
+
+        :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
+        :param block_device_map: A BlockDeviceMapping data structure
+            describing the EBS volumes associated with the Image.
+
+        :type security_group_ids: list of strings
+        :param security_group_ids: The IDs of the VPC security groups with
+            which to associate instances.
+
+        :type instance_profile_arn: string
+        :param instance_profile_arn: The Amazon resource name (ARN) of
+            the IAM Instance Profile (IIP) to associate with the instances.
+
+        :type instance_profile_name: string
+        :param instance_profile_name: The name of
+            the IAM Instance Profile (IIP) to associate with the instances.
+
+        :type ebs_optimized: bool
+        :param ebs_optimized: Whether the instance is optimized for
+            EBS I/O. This optimization provides dedicated throughput
+            to Amazon EBS and an optimized configuration stack to
+            provide optimal EBS I/O performance. This optimization
+            isn't available with all instance types.
+
+        :type network_interfaces: list
+        :param network_interfaces: A list of
+            :class:`boto.ec2.networkinterface.NetworkInterfaceSpecification`
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of the
+            :class:`boto.ec2.spotinstancerequest.SpotInstanceRequest`
+            objects associated with the request for machines
+        """
+        ls = 'LaunchSpecification'
+        params = {'%s.ImageId' % ls: image_id,
+                  'Type': type,
+                  'SpotPrice': price}
+        if count:
+            params['InstanceCount'] = count
+        if valid_from:
+            params['ValidFrom'] = valid_from
+        if valid_until:
+            params['ValidUntil'] = valid_until
+        if launch_group:
+            params['LaunchGroup'] = launch_group
+        if availability_zone_group:
+            params['AvailabilityZoneGroup'] = availability_zone_group
+        if key_name:
+            params['%s.KeyName' % ls] = key_name
+        if security_group_ids:
+            l = []
+            for group in security_group_ids:
+                if isinstance(group, SecurityGroup):
+                    l.append(group.id)
+                else:
+                    l.append(group)
+            self.build_list_params(params, l,
+                                   '%s.SecurityGroupId' % ls)
+        if security_groups:
+            l = []
+            for group in security_groups:
+                if isinstance(group, SecurityGroup):
+                    l.append(group.name)
+                else:
+                    l.append(group)
+            self.build_list_params(params, l, '%s.SecurityGroup' % ls)
+        if user_data:
+            # Mirror run_instances above: encode text to bytes before
+            # base64-encoding so this also works on Python 3.
+            if isinstance(user_data, six.text_type):
+                user_data = user_data.encode('utf-8')
+            params['%s.UserData' % ls] = base64.b64encode(user_data).decode('utf-8')
+        if addressing_type:
+            params['%s.AddressingType' % ls] = addressing_type
+        if instance_type:
+            params['%s.InstanceType' % ls] = instance_type
+        if placement:
+            params['%s.Placement.AvailabilityZone' % ls] = placement
+        if kernel_id:
+            params['%s.KernelId' % ls] = kernel_id
+        if ramdisk_id:
+            params['%s.RamdiskId' % ls] = ramdisk_id
+        if monitoring_enabled:
+            params['%s.Monitoring.Enabled' % ls] = 'true'
+        if subnet_id:
+            params['%s.SubnetId' % ls] = subnet_id
+        if placement_group:
+            params['%s.Placement.GroupName' % ls] = placement_group
+        if block_device_map:
+            block_device_map.ec2_build_list_params(params, '%s.' % ls)
+        if instance_profile_name:
+            params['%s.IamInstanceProfile.Name' % ls] = instance_profile_name
+        if instance_profile_arn:
+            params['%s.IamInstanceProfile.Arn' % ls] = instance_profile_arn
+        if ebs_optimized:
+            params['%s.EbsOptimized' % ls] = 'true'
+        if network_interfaces:
+            network_interfaces.build_list_params(params, prefix=ls + '.')
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('RequestSpotInstances', params,
+                             [('item', SpotInstanceRequest)],
+                             verb='POST')
+
+    def cancel_spot_instance_requests(self, request_ids, dry_run=False):
+        """
+        Cancel the specified Spot Instance Requests.
+
+        :type request_ids: list
+        :param request_ids: A list of strings of the Request IDs to cancel
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of the canceled
+            :class:`boto.ec2.spotinstancerequest.SpotInstanceRequest` objects
+        """
+        params = {}
+        if request_ids:
+            self.build_list_params(params, request_ids, 'SpotInstanceRequestId')
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('CancelSpotInstanceRequests', params,
+                             [('item', SpotInstanceRequest)], verb='POST')
+
+    def get_spot_datafeed_subscription(self, dry_run=False):
+        """
+        Return the current spot instance data feed subscription
+        associated with this account, if any.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: :class:`boto.ec2.spotdatafeedsubscription.SpotDatafeedSubscription`
+        :return: The datafeed subscription object or None
+        """
+        params = {}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_object('DescribeSpotDatafeedSubscription',
+                               params, SpotDatafeedSubscription, verb='POST')
+
+    def create_spot_datafeed_subscription(self, bucket, prefix, dry_run=False):
+        """
+        Create a spot instance datafeed subscription for this account.
+
+        :type bucket: str or unicode
+        :param bucket: The name of the bucket where spot instance data
+            will be written. The account issuing this request
+            must have FULL_CONTROL access to the bucket
+            specified in the request.
+
+        :type prefix: str or unicode
+        :param prefix: An optional prefix that will be prepended to all
+            data files written to the bucket.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: :class:`boto.ec2.spotdatafeedsubscription.SpotDatafeedSubscription`
+        :return: The datafeed subscription object or None
+        """
+        params = {'Bucket': bucket}
+        if prefix:
+            params['Prefix'] = prefix
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_object('CreateSpotDatafeedSubscription',
+                               params, SpotDatafeedSubscription, verb='POST')
+
+    def delete_spot_datafeed_subscription(self, dry_run=False):
+        """
+        Delete the current spot instance data feed subscription
+        associated with this account
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('DeleteSpotDatafeedSubscription',
+                               params, verb='POST')
+
+    # Zone methods
+
+    def get_all_zones(self, zones=None, filters=None, dry_run=False):
+        """
+        Get all Availability Zones associated with the current region.
+
+        :type zones: list
+        :param zones: Optional list of zones. If this list is present,
+            only the Zones associated with these zone names
+            will be returned.
+
+        :type filters: dict
+        :param filters: Optional filters that can be used to limit
+            the results returned.
+            Filters are provided
+            in the form of a dictionary consisting of
+            filter names as the key and filter values
+            as the value. The set of allowable filter
+            names/values is dependent on the request
+            being performed. Check the EC2 API guide
+            for details.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list of :class:`boto.ec2.zone.Zone`
+        :return: The requested Zone objects
+        """
+        params = {}
+        if zones:
+            self.build_list_params(params, zones, 'ZoneName')
+        if filters:
+            self.build_filter_params(params, filters)
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('DescribeAvailabilityZones', params,
+                             [('item', Zone)], verb='POST')
+
+    # Address methods
+
+    def get_all_addresses(self, addresses=None, filters=None,
+                          allocation_ids=None, dry_run=False):
+        """
+        Get all EIPs associated with the current credentials.
+
+        :type addresses: list
+        :param addresses: Optional list of addresses. If this list is present,
+            only the Addresses associated with these addresses
+            will be returned.
+
+        :type filters: dict
+        :param filters: Optional filters that can be used to limit
+            the results returned. Filters are provided
+            in the form of a dictionary consisting of
+            filter names as the key and filter values
+            as the value. The set of allowable filter
+            names/values is dependent on the request
+            being performed. Check the EC2 API guide
+            for details.
+
+        :type allocation_ids: list
+        :param allocation_ids: Optional list of allocation IDs. If this list is
+            present, only the Addresses associated with the given
+            allocation IDs will be returned.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list of :class:`boto.ec2.address.Address`
+        :return: The requested Address objects
+        """
+        params = {}
+        if addresses:
+            self.build_list_params(params, addresses, 'PublicIp')
+        if allocation_ids:
+            self.build_list_params(params, allocation_ids, 'AllocationId')
+        if filters:
+            self.build_filter_params(params, filters)
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('DescribeAddresses', params, [('item', Address)], verb='POST')
+
+    def allocate_address(self, domain=None, dry_run=False):
+        """
+        Allocate a new Elastic IP address and associate it with your account.
+
+        :type domain: string
+        :param domain: Optional string. If domain is set to "vpc" the address
+            will be allocated to VPC. Will return address object with
+            allocation_id.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: :class:`boto.ec2.address.Address`
+        :return: The newly allocated Address
+        """
+        params = {}
+
+        if domain is not None:
+            params['Domain'] = domain
+
+        if dry_run:
+            params['DryRun'] = 'true'
+
+        return self.get_object('AllocateAddress', params, Address, verb='POST')
+
+    def assign_private_ip_addresses(self, network_interface_id=None,
+                                    private_ip_addresses=None,
+                                    secondary_private_ip_address_count=None,
+                                    allow_reassignment=False, dry_run=False):
+        """
+        Assigns one or more secondary private IP addresses to a network
+        interface in Amazon VPC.
+
+        :type network_interface_id: string
+        :param network_interface_id: The network interface to which the IP
+            address will be assigned.
+
+        :type private_ip_addresses: list
+        :param private_ip_addresses: Assigns the specified IP addresses as
+            secondary IP addresses to the network interface.
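+
+        Example (illustrative only; the interface ID is hypothetical)::
+
+            conn.assign_private_ip_addresses(
+                'eni-12345678', secondary_private_ip_address_count=2)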
+ + :type secondary_private_ip_address_count: int + :param secondary_private_ip_address_count: The number of secondary IP + addresses to assign to the network interface. You cannot specify + this parameter when also specifying private_ip_addresses. + + :type allow_reassignment: bool + :param allow_reassignment: Specifies whether to allow an IP address + that is already assigned to another network interface or instance + to be reassigned to the specified network interface. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {} + + if network_interface_id is not None: + params['NetworkInterfaceId'] = network_interface_id + + if private_ip_addresses is not None: + self.build_list_params(params, private_ip_addresses, + 'PrivateIpAddress') + elif secondary_private_ip_address_count is not None: + params['SecondaryPrivateIpAddressCount'] = \ + secondary_private_ip_address_count + + if allow_reassignment: + params['AllowReassignment'] = 'true' + + if dry_run: + params['DryRun'] = 'true' + + return self.get_status('AssignPrivateIpAddresses', params, verb='POST') + + def _associate_address(self, status, instance_id=None, public_ip=None, + allocation_id=None, network_interface_id=None, + private_ip_address=None, allow_reassociation=False, + dry_run=False): + params = {} + if instance_id is not None: + params['InstanceId'] = instance_id + elif network_interface_id is not None: + params['NetworkInterfaceId'] = network_interface_id + + # Allocation id trumps public ip in order to associate with VPCs + if allocation_id is not None: + params['AllocationId'] = allocation_id + elif public_ip is not None: + params['PublicIp'] = public_ip + + if private_ip_address is not None: + params['PrivateIpAddress'] = private_ip_address + + if allow_reassociation: + params['AllowReassociation'] = 'true' + + if dry_run: + params['DryRun'] = 'true' + + if status: + return self.get_status('AssociateAddress', params, verb='POST') + else: + return self.get_object('AssociateAddress', params, Address, + verb='POST') + + def associate_address(self, instance_id=None, public_ip=None, + allocation_id=None, network_interface_id=None, + private_ip_address=None, allow_reassociation=False, + dry_run=False): + """ + Associate an Elastic IP address with a currently running instance. + This requires one of ``public_ip`` or ``allocation_id`` depending + on if you're associating a VPC address or a plain EC2 address. + + When using an Allocation ID, make sure to pass ``None`` for ``public_ip`` + as EC2 expects a single parameter and if ``public_ip`` is passed boto + will preference that instead of ``allocation_id``. + + :type instance_id: string + :param instance_id: The ID of the instance + + :type public_ip: string + :param public_ip: The public IP address for EC2 based allocations. + + :type allocation_id: string + :param allocation_id: The allocation ID for a VPC-based elastic IP. + + :type network_interface_id: string + :param network_interface_id: The network interface ID to which + elastic IP is to be assigned to + + :type private_ip_address: string + :param private_ip_address: The primary or secondary private IP address + to associate with the Elastic IP address. + + :type allow_reassociation: bool + :param allow_reassociation: Specify this option to allow an Elastic IP + address that is already associated with another network interface + or instance to be re-associated with the specified instance or + interface. 
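+
+        Example (illustrative only; the IDs are hypothetical)::
+
+            conn.associate_address(instance_id='i-12345678',
+                                   allocation_id='eipalloc-12345678')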
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        return self._associate_address(True, instance_id=instance_id,
+                                       public_ip=public_ip, allocation_id=allocation_id,
+                                       network_interface_id=network_interface_id,
+                                       private_ip_address=private_ip_address,
+                                       allow_reassociation=allow_reassociation, dry_run=dry_run)
+
+    def associate_address_object(self, instance_id=None, public_ip=None,
+                                 allocation_id=None, network_interface_id=None,
+                                 private_ip_address=None, allow_reassociation=False,
+                                 dry_run=False):
+        """
+        Associate an Elastic IP address with a currently running instance.
+        This requires one of ``public_ip`` or ``allocation_id`` depending
+        on if you're associating a VPC address or a plain EC2 address.
+
+        When using an Allocation ID, make sure to pass ``None`` for ``public_ip``
+        as EC2 expects a single parameter and if ``public_ip`` is passed boto
+        will preference that instead of ``allocation_id``.
+
+        :type instance_id: string
+        :param instance_id: The ID of the instance
+
+        :type public_ip: string
+        :param public_ip: The public IP address for EC2 based allocations.
+
+        :type allocation_id: string
+        :param allocation_id: The allocation ID for a VPC-based elastic IP.
+
+        :type network_interface_id: string
+        :param network_interface_id: The network interface ID to which
+            the elastic IP is to be assigned
+
+        :type private_ip_address: string
+        :param private_ip_address: The primary or secondary private IP address
+            to associate with the Elastic IP address.
+
+        :type allow_reassociation: bool
+        :param allow_reassociation: Specify this option to allow an Elastic IP
+            address that is already associated with another network interface
+            or instance to be re-associated with the specified instance or
+            interface.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: :class:`boto.ec2.address.Address`
+        :return: The associated address instance
+        """
+        return self._associate_address(False, instance_id=instance_id,
+                                       public_ip=public_ip, allocation_id=allocation_id,
+                                       network_interface_id=network_interface_id,
+                                       private_ip_address=private_ip_address,
+                                       allow_reassociation=allow_reassociation, dry_run=dry_run)
+
+    def disassociate_address(self, public_ip=None, association_id=None,
+                             dry_run=False):
+        """
+        Disassociate an Elastic IP address from a currently running instance.
+
+        :type public_ip: string
+        :param public_ip: The public IP address for EC2 elastic IPs.
+
+        :type association_id: string
+        :param association_id: The association ID for a VPC-based elastic IP.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {}
+
+        # If there is an association id it trumps public ip
+        # in order to successfully disassociate with a VPC elastic ip
+        if association_id is not None:
+            params['AssociationId'] = association_id
+        elif public_ip is not None:
+            params['PublicIp'] = public_ip
+
+        if dry_run:
+            params['DryRun'] = 'true'
+
+        return self.get_status('DisassociateAddress', params, verb='POST')
+
+    def release_address(self, public_ip=None, allocation_id=None,
+                        dry_run=False):
+        """
+        Free up an Elastic IP address. Pass a public IP address to
+        release an EC2 Elastic IP address and an AllocationId to
+        release a VPC Elastic IP address. You should only pass
+        one value.
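+
+        Example (illustrative only; the allocation ID is hypothetical)::
+
+            conn.release_address(allocation_id='eipalloc-12345678')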
+ + This requires one of ``public_ip`` or ``allocation_id`` depending + on if you're associating a VPC address or a plain EC2 address. + + When using an Allocation ID, make sure to pass ``None`` for ``public_ip`` + as EC2 expects a single parameter and if ``public_ip`` is passed boto + will preference that instead of ``allocation_id``. + + :type public_ip: string + :param public_ip: The public IP address for EC2 elastic IPs. + + :type allocation_id: string + :param allocation_id: The Allocation ID for VPC elastic IPs. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {} + + if public_ip is not None: + params['PublicIp'] = public_ip + elif allocation_id is not None: + params['AllocationId'] = allocation_id + + if dry_run: + params['DryRun'] = 'true' + + return self.get_status('ReleaseAddress', params, verb='POST') + + def unassign_private_ip_addresses(self, network_interface_id=None, + private_ip_addresses=None, dry_run=False): + """ + Unassigns one or more secondary private IP addresses from a network + interface in Amazon VPC. + + :type network_interface_id: string + :param network_interface_id: The network interface from which the + secondary private IP address will be unassigned. + + :type private_ip_addresses: list + :param private_ip_addresses: Specifies the secondary private IP + addresses that you want to unassign from the network interface. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {} + + if network_interface_id is not None: + params['NetworkInterfaceId'] = network_interface_id + + if private_ip_addresses is not None: + self.build_list_params(params, private_ip_addresses, + 'PrivateIpAddress') + + if dry_run: + params['DryRun'] = 'true' + + return self.get_status('UnassignPrivateIpAddresses', params, + verb='POST') + + # Volume methods + + def get_all_volumes(self, volume_ids=None, filters=None, dry_run=False): + """ + Get all Volumes associated with the current credentials. + + :type volume_ids: list + :param volume_ids: Optional list of volume ids. If this list + is present, only the volumes associated with + these volume ids will be returned. + + :type filters: dict + :param filters: Optional filters that can be used to limit + the results returned. Filters are provided + in the form of a dictionary consisting of + filter names as the key and filter values + as the value. The set of allowable filter + names/values is dependent on the request + being performed. Check the EC2 API guide + for details. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list of :class:`boto.ec2.volume.Volume` + :return: The requested Volume objects + """ + params = {} + if volume_ids: + self.build_list_params(params, volume_ids, 'VolumeId') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeVolumes', params, + [('item', Volume)], verb='POST') + + def get_all_volume_status(self, volume_ids=None, + max_results=None, next_token=None, + filters=None, dry_run=False): + """ + Retrieve the status of one or more volumes. + + :type volume_ids: list + :param volume_ids: A list of strings of volume IDs + + :type max_results: int + :param max_results: The maximum number of paginated instance + items per response. 
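+
+        Example (an illustrative sketch; assumes ``conn`` is an
+        existing EC2Connection)::
+
+            statuses = conn.get_all_volume_status(
+                filters={'volume-status.status': 'impaired'})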
+ + :type next_token: str + :param next_token: A string specifying the next paginated set + of results to return. + + :type filters: dict + :param filters: Optional filters that can be used to limit + the results returned. Filters are provided + in the form of a dictionary consisting of + filter names as the key and filter values + as the value. The set of allowable filter + names/values is dependent on the request + being performed. Check the EC2 API guide + for details. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of volume status. + """ + params = {} + if volume_ids: + self.build_list_params(params, volume_ids, 'VolumeId') + if max_results: + params['MaxResults'] = max_results + if next_token: + params['NextToken'] = next_token + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_object('DescribeVolumeStatus', params, + VolumeStatusSet, verb='POST') + + def enable_volume_io(self, volume_id, dry_run=False): + """ + Enables I/O operations for a volume that had I/O operations + disabled because the data on the volume was potentially inconsistent. + + :type volume_id: str + :param volume_id: The ID of the volume. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {'VolumeId': volume_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('EnableVolumeIO', params, verb='POST') + + def get_volume_attribute(self, volume_id, + attribute='autoEnableIO', dry_run=False): + """ + Describes attribute of the volume. + + :type volume_id: str + :param volume_id: The ID of the volume. + + :type attribute: str + :param attribute: The requested attribute. Valid values are: + + * autoEnableIO + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list of :class:`boto.ec2.volume.VolumeAttribute` + :return: The requested Volume attribute + """ + params = {'VolumeId': volume_id, 'Attribute': attribute} + if dry_run: + params['DryRun'] = 'true' + return self.get_object('DescribeVolumeAttribute', params, + VolumeAttribute, verb='POST') + + def modify_volume_attribute(self, volume_id, attribute, new_value, + dry_run=False): + """ + Changes an attribute of an Volume. + + :type volume_id: string + :param volume_id: The volume id you wish to change + + :type attribute: string + :param attribute: The attribute you wish to change. Valid values are: + AutoEnableIO. + + :type new_value: string + :param new_value: The new value of the attribute. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + params = {'VolumeId': volume_id} + if attribute == 'AutoEnableIO': + params['AutoEnableIO.Value'] = new_value + if dry_run: + params['DryRun'] = 'true' + return self.get_status('ModifyVolumeAttribute', params, verb='POST') + + def create_volume(self, size, zone, snapshot=None, volume_type=None, + iops=None, encrypted=False, dry_run=False): + """ + Create a new EBS Volume. + + :type size: int + :param size: The size of the new volume, in GiB + + :type zone: string or :class:`boto.ec2.zone.Zone` + :param zone: The availability zone in which the Volume will be created. + + :type snapshot: string or :class:`boto.ec2.snapshot.Snapshot` + :param snapshot: The snapshot from which the new Volume will be + created. 
+
+        :type volume_type: string
+        :param volume_type: The type of the volume (optional). Valid
+            values are: standard | io1 | gp2.
+
+        :type iops: int
+        :param iops: The provisioned IOPS you want to associate with
+            this volume. (optional)
+
+        :type encrypted: bool
+        :param encrypted: Specifies whether the volume should be encrypted.
+            (optional)
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        """
+        if isinstance(zone, Zone):
+            zone = zone.name
+        params = {'AvailabilityZone': zone}
+        if size:
+            params['Size'] = size
+        if snapshot:
+            if isinstance(snapshot, Snapshot):
+                snapshot = snapshot.id
+            params['SnapshotId'] = snapshot
+        if volume_type:
+            params['VolumeType'] = volume_type
+        if iops:
+            params['Iops'] = str(iops)
+        if encrypted:
+            params['Encrypted'] = 'true'
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_object('CreateVolume', params, Volume, verb='POST')
+
+    def delete_volume(self, volume_id, dry_run=False):
+        """
+        Delete an EBS volume.
+
+        :type volume_id: str
+        :param volume_id: The ID of the volume to be deleted.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {'VolumeId': volume_id}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('DeleteVolume', params, verb='POST')
+
+    def attach_volume(self, volume_id, instance_id, device, dry_run=False):
+        """
+        Attach an EBS volume to an EC2 instance.
+
+        :type volume_id: str
+        :param volume_id: The ID of the EBS volume to be attached.
+
+        :type instance_id: str
+        :param instance_id: The ID of the EC2 instance to which it will
+            be attached.
+
+        :type device: str
+        :param device: The device on the instance through which the
+            volume will be exposed (e.g. /dev/sdh)
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {'InstanceId': instance_id,
+                  'VolumeId': volume_id,
+                  'Device': device}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('AttachVolume', params, verb='POST')
+
+    def detach_volume(self, volume_id, instance_id=None,
+                      device=None, force=False, dry_run=False):
+        """
+        Detach an EBS volume from an EC2 instance.
+
+        :type volume_id: str
+        :param volume_id: The ID of the EBS volume to be detached.
+
+        :type instance_id: str
+        :param instance_id: The ID of the EC2 instance from which it will
+            be detached.
+
+        :type device: str
+        :param device: The device on the instance through which the
+            volume is exposed (e.g. /dev/sdh)
+
+        :type force: bool
+        :param force: Forces detachment if the previous detachment
+            attempt did not occur cleanly. This option can lead to
+            data loss or a corrupted file system. Use this option only
+            as a last resort to detach a volume from a failed
+            instance. The instance will not have an opportunity to
+            flush file system caches or file system metadata. If you
+            use this option, you must perform file system check and
+            repair procedures.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {'VolumeId': volume_id}
+        if instance_id:
+            params['InstanceId'] = instance_id
+        if device:
+            params['Device'] = device
+        if force:
+            params['Force'] = 'true'
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('DetachVolume', params, verb='POST')
+
+    # Snapshot methods
+
+    def get_all_snapshots(self, snapshot_ids=None,
+                          owner=None, restorable_by=None,
+                          filters=None, dry_run=False):
+        """
+        Get all EBS Snapshots associated with the current credentials.
+
+        :type snapshot_ids: list
+        :param snapshot_ids: Optional list of snapshot ids. If this list is
+            present, only the Snapshots associated with
+            these snapshot ids will be returned.
+
+        :type owner: str or list
+        :param owner: If present, only the snapshots owned by the specified user(s)
+            will be returned. Valid values are:
+
+            * self
+            * amazon
+            * AWS Account ID
+
+        :type restorable_by: str or list
+        :param restorable_by: If present, only the snapshots that are restorable
+            by the specified account id(s) will be returned.
+
+        :type filters: dict
+        :param filters: Optional filters that can be used to limit
+            the results returned. Filters are provided
+            in the form of a dictionary consisting of
+            filter names as the key and filter values
+            as the value. The set of allowable filter
+            names/values is dependent on the request
+            being performed. Check the EC2 API guide
+            for details.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list of :class:`boto.ec2.snapshot.Snapshot`
+        :return: The requested Snapshot objects
+        """
+        params = {}
+        if snapshot_ids:
+            self.build_list_params(params, snapshot_ids, 'SnapshotId')
+
+        if owner:
+            self.build_list_params(params, owner, 'Owner')
+        if restorable_by:
+            self.build_list_params(params, restorable_by, 'RestorableBy')
+        if filters:
+            self.build_filter_params(params, filters)
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('DescribeSnapshots', params,
+                             [('item', Snapshot)], verb='POST')
+
+    def create_snapshot(self, volume_id, description=None, dry_run=False):
+        """
+        Create a snapshot of an existing EBS Volume.
+
+        :type volume_id: str
+        :param volume_id: The ID of the volume to be snapshotted
+
+        :type description: str
+        :param description: A description of the snapshot.
+            Limited to 255 characters.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: :class:`boto.ec2.snapshot.Snapshot`
+        :return: The created Snapshot object
+        """
+        params = {'VolumeId': volume_id}
+        if description:
+            params['Description'] = description[0:255]
+        if dry_run:
+            params['DryRun'] = 'true'
+        snapshot = self.get_object('CreateSnapshot', params,
+                                   Snapshot, verb='POST')
+        volume = self.get_all_volumes([volume_id], dry_run=dry_run)[0]
+        volume_name = volume.tags.get('Name')
+        if volume_name:
+            snapshot.add_tag('Name', volume_name)
+        return snapshot
+
+    def delete_snapshot(self, snapshot_id, dry_run=False):
+        """
+        Delete a snapshot of an EBS volume.
+
+        :type snapshot_id: str
+        :param snapshot_id: The ID of the snapshot to be deleted.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        """
+        params = {'SnapshotId': snapshot_id}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('DeleteSnapshot', params, verb='POST')
+
+    def copy_snapshot(self, source_region, source_snapshot_id,
+                      description=None, dry_run=False):
+        """
+        Copies a point-in-time snapshot of an Amazon Elastic Block Store
+        (Amazon EBS) volume and stores it in Amazon Simple Storage Service
+        (Amazon S3).
You can copy the snapshot within the same region or from + one region to another. You can use the snapshot to create new Amazon + EBS volumes or Amazon Machine Images (AMIs). + + + :type source_region: str + :param source_region: The ID of the AWS region that contains the + snapshot to be copied (e.g 'us-east-1', 'us-west-2', etc.). + + :type source_snapshot_id: str + :param source_snapshot_id: The ID of the Amazon EBS snapshot to copy + + :type description: str + :param description: A description of the new Amazon EBS snapshot. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: str + :return: The snapshot ID + + """ + params = { + 'SourceRegion': source_region, + 'SourceSnapshotId': source_snapshot_id, + } + if description is not None: + params['Description'] = description + if dry_run: + params['DryRun'] = 'true' + snapshot = self.get_object('CopySnapshot', params, Snapshot, + verb='POST') + return snapshot.id + + def trim_snapshots(self, hourly_backups=8, daily_backups=7, + weekly_backups=4, monthly_backups=True): + """ + Trim excess snapshots, based on when they were taken. More current + snapshots are retained, with the number retained decreasing as you + move back in time. + + If ebs volumes have a 'Name' tag with a value, their snapshots + will be assigned the same tag when they are created. The values + of the 'Name' tags for snapshots are used by this function to + group snapshots taken from the same volume (or from a series + of like-named volumes over time) for trimming. + + For every group of like-named snapshots, this function retains + the newest and oldest snapshots, as well as, by default, the + first snapshots taken in each of the last eight hours, the first + snapshots taken in each of the last seven days, the first snapshots + taken in the last 4 weeks (counting Midnight Sunday morning as + the start of the week), and the first snapshot from the first + day of each month forever. + + :type hourly_backups: int + :param hourly_backups: How many recent hourly backups should be saved. + + :type daily_backups: int + :param daily_backups: How many recent daily backups should be saved. + + :type weekly_backups: int + :param weekly_backups: How many recent weekly backups should be saved. + + :type monthly_backups: int + :param monthly_backups: How many monthly backups should be saved. Use True for no limit. + """ + + # This function first builds up an ordered list of target times + # that snapshots should be saved for (last 8 hours, last 7 days, etc.). + # Then a map of snapshots is constructed, with the keys being + # the snapshot / volume names and the values being arrays of + # chronologically sorted snapshots. + # Finally, for each array in the map, we go through the snapshot + # array and the target time array in an interleaved fashion, + # deleting snapshots whose start_times don't immediately follow a + # target time (we delete a snapshot if there's another snapshot + # that was made closer to the preceding target time). 
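+        # As a hedged illustration (a hypothetical run with the default
+        # arguments at 2015-08-28 15:30 UTC): the target times would be
+        # 15:00, 14:00, ... back eight hours, then midnight of each of
+        # the last seven days, the last four Sunday midnights, and the
+        # first of each month back to 1/1/2007.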
+
+        now = datetime.utcnow()
+        last_hour = datetime(now.year, now.month, now.day, now.hour)
+        last_midnight = datetime(now.year, now.month, now.day)
+        last_sunday = datetime(now.year, now.month, now.day) - timedelta(days=(now.weekday() + 1) % 7)
+        start_of_month = datetime(now.year, now.month, 1)
+
+        target_backup_times = []
+
+        # there are no snapshots older than 1/1/2007
+        oldest_snapshot_date = datetime(2007, 1, 1)
+
+        for hour in range(0, hourly_backups):
+            target_backup_times.append(last_hour - timedelta(hours=hour))
+
+        for day in range(0, daily_backups):
+            target_backup_times.append(last_midnight - timedelta(days=day))
+
+        for week in range(0, weekly_backups):
+            target_backup_times.append(last_sunday - timedelta(weeks=week))
+
+        one_day = timedelta(days=1)
+        monthly_snapshots_added = 0
+        while (start_of_month > oldest_snapshot_date and
+               (monthly_backups is True or
+                monthly_snapshots_added < monthly_backups)):
+            # append the start of the month to the list of
+            # snapshot dates to save:
+            target_backup_times.append(start_of_month)
+            monthly_snapshots_added += 1
+            # there's no timedelta setting for one month, so instead:
+            # decrement the day by one, so we go to the final day of
+            # the previous month...
+            start_of_month -= one_day
+            # ... and then go to the first day of that previous month:
+            start_of_month = datetime(start_of_month.year,
+                                      start_of_month.month, 1)
+
+        # de-duplicate the target times while preserving their order
+        temp = []
+
+        for t in target_backup_times:
+            if t not in temp:
+                temp.append(t)
+
+        # sort to make the oldest dates first, and make sure the month start
+        # and last four week's start are in the proper order
+        target_backup_times = sorted(temp)
+
+        # get all the snapshots, sort them by date and time, and
+        # organize them into one array for each volume:
+        all_snapshots = self.get_all_snapshots(owner='self')
+        all_snapshots.sort(key=lambda x: x.start_time)
+        snaps_for_each_volume = {}
+        for snap in all_snapshots:
+            # the snapshot name and the volume name are the same.
+            # The snapshot name is set from the volume
+            # name at the time the snapshot is taken
+            volume_name = snap.tags.get('Name')
+            if volume_name:
+                # only examine snapshots that have a volume name
+                snaps_for_volume = snaps_for_each_volume.get(volume_name)
+                if not snaps_for_volume:
+                    snaps_for_volume = []
+                    snaps_for_each_volume[volume_name] = snaps_for_volume
+                snaps_for_volume.append(snap)
+
+        # Do a running comparison of snapshot dates to desired time
+        # periods, keeping the oldest snapshot in each
+        # time period and deleting the rest:
+        for volume_name in snaps_for_each_volume:
+            snaps = snaps_for_each_volume[volume_name]
+            snaps = snaps[:-1]  # never delete the newest snapshot
+            time_period_number = 0
+            snap_found_for_this_time_period = False
+            for snap in snaps:
+                check_this_snap = True
+                while check_this_snap and time_period_number < len(target_backup_times):
+                    snap_date = datetime.strptime(snap.start_time,
+                                                  '%Y-%m-%dT%H:%M:%S.000Z')
+                    if snap_date < target_backup_times[time_period_number]:
+                        # the snap date is before the cutoff date.
+
+                        # Figure out if it's the first snap in this
+                        # date range and act accordingly (since both
+                        # the date ranges and the snapshots
+                        # are sorted chronologically, we know this
+                        # snapshot isn't in an earlier date range):
+                        if snap_found_for_this_time_period:
+                            if not snap.tags.get('preserve_snapshot'):
+                                # as long as the snapshot wasn't marked
+                                # with the 'preserve_snapshot' tag, delete it:
+                                try:
+                                    self.delete_snapshot(snap.id)
+                                    boto.log.info('Trimmed snapshot %s (%s)' % (snap.tags['Name'], snap.start_time))
+                                except EC2ResponseError:
+                                    boto.log.error('Attempt to trim snapshot %s (%s) failed. Possible result of a race condition with trimming on another server?' % (snap.tags['Name'], snap.start_time))
+                            # go on and look at the next snapshot,
+                            # leaving the time period alone
+                        else:
+                            # this was the first snapshot found for this
+                            # time period. Leave it alone and look at the
+                            # next snapshot:
+                            snap_found_for_this_time_period = True
+                        check_this_snap = False
+                    else:
+                        # the snap is after the cutoff date. Check it
+                        # against the next cutoff date
+                        time_period_number += 1
+                        snap_found_for_this_time_period = False
+
+    def get_snapshot_attribute(self, snapshot_id,
+                               attribute='createVolumePermission',
+                               dry_run=False):
+        """
+        Get information about an attribute of a snapshot. Only one attribute
+        can be specified per call.
+
+        :type snapshot_id: str
+        :param snapshot_id: The ID of the snapshot.
+
+        :type attribute: str
+        :param attribute: The requested attribute. Valid values are:
+
+            * createVolumePermission
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list of :class:`boto.ec2.snapshotattribute.SnapshotAttribute`
+        :return: The requested Snapshot attribute
+        """
+        params = {'Attribute': attribute}
+        if snapshot_id:
+            params['SnapshotId'] = snapshot_id
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_object('DescribeSnapshotAttribute', params,
+                               SnapshotAttribute, verb='POST')
+
+    def modify_snapshot_attribute(self, snapshot_id,
+                                  attribute='createVolumePermission',
+                                  operation='add', user_ids=None, groups=None,
+                                  dry_run=False):
+        """
+        Changes an attribute of a snapshot.
+
+        :type snapshot_id: string
+        :param snapshot_id: The snapshot id you wish to change
+
+        :type attribute: string
+        :param attribute: The attribute you wish to change. Valid values are:
+            createVolumePermission
+
+        :type operation: string
+        :param operation: Either add or remove (this is required for changing
+            snapshot permissions)
+
+        :type user_ids: list
+        :param user_ids: The Amazon IDs of users to add/remove attributes
+
+        :type groups: list
+        :param groups: The groups to add/remove attributes. The only valid
+            value at this time is 'all'.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        """
+        params = {'SnapshotId': snapshot_id,
+                  'Attribute': attribute,
+                  'OperationType': operation}
+        if user_ids:
+            self.build_list_params(params, user_ids, 'UserId')
+        if groups:
+            self.build_list_params(params, groups, 'UserGroup')
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('ModifySnapshotAttribute', params, verb='POST')
+
+    def reset_snapshot_attribute(self, snapshot_id,
+                                 attribute='createVolumePermission',
+                                 dry_run=False):
+        """
+        Resets an attribute of a snapshot to its default value.
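+
+        A minimal sketch (hypothetical snapshot ID)::
+
+            conn.reset_snapshot_attribute('snap-1234abcd')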
+ + :type snapshot_id: string + :param snapshot_id: ID of the snapshot + + :type attribute: string + :param attribute: The attribute to reset + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: Whether the operation succeeded or not + """ + params = {'SnapshotId': snapshot_id, + 'Attribute': attribute} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('ResetSnapshotAttribute', params, verb='POST') + + # Keypair methods + + def get_all_key_pairs(self, keynames=None, filters=None, dry_run=False): + """ + Get all key pairs associated with your account. + + :type keynames: list + :param keynames: A list of the names of keypairs to retrieve. + If not provided, all key pairs will be returned. + + :type filters: dict + :param filters: Optional filters that can be used to limit the + results returned. Filters are provided in the form of a + dictionary consisting of filter names as the key and + filter values as the value. The set of allowable filter + names/values is dependent on the request being performed. + Check the EC2 API guide for details. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.ec2.keypair.KeyPair` + """ + params = {} + if keynames: + self.build_list_params(params, keynames, 'KeyName') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeKeyPairs', params, + [('item', KeyPair)], verb='POST') + + def get_key_pair(self, keyname, dry_run=False): + """ + Convenience method to retrieve a specific keypair (KeyPair). + + :type keyname: string + :param keyname: The name of the keypair to retrieve + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: :class:`boto.ec2.keypair.KeyPair` + :return: The KeyPair specified or None if it is not found + """ + try: + return self.get_all_key_pairs( + keynames=[keyname], + dry_run=dry_run + )[0] + except self.ResponseError as e: + if e.code == 'InvalidKeyPair.NotFound': + return None + else: + raise + + def create_key_pair(self, key_name, dry_run=False): + """ + Create a new key pair for your account. + This will create the key pair within the region you + are currently connected to. + + :type key_name: string + :param key_name: The name of the new keypair + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: :class:`boto.ec2.keypair.KeyPair` + :return: The newly created :class:`boto.ec2.keypair.KeyPair`. + The material attribute of the new KeyPair object + will contain the the unencrypted PEM encoded RSA private key. + """ + params = {'KeyName': key_name} + if dry_run: + params['DryRun'] = 'true' + return self.get_object('CreateKeyPair', params, KeyPair, verb='POST') + + def delete_key_pair(self, key_name, dry_run=False): + """ + Delete a key pair from your account. + + :type key_name: string + :param key_name: The name of the keypair to delete + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + params = {'KeyName': key_name} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteKeyPair', params, verb='POST') + + def import_key_pair(self, key_name, public_key_material, dry_run=False): + """ + imports the public key from an RSA key pair that you created + with a third-party tool. 
+ + Supported formats: + + * OpenSSH public key format (e.g., the format + in ~/.ssh/authorized_keys) + + * Base64 encoded DER format + + * SSH public key file format as specified in RFC4716 + + DSA keys are not supported. Make sure your key generator is + set up to create RSA keys. + + Supported lengths: 1024, 2048, and 4096. + + :type key_name: string + :param key_name: The name of the new keypair + + :type public_key_material: string + :param public_key_material: The public key. You must base64 encode + the public key material before sending + it to AWS. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: :class:`boto.ec2.keypair.KeyPair` + :return: A :class:`boto.ec2.keypair.KeyPair` object representing + the newly imported key pair. This object will contain only + the key name and the fingerprint. + """ + public_key_material = base64.b64encode(public_key_material) + params = {'KeyName': key_name, + 'PublicKeyMaterial': public_key_material} + if dry_run: + params['DryRun'] = 'true' + return self.get_object('ImportKeyPair', params, KeyPair, verb='POST') + + # SecurityGroup methods + + def get_all_security_groups(self, groupnames=None, group_ids=None, + filters=None, dry_run=False): + """ + Get all security groups associated with your account in a region. + + :type groupnames: list + :param groupnames: A list of the names of security groups to retrieve. + If not provided, all security groups will be + returned. + + :type group_ids: list + :param group_ids: A list of IDs of security groups to retrieve for + security groups within a VPC. + + :type filters: dict + :param filters: Optional filters that can be used to limit + the results returned. Filters are provided + in the form of a dictionary consisting of + filter names as the key and filter values + as the value. The set of allowable filter + names/values is dependent on the request + being performed. Check the EC2 API guide + for details. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.ec2.securitygroup.SecurityGroup` + """ + params = {} + if groupnames is not None: + self.build_list_params(params, groupnames, 'GroupName') + if group_ids is not None: + self.build_list_params(params, group_ids, 'GroupId') + if filters is not None: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeSecurityGroups', params, + [('item', SecurityGroup)], verb='POST') + + def create_security_group(self, name, description, vpc_id=None, + dry_run=False): + """ + Create a new security group for your account. + This will create the security group within the region you + are currently connected to. + + :type name: string + :param name: The name of the new security group + + :type description: string + :param description: The description of the new security group + + :type vpc_id: string + :param vpc_id: The ID of the VPC to create the security group in, + if any. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: :class:`boto.ec2.securitygroup.SecurityGroup` + :return: The newly created :class:`boto.ec2.securitygroup.SecurityGroup`. 
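+
+        A minimal sketch (hypothetical names; assumes the returned
+        :class:`boto.ec2.securitygroup.SecurityGroup` object's ``authorize``
+        helper)::
+
+            web = conn.create_security_group('web', 'Web servers')
+            web.authorize('tcp', 80, 80, '0.0.0.0/0')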
+
+        """
+        params = {'GroupName': name,
+                  'GroupDescription': description}
+
+        if vpc_id is not None:
+            params['VpcId'] = vpc_id
+
+        if dry_run:
+            params['DryRun'] = 'true'
+
+        group = self.get_object('CreateSecurityGroup', params,
+                                SecurityGroup, verb='POST')
+        group.name = name
+        group.description = description
+        if vpc_id is not None:
+            group.vpc_id = vpc_id
+        return group
+
+    def delete_security_group(self, name=None, group_id=None, dry_run=False):
+        """
+        Delete a security group from your account.
+
+        :type name: string
+        :param name: The name of the security group to delete.
+
+        :type group_id: string
+        :param group_id: The ID of the security group to delete within
+            a VPC.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful.
+        """
+        params = {}
+
+        if name is not None:
+            params['GroupName'] = name
+        elif group_id is not None:
+            params['GroupId'] = group_id
+
+        if dry_run:
+            params['DryRun'] = 'true'
+
+        return self.get_status('DeleteSecurityGroup', params, verb='POST')
+
+    def authorize_security_group_deprecated(self, group_name,
+                                            src_security_group_name=None,
+                                            src_security_group_owner_id=None,
+                                            ip_protocol=None,
+                                            from_port=None, to_port=None,
+                                            cidr_ip=None, dry_run=False):
+        """
+        NOTE: This method uses the old-style request parameters
+        that did not allow a port to be specified when
+        authorizing a group.
+
+        :type group_name: string
+        :param group_name: The name of the security group you are adding
+            the rule to.
+
+        :type src_security_group_name: string
+        :param src_security_group_name: The name of the security group you are
+            granting access to.
+
+        :type src_security_group_owner_id: string
+        :param src_security_group_owner_id: The ID of the owner of the security
+            group you are granting access to.
+
+        :type ip_protocol: string
+        :param ip_protocol: Either tcp | udp | icmp
+
+        :type from_port: int
+        :param from_port: The beginning port number you are enabling
+
+        :type to_port: int
+        :param to_port: The ending port number you are enabling
+
+        :type cidr_ip: string
+        :param cidr_ip: The CIDR block you are providing access to.
+            See http://goo.gl/Yj5QC
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful.
+        """
+        params = {'GroupName': group_name}
+        if src_security_group_name:
+            params['SourceSecurityGroupName'] = src_security_group_name
+        if src_security_group_owner_id:
+            params['SourceSecurityGroupOwnerId'] = src_security_group_owner_id
+        if ip_protocol:
+            params['IpProtocol'] = ip_protocol
+        if from_port:
+            params['FromPort'] = from_port
+        if to_port:
+            params['ToPort'] = to_port
+        if cidr_ip:
+            params['CidrIp'] = cidr_ip
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('AuthorizeSecurityGroupIngress', params)
+
+    def authorize_security_group(self, group_name=None,
+                                 src_security_group_name=None,
+                                 src_security_group_owner_id=None,
+                                 ip_protocol=None,
+                                 from_port=None, to_port=None,
+                                 cidr_ip=None, group_id=None,
+                                 src_security_group_group_id=None,
+                                 dry_run=False):
+        """
+        Add a new rule to an existing security group.
+        You need to pass in either src_security_group_name and
+        src_security_group_owner_id OR ip_protocol, from_port, to_port,
+        and cidr_ip. In other words, either you are authorizing another
+        group or you are authorizing some ip-based rule.
+
+        :type group_name: string
+        :param group_name: The name of the security group you are adding
+            the rule to.
+ + :type src_security_group_name: string + :param src_security_group_name: The name of the security group you are + granting access to. + + :type src_security_group_owner_id: string + :param src_security_group_owner_id: The ID of the owner of the security + group you are granting access to. + + :type ip_protocol: string + :param ip_protocol: Either tcp | udp | icmp + + :type from_port: int + :param from_port: The beginning port number you are enabling + + :type to_port: int + :param to_port: The ending port number you are enabling + + :type cidr_ip: string or list of strings + :param cidr_ip: The CIDR block you are providing access to. + See http://goo.gl/Yj5QC + + :type group_id: string + :param group_id: ID of the EC2 or VPC security group to + modify. This is required for VPC security groups and can + be used instead of group_name for EC2 security groups. + + :type src_security_group_group_id: string + :param src_security_group_group_id: The ID of the security + group you are granting access to. Can be used instead of + src_security_group_name + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful. + """ + if src_security_group_name: + if from_port is None and to_port is None and ip_protocol is None: + return self.authorize_security_group_deprecated( + group_name, src_security_group_name, + src_security_group_owner_id) + + params = {} + + if group_name: + params['GroupName'] = group_name + if group_id: + params['GroupId'] = group_id + if src_security_group_name: + param_name = 'IpPermissions.1.Groups.1.GroupName' + params[param_name] = src_security_group_name + if src_security_group_owner_id: + param_name = 'IpPermissions.1.Groups.1.UserId' + params[param_name] = src_security_group_owner_id + if src_security_group_group_id: + param_name = 'IpPermissions.1.Groups.1.GroupId' + params[param_name] = src_security_group_group_id + if ip_protocol: + params['IpPermissions.1.IpProtocol'] = ip_protocol + if from_port is not None: + params['IpPermissions.1.FromPort'] = from_port + if to_port is not None: + params['IpPermissions.1.ToPort'] = to_port + if cidr_ip: + if not isinstance(cidr_ip, list): + cidr_ip = [cidr_ip] + for i, single_cidr_ip in enumerate(cidr_ip): + params['IpPermissions.1.IpRanges.%d.CidrIp' % (i + 1)] = \ + single_cidr_ip + if dry_run: + params['DryRun'] = 'true' + + return self.get_status('AuthorizeSecurityGroupIngress', + params, verb='POST') + + def authorize_security_group_egress(self, + group_id, + ip_protocol, + from_port=None, + to_port=None, + src_group_id=None, + cidr_ip=None, + dry_run=False): + """ + The action adds one or more egress rules to a VPC security + group. Specifically, this action permits instances in a + security group to send traffic to one or more destination + CIDR IP address ranges, or to one or more destination + security groups in the same VPC. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. 
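+
+        A minimal sketch (hypothetical group ID), allowing instances in the
+        group to reach any host on port 443::
+
+            conn.authorize_security_group_egress('sg-1234abcd', 'tcp',
+                                                 from_port=443, to_port=443,
+                                                 cidr_ip='0.0.0.0/0')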
+
+        """
+        params = {
+            'GroupId': group_id,
+            'IpPermissions.1.IpProtocol': ip_protocol
+        }
+
+        if from_port is not None:
+            params['IpPermissions.1.FromPort'] = from_port
+        if to_port is not None:
+            params['IpPermissions.1.ToPort'] = to_port
+        if src_group_id is not None:
+            params['IpPermissions.1.Groups.1.GroupId'] = src_group_id
+        if cidr_ip is not None:
+            params['IpPermissions.1.IpRanges.1.CidrIp'] = cidr_ip
+        if dry_run:
+            params['DryRun'] = 'true'
+
+        return self.get_status('AuthorizeSecurityGroupEgress',
+                               params, verb='POST')
+
+    def revoke_security_group_deprecated(self, group_name,
+                                         src_security_group_name=None,
+                                         src_security_group_owner_id=None,
+                                         ip_protocol=None,
+                                         from_port=None, to_port=None,
+                                         cidr_ip=None, dry_run=False):
+        """
+        NOTE: This method uses the old-style request parameters
+        that did not allow a port to be specified when
+        authorizing a group.
+
+        Remove an existing rule from an existing security group.
+        You need to pass in either src_security_group_name and
+        src_security_group_owner_id OR ip_protocol, from_port, to_port,
+        and cidr_ip. In other words, either you are revoking another
+        group or you are revoking some ip-based rule.
+
+        :type group_name: string
+        :param group_name: The name of the security group you are removing
+            the rule from.
+
+        :type src_security_group_name: string
+        :param src_security_group_name: The name of the security group you are
+            revoking access to.
+
+        :type src_security_group_owner_id: string
+        :param src_security_group_owner_id: The ID of the owner of the security
+            group you are revoking access to.
+
+        :type ip_protocol: string
+        :param ip_protocol: Either tcp | udp | icmp
+
+        :type from_port: int
+        :param from_port: The beginning port number you are disabling
+
+        :type to_port: int
+        :param to_port: The ending port number you are disabling
+
+        :type cidr_ip: string
+        :param cidr_ip: The CIDR block you are revoking access to.
+            See http://goo.gl/Yj5QC
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful.
+        """
+        params = {'GroupName': group_name}
+        if src_security_group_name:
+            params['SourceSecurityGroupName'] = src_security_group_name
+        if src_security_group_owner_id:
+            params['SourceSecurityGroupOwnerId'] = src_security_group_owner_id
+        if ip_protocol:
+            params['IpProtocol'] = ip_protocol
+        if from_port:
+            params['FromPort'] = from_port
+        if to_port:
+            params['ToPort'] = to_port
+        if cidr_ip:
+            params['CidrIp'] = cidr_ip
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('RevokeSecurityGroupIngress', params)
+
+    def revoke_security_group(self, group_name=None,
+                              src_security_group_name=None,
+                              src_security_group_owner_id=None,
+                              ip_protocol=None, from_port=None, to_port=None,
+                              cidr_ip=None, group_id=None,
+                              src_security_group_group_id=None, dry_run=False):
+        """
+        Remove an existing rule from an existing security group.
+        You need to pass in either src_security_group_name and
+        src_security_group_owner_id OR ip_protocol, from_port, to_port,
+        and cidr_ip. In other words, either you are revoking another
+        group or you are revoking some ip-based rule.
+
+        :type group_name: string
+        :param group_name: The name of the security group you are removing
+            the rule from.
+
+        :type src_security_group_name: string
+        :param src_security_group_name: The name of the security group you are
+            revoking access to.
+
+        :type src_security_group_owner_id: string
+        :param src_security_group_owner_id: The ID of the owner of the security
+            group you are revoking access to.
+
+        :type ip_protocol: string
+        :param ip_protocol: Either tcp | udp | icmp
+
+        :type from_port: int
+        :param from_port: The beginning port number you are disabling
+
+        :type to_port: int
+        :param to_port: The ending port number you are disabling
+
+        :type cidr_ip: string
+        :param cidr_ip: The CIDR block you are revoking access to.
+            See http://goo.gl/Yj5QC
+
+        :type group_id: string
+        :param group_id: ID of the EC2 or VPC security group to
+            modify. This is required for VPC security groups and can
+            be used instead of group_name for EC2 security groups.
+
+        :type src_security_group_group_id: string
+        :param src_security_group_group_id: The ID of the security group
+            for which you are revoking access. Can be used instead
+            of src_security_group_name
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful.
+        """
+        if src_security_group_name:
+            if from_port is None and to_port is None and ip_protocol is None:
+                return self.revoke_security_group_deprecated(
+                    group_name, src_security_group_name,
+                    src_security_group_owner_id)
+        params = {}
+        if group_name is not None:
+            params['GroupName'] = group_name
+        if group_id is not None:
+            params['GroupId'] = group_id
+        if src_security_group_name:
+            param_name = 'IpPermissions.1.Groups.1.GroupName'
+            params[param_name] = src_security_group_name
+        if src_security_group_group_id:
+            param_name = 'IpPermissions.1.Groups.1.GroupId'
+            params[param_name] = src_security_group_group_id
+        if src_security_group_owner_id:
+            param_name = 'IpPermissions.1.Groups.1.UserId'
+            params[param_name] = src_security_group_owner_id
+        if ip_protocol:
+            params['IpPermissions.1.IpProtocol'] = ip_protocol
+        if from_port is not None:
+            params['IpPermissions.1.FromPort'] = from_port
+        if to_port is not None:
+            params['IpPermissions.1.ToPort'] = to_port
+        if cidr_ip:
+            params['IpPermissions.1.IpRanges.1.CidrIp'] = cidr_ip
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('RevokeSecurityGroupIngress',
+                               params, verb='POST')
+
+    def revoke_security_group_egress(self,
+                                     group_id,
+                                     ip_protocol,
+                                     from_port=None,
+                                     to_port=None,
+                                     src_group_id=None,
+                                     cidr_ip=None, dry_run=False):
+        """
+        Remove an existing egress rule from an existing VPC security
+        group. You need to pass in an ip_protocol, from_port and
+        to_port range only if the protocol you are using is
+        port-based. You also need to pass in either a src_group_id or
+        cidr_ip.
+
+        :type group_id: string
+        :param group_id: The ID of the security group you are removing
+            the rule from.
+
+        :type ip_protocol: string
+        :param ip_protocol: Either tcp | udp | icmp | -1
+
+        :type from_port: int
+        :param from_port: The beginning port number you are disabling
+
+        :type to_port: int
+        :param to_port: The ending port number you are disabling
+
+        :type src_group_id: string
+        :param src_group_id: The source security group you are
+            revoking access to.
+
+        :type cidr_ip: string
+        :param cidr_ip: The CIDR block you are revoking access to.
+            See http://goo.gl/Yj5QC
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful.
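+
+        A sketch mirroring the authorize call above (hypothetical group ID)::
+
+            conn.revoke_security_group_egress('sg-1234abcd', 'tcp',
+                                              from_port=443, to_port=443,
+                                              cidr_ip='0.0.0.0/0')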
+ """ + + params = {} + if group_id: + params['GroupId'] = group_id + if ip_protocol: + params['IpPermissions.1.IpProtocol'] = ip_protocol + if from_port is not None: + params['IpPermissions.1.FromPort'] = from_port + if to_port is not None: + params['IpPermissions.1.ToPort'] = to_port + if src_group_id is not None: + params['IpPermissions.1.Groups.1.GroupId'] = src_group_id + if cidr_ip: + params['IpPermissions.1.IpRanges.1.CidrIp'] = cidr_ip + if dry_run: + params['DryRun'] = 'true' + return self.get_status('RevokeSecurityGroupEgress', + params, verb='POST') + + # + # Regions + # + + def get_all_regions(self, region_names=None, filters=None, dry_run=False): + """ + Get all available regions for the EC2 service. + + :type region_names: list of str + :param region_names: Names of regions to limit output + + :type filters: dict + :param filters: Optional filters that can be used to limit + the results returned. Filters are provided + in the form of a dictionary consisting of + filter names as the key and filter values + as the value. The set of allowable filter + names/values is dependent on the request + being performed. Check the EC2 API guide + for details. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.ec2.regioninfo.RegionInfo` + """ + params = {} + if region_names: + self.build_list_params(params, region_names, 'RegionName') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + regions = self.get_list('DescribeRegions', params, + [('item', RegionInfo)], verb='POST') + for region in regions: + region.connection_cls = EC2Connection + return regions + + # + # Reservation methods + # + + def get_all_reserved_instances_offerings(self, + reserved_instances_offering_ids=None, + instance_type=None, + availability_zone=None, + product_description=None, + filters=None, + instance_tenancy=None, + offering_type=None, + include_marketplace=None, + min_duration=None, + max_duration=None, + max_instance_count=None, + next_token=None, + max_results=None, + dry_run=False): + """ + Describes Reserved Instance offerings that are available for purchase. + + :type reserved_instances_offering_ids: list + :param reserved_instances_id: One or more Reserved Instances + offering IDs. + + :type instance_type: str + :param instance_type: Displays Reserved Instances of the specified + instance type. + + :type availability_zone: str + :param availability_zone: Displays Reserved Instances within the + specified Availability Zone. + + :type product_description: str + :param product_description: Displays Reserved Instances with the + specified product description. + + :type filters: dict + :param filters: Optional filters that can be used to limit + the results returned. Filters are provided + in the form of a dictionary consisting of + filter names as the key and filter values + as the value. The set of allowable filter + names/values is dependent on the request + being performed. Check the EC2 API guide + for details. + + :type instance_tenancy: string + :param instance_tenancy: The tenancy of the Reserved Instance offering. + A Reserved Instance with tenancy of dedicated will run on + single-tenant hardware and can only be launched within a VPC. + + :type offering_type: string + :param offering_type: The Reserved Instance offering type. 
Valid
+            Values: `"Heavy Utilization" | "Medium Utilization" | "Light
+            Utilization"`
+
+        :type include_marketplace: bool
+        :param include_marketplace: Include Marketplace offerings in the
+            response.
+
+        :type min_duration: int
+        :param min_duration: Minimum duration (in seconds) to filter when
+            searching for offerings.
+
+        :type max_duration: int
+        :param max_duration: Maximum duration (in seconds) to filter when
+            searching for offerings.
+
+        :type max_instance_count: int
+        :param max_instance_count: Maximum number of instances to filter when
+            searching for offerings.
+
+        :type next_token: string
+        :param next_token: Token to use when requesting the next paginated set
+            of offerings.
+
+        :type max_results: int
+        :param max_results: Maximum number of offerings to return per call.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of
+            :class:`boto.ec2.reservedinstance.ReservedInstancesOffering`.
+
+        """
+        params = {}
+        if reserved_instances_offering_ids is not None:
+            self.build_list_params(params, reserved_instances_offering_ids,
+                                   'ReservedInstancesOfferingId')
+        if instance_type:
+            params['InstanceType'] = instance_type
+        if availability_zone:
+            params['AvailabilityZone'] = availability_zone
+        if product_description:
+            params['ProductDescription'] = product_description
+        if filters:
+            self.build_filter_params(params, filters)
+        if instance_tenancy is not None:
+            params['InstanceTenancy'] = instance_tenancy
+        if offering_type is not None:
+            params['OfferingType'] = offering_type
+        if include_marketplace is not None:
+            if include_marketplace:
+                params['IncludeMarketplace'] = 'true'
+            else:
+                params['IncludeMarketplace'] = 'false'
+        if min_duration is not None:
+            params['MinDuration'] = str(min_duration)
+        if max_duration is not None:
+            params['MaxDuration'] = str(max_duration)
+        if max_instance_count is not None:
+            params['MaxInstanceCount'] = str(max_instance_count)
+        if next_token is not None:
+            params['NextToken'] = next_token
+        if max_results is not None:
+            params['MaxResults'] = str(max_results)
+        if dry_run:
+            params['DryRun'] = 'true'
+
+        return self.get_list('DescribeReservedInstancesOfferings',
+                             params, [('item', ReservedInstancesOffering)],
+                             verb='POST')
+
+    def get_all_reserved_instances(self, reserved_instances_id=None,
+                                   filters=None, dry_run=False):
+        """
+        Describes one or more of the Reserved Instances that you purchased.
+
+        :type reserved_instances_id: list
+        :param reserved_instances_id: A list of the reserved instance ids that
+            will be returned. If not provided, all reserved instances
+            will be returned.
+
+        :type filters: dict
+        :param filters: Optional filters that can be used to limit the
+            results returned. Filters are provided in the form of a
+            dictionary consisting of filter names as the key and
+            filter values as the value. The set of allowable filter
+            names/values is dependent on the request being performed.
+            Check the EC2 API guide for details.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of :class:`boto.ec2.reservedinstance.ReservedInstance`
+        """
+        params = {}
+        if reserved_instances_id:
+            self.build_list_params(params, reserved_instances_id,
+                                   'ReservedInstancesId')
+        if filters:
+            self.build_filter_params(params, filters)
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('DescribeReservedInstances',
+                             params, [('item', ReservedInstance)], verb='POST')
+
+    def purchase_reserved_instance_offering(self,
+                                            reserved_instances_offering_id,
+                                            instance_count=1, limit_price=None,
+                                            dry_run=False):
+        """
+        Purchase a Reserved Instance for use with your account.
+        ** CAUTION **
+        This request can result in large amounts of money being charged to your
+        AWS account. Use with caution!
+
+        :type reserved_instances_offering_id: string
+        :param reserved_instances_offering_id: The offering ID of the Reserved
+            Instance to purchase
+
+        :type instance_count: int
+        :param instance_count: The number of Reserved Instances to purchase.
+            Default value is 1.
+
+        :type limit_price: tuple
+        :param limit_price: Limit the price on the total order.
+            Must be a tuple of (amount, currency_code), for example:
+            (100.0, 'USD').
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: :class:`boto.ec2.reservedinstance.ReservedInstance`
+        :return: The newly created Reserved Instance
+        """
+        params = {
+            'ReservedInstancesOfferingId': reserved_instances_offering_id,
+            'InstanceCount': instance_count}
+        if limit_price is not None:
+            params['LimitPrice.Amount'] = str(limit_price[0])
+            params['LimitPrice.CurrencyCode'] = str(limit_price[1])
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_object('PurchaseReservedInstancesOffering', params,
+                               ReservedInstance, verb='POST')
+
+    def create_reserved_instances_listing(self, reserved_instances_id,
+                                          instance_count, price_schedules,
+                                          client_token, dry_run=False):
+        """Creates a new listing for Reserved Instances.
+
+        Creates a new listing for Amazon EC2 Reserved Instances that will be
+        sold in the Reserved Instance Marketplace. You can submit one Reserved
+        Instance listing at a time.
+
+        The Reserved Instance Marketplace matches sellers who want to resell
+        Reserved Instance capacity that they no longer need with buyers who
+        want to purchase additional capacity. Reserved Instances bought and
+        sold through the Reserved Instance Marketplace work like any other
+        Reserved Instances.
+
+        If you want to sell your Reserved Instances, you must first register as
+        a Seller in the Reserved Instance Marketplace. After completing the
+        registration process, you can create a Reserved Instance Marketplace
+        listing of some or all of your Reserved Instances, and specify the
+        upfront price you want to receive for them. Your Reserved Instance
+        listings then become available for purchase.
+
+        :type reserved_instances_id: string
+        :param reserved_instances_id: The ID of the Reserved Instance that
+            will be listed.
+
+        :type instance_count: int
+        :param instance_count: The number of instances that are a part of a
+            Reserved Instance account that will be listed in the Reserved
+            Instance Marketplace. This number should be less than or equal to
+            the instance count associated with the Reserved Instance ID
+            specified in this call.
+
+        :type price_schedules: List of tuples
+        :param price_schedules: A list specifying the price of the Reserved
+            Instance for each month remaining in the Reserved Instance term.
+            Each tuple contains two elements, the price and the term.
For
+            example, for an instance that has 11 months remaining in its term,
+            we can have a price schedule with an upfront price of $2.50.
+            At 8 months remaining we can drop the price down to $2.00.
+            This would be expressed as::
+
+                price_schedules=[('2.50', 11), ('2.00', 8)]
+
+        :type client_token: string
+        :param client_token: Unique, case-sensitive identifier you provide
+            to ensure idempotency of the request. Maximum 64 ASCII characters.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of
+            :class:`boto.ec2.reservedinstance.ReservedInstanceListing`
+
+        """
+        params = {
+            'ReservedInstancesId': reserved_instances_id,
+            'InstanceCount': str(instance_count),
+            'ClientToken': client_token,
+        }
+        for i, schedule in enumerate(price_schedules):
+            price, term = schedule
+            params['PriceSchedules.%s.Price' % i] = str(price)
+            params['PriceSchedules.%s.Term' % i] = str(term)
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('CreateReservedInstancesListing',
+                             params, [('item', ReservedInstanceListing)], verb='POST')
+
+    def cancel_reserved_instances_listing(self,
+                                          reserved_instances_listing_ids=None,
+                                          dry_run=False):
+        """Cancels the specified Reserved Instance listing.
+
+        :type reserved_instances_listing_ids: List of strings
+        :param reserved_instances_listing_ids: The IDs of the
+            Reserved Instance listings to be cancelled.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of
+            :class:`boto.ec2.reservedinstance.ReservedInstanceListing`
+
+        """
+        params = {}
+        if reserved_instances_listing_ids is not None:
+            self.build_list_params(params, reserved_instances_listing_ids,
+                                   'ReservedInstancesListingId')
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('CancelReservedInstancesListing',
+                             params, [('item', ReservedInstanceListing)], verb='POST')
+
+    def build_configurations_param_list(self, params, target_configurations):
+        for offset, tc in enumerate(target_configurations):
+            prefix = 'ReservedInstancesConfigurationSetItemType.%d.' % offset
+            if tc.availability_zone is not None:
+                params[prefix + 'AvailabilityZone'] = tc.availability_zone
+            if tc.platform is not None:
+                params[prefix + 'Platform'] = tc.platform
+            if tc.instance_count is not None:
+                params[prefix + 'InstanceCount'] = tc.instance_count
+            if tc.instance_type is not None:
+                params[prefix + 'InstanceType'] = tc.instance_type
+
+    def modify_reserved_instances(self, client_token, reserved_instance_ids,
+                                  target_configurations):
+        """
+        Modifies the specified Reserved Instances.
+
+        :type client_token: string
+        :param client_token: A unique, case-sensitive, token you provide to
+            ensure idempotency of your modification request.
+
+        :type reserved_instance_ids: List of strings
+        :param reserved_instance_ids: The IDs of the Reserved Instances to
+            modify.
+
+        :type target_configurations: List of :class:`boto.ec2.reservedinstance.ReservedInstancesConfiguration`
+        :param target_configurations: The configuration settings for the
+            modified Reserved Instances.
+
+        :rtype: string
+        :return: The unique ID for the submitted modification request.
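+
+        A hedged sketch (hypothetical IDs; assumes
+        :class:`boto.ec2.reservedinstance.ReservedInstancesConfiguration`
+        accepts these attributes as keyword arguments)::
+
+            from boto.ec2.reservedinstance import ReservedInstancesConfiguration
+            config = ReservedInstancesConfiguration(
+                availability_zone='us-east-1b', platform='EC2-VPC',
+                instance_count=1, instance_type='m3.large')
+            mod_id = conn.modify_reserved_instances(
+                'my-idempotency-token', ['ri-1234abcd'], [config])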
+ """ + params = { + 'ClientToken': client_token, + } + if reserved_instance_ids is not None: + self.build_list_params(params, reserved_instance_ids, + 'ReservedInstancesId') + if target_configurations is not None: + self.build_configurations_param_list(params, target_configurations) + mrir = self.get_object( + 'ModifyReservedInstances', + params, + ModifyReservedInstancesResult, + verb='POST' + ) + return mrir.modification_id + + def describe_reserved_instances_modifications(self, + reserved_instances_modification_ids=None, next_token=None, + filters=None): + """ + A request to describe the modifications made to Reserved Instances in + your account. + + :type reserved_instances_modification_ids: list + :param reserved_instances_modification_ids: An optional list of + Reserved Instances modification IDs to describe. + + :type next_token: str + :param next_token: A string specifying the next paginated set + of results to return. + + :type filters: dict + :param filters: Optional filters that can be used to limit the + results returned. Filters are provided in the form of a + dictionary consisting of filter names as the key and + filter values as the value. The set of allowable filter + names/values is dependent on the request being performed. + Check the EC2 API guide for details. + + :rtype: list + :return: A list of :class:`boto.ec2.reservedinstance.ReservedInstance` + """ + params = {} + if reserved_instances_modification_ids: + self.build_list_params(params, reserved_instances_modification_ids, + 'ReservedInstancesModificationId') + if next_token: + params['NextToken'] = next_token + if filters: + self.build_filter_params(params, filters) + return self.get_list('DescribeReservedInstancesModifications', + params, [('item', ReservedInstancesModification)], + verb='POST') + + # + # Monitoring + # + + def monitor_instances(self, instance_ids, dry_run=False): + """ + Enable detailed CloudWatch monitoring for the supplied instances. + + :type instance_id: list of strings + :param instance_id: The instance ids + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.ec2.instanceinfo.InstanceInfo` + """ + params = {} + self.build_list_params(params, instance_ids, 'InstanceId') + if dry_run: + params['DryRun'] = 'true' + return self.get_list('MonitorInstances', params, + [('item', InstanceInfo)], verb='POST') + + def monitor_instance(self, instance_id, dry_run=False): + """ + Deprecated Version, maintained for backward compatibility. + Enable detailed CloudWatch monitoring for the supplied instance. + + :type instance_id: string + :param instance_id: The instance id + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.ec2.instanceinfo.InstanceInfo` + """ + return self.monitor_instances([instance_id], dry_run=dry_run) + + def unmonitor_instances(self, instance_ids, dry_run=False): + """ + Disable CloudWatch monitoring for the supplied instance. + + :type instance_id: list of string + :param instance_id: The instance id + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. 
+ + :rtype: list + :return: A list of :class:`boto.ec2.instanceinfo.InstanceInfo` + """ + params = {} + self.build_list_params(params, instance_ids, 'InstanceId') + if dry_run: + params['DryRun'] = 'true' + return self.get_list('UnmonitorInstances', params, + [('item', InstanceInfo)], verb='POST') + + def unmonitor_instance(self, instance_id, dry_run=False): + """ + Deprecated Version, maintained for backward compatibility. + Disable detailed CloudWatch monitoring for the supplied instance. + + :type instance_id: string + :param instance_id: The instance id + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.ec2.instanceinfo.InstanceInfo` + """ + return self.unmonitor_instances([instance_id], dry_run=dry_run) + + # + # Bundle Windows Instances + # + + def bundle_instance(self, instance_id, + s3_bucket, + s3_prefix, + s3_upload_policy, dry_run=False): + """ + Bundle Windows instance. + + :type instance_id: string + :param instance_id: The instance id + + :type s3_bucket: string + :param s3_bucket: The bucket in which the AMI should be stored. + + :type s3_prefix: string + :param s3_prefix: The beginning of the file name for the AMI. + + :type s3_upload_policy: string + :param s3_upload_policy: Base64 encoded policy that specifies condition + and permissions for Amazon EC2 to upload the + user's image into Amazon S3. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + + params = {'InstanceId': instance_id, + 'Storage.S3.Bucket': s3_bucket, + 'Storage.S3.Prefix': s3_prefix, + 'Storage.S3.UploadPolicy': s3_upload_policy} + s3auth = boto.auth.get_auth_handler(None, boto.config, + self.provider, ['s3']) + params['Storage.S3.AWSAccessKeyId'] = self.aws_access_key_id + signature = s3auth.sign_string(s3_upload_policy) + params['Storage.S3.UploadPolicySignature'] = signature + if dry_run: + params['DryRun'] = 'true' + return self.get_object('BundleInstance', params, + BundleInstanceTask, verb='POST') + + def get_all_bundle_tasks(self, bundle_ids=None, filters=None, + dry_run=False): + """ + Retrieve current bundling tasks. If no bundle id is specified, all + tasks are retrieved. + + :type bundle_ids: list + :param bundle_ids: A list of strings containing identifiers for + previously created bundling tasks. + + :type filters: dict + :param filters: Optional filters that can be used to limit + the results returned. Filters are provided + in the form of a dictionary consisting of + filter names as the key and filter values + as the value. The set of allowable filter + names/values is dependent on the request + being performed. Check the EC2 API guide + for details. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + params = {} + if bundle_ids: + self.build_list_params(params, bundle_ids, 'BundleId') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeBundleTasks', params, + [('item', BundleInstanceTask)], verb='POST') + + def cancel_bundle_task(self, bundle_id, dry_run=False): + """ + Cancel a previously submitted bundle task + + :type bundle_id: string + :param bundle_id: The identifier of the bundle task to cancel. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. 
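+
+        A minimal sketch (hypothetical task ID, e.g. from a prior
+        ``bundle_instance`` call)::
+
+            task = conn.cancel_bundle_task('bun-1234abcd')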
+ + """ + params = {'BundleId': bundle_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_object('CancelBundleTask', params, + BundleInstanceTask, verb='POST') + + def get_password_data(self, instance_id, dry_run=False): + """ + Get encrypted administrator password for a Windows instance. + + :type instance_id: string + :param instance_id: The identifier of the instance to retrieve the + password for. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + params = {'InstanceId': instance_id} + if dry_run: + params['DryRun'] = 'true' + rs = self.get_object('GetPasswordData', params, ResultSet, verb='POST') + return rs.passwordData + + # + # Cluster Placement Groups + # + + def get_all_placement_groups(self, groupnames=None, filters=None, + dry_run=False): + """ + Get all placement groups associated with your account in a region. + + :type groupnames: list + :param groupnames: A list of the names of placement groups to retrieve. + If not provided, all placement groups will be + returned. + + :type filters: dict + :param filters: Optional filters that can be used to limit + the results returned. Filters are provided + in the form of a dictionary consisting of + filter names as the key and filter values + as the value. The set of allowable filter + names/values is dependent on the request + being performed. Check the EC2 API guide + for details. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.ec2.placementgroup.PlacementGroup` + """ + params = {} + if groupnames: + self.build_list_params(params, groupnames, 'GroupName') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribePlacementGroups', params, + [('item', PlacementGroup)], verb='POST') + + def create_placement_group(self, name, strategy='cluster', dry_run=False): + """ + Create a new placement group for your account. + This will create the placement group within the region you + are currently connected to. + + :type name: string + :param name: The name of the new placement group + + :type strategy: string + :param strategy: The placement strategy of the new placement group. + Currently, the only acceptable value is "cluster". + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {'GroupName': name, 'Strategy': strategy} + if dry_run: + params['DryRun'] = 'true' + group = self.get_status('CreatePlacementGroup', params, verb='POST') + return group + + def delete_placement_group(self, name, dry_run=False): + """ + Delete a placement group from your account. + + :type key_name: string + :param key_name: The name of the keypair to delete + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + params = {'GroupName': name} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeletePlacementGroup', params, verb='POST') + + # Tag methods + + def build_tag_param_list(self, params, tags): + keys = sorted(tags.keys()) + i = 1 + for key in keys: + value = tags[key] + params['Tag.%d.Key' % i] = key + if value is not None: + params['Tag.%d.Value' % i] = value + i += 1 + + def get_all_tags(self, filters=None, dry_run=False, max_results=None): + """ + Retrieve all the metadata tags associated with your account. 
+ + :type filters: dict + :param filters: Optional filters that can be used to limit + the results returned. Filters are provided + in the form of a dictionary consisting of + filter names as the key and filter values + as the value. The set of allowable filter + names/values is dependent on the request + being performed. Check the EC2 API guide + for details. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :type max_results: int + :param max_results: The maximum number of paginated instance + items per response. + + :rtype: list + :return: A list of :class:`boto.ec2.tag.Tag` objects + """ + params = {} + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + if max_results is not None: + params['MaxResults'] = max_results + return self.get_list('DescribeTags', params, + [('item', Tag)], verb='POST') + + def create_tags(self, resource_ids, tags, dry_run=False): + """ + Create new metadata tags for the specified resource ids. + + :type resource_ids: list + :param resource_ids: List of strings + + :type tags: dict + :param tags: A dictionary containing the name/value pairs. + If you want to create only a tag name, the + value for that tag should be the empty string + (e.g. ''). + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + params = {} + self.build_list_params(params, resource_ids, 'ResourceId') + self.build_tag_param_list(params, tags) + if dry_run: + params['DryRun'] = 'true' + return self.get_status('CreateTags', params, verb='POST') + + def delete_tags(self, resource_ids, tags, dry_run=False): + """ + Delete metadata tags for the specified resource ids. + + :type resource_ids: list + :param resource_ids: List of strings + + :type tags: dict or list + :param tags: Either a dictionary containing name/value pairs + or a list containing just tag names. + If you pass in a dictionary, the values must + match the actual tag values or the tag will + not be deleted. If you pass in a value of None + for the tag value, all tags with that name will + be deleted. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + if isinstance(tags, list): + tags = {}.fromkeys(tags, None) + params = {} + self.build_list_params(params, resource_ids, 'ResourceId') + self.build_tag_param_list(params, tags) + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteTags', params, verb='POST') + + # Network Interface methods + + def get_all_network_interfaces(self, network_interface_ids=None, filters=None, dry_run=False): + """ + Retrieve all of the Elastic Network Interfaces (ENI's) + associated with your account. + + :type network_interface_ids: list + :param network_interface_ids: a list of strings representing ENI IDs + + :type filters: dict + :param filters: Optional filters that can be used to limit + the results returned. Filters are provided + in the form of a dictionary consisting of + filter names as the key and filter values + as the value. The set of allowable filter + names/values is dependent on the request + being performed. Check the EC2 API guide + for details. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. 
+ + :rtype: list + :return: A list of :class:`boto.ec2.networkinterface.NetworkInterface` + """ + params = {} + if network_interface_ids: + self.build_list_params(params, network_interface_ids, 'NetworkInterfaceId') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeNetworkInterfaces', params, + [('item', NetworkInterface)], verb='POST') + + def create_network_interface(self, subnet_id, private_ip_address=None, + description=None, groups=None, dry_run=False): + """ + Creates a network interface in the specified subnet. + + :type subnet_id: str + :param subnet_id: The ID of the subnet to associate with the + network interface. + + :type private_ip_address: str + :param private_ip_address: The private IP address of the + network interface. If not supplied, one will be chosen + for you. + + :type description: str + :param description: The description of the network interface. + + :type groups: list + :param groups: Lists the groups for use by the network interface. + This can be either a list of group ID's or a list of + :class:`boto.ec2.securitygroup.SecurityGroup` objects. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: :class:`boto.ec2.networkinterface.NetworkInterface` + :return: The newly created network interface. + """ + params = {'SubnetId': subnet_id} + if private_ip_address: + params['PrivateIpAddress'] = private_ip_address + if description: + params['Description'] = description + if groups: + ids = [] + for group in groups: + if isinstance(group, SecurityGroup): + ids.append(group.id) + else: + ids.append(group) + self.build_list_params(params, ids, 'SecurityGroupId') + if dry_run: + params['DryRun'] = 'true' + return self.get_object('CreateNetworkInterface', params, + NetworkInterface, verb='POST') + + def attach_network_interface(self, network_interface_id, + instance_id, device_index, dry_run=False): + """ + Attaches a network interface to an instance. + + :type network_interface_id: str + :param network_interface_id: The ID of the network interface to attach. + + :type instance_id: str + :param instance_id: The ID of the instance that will be attached + to the network interface. + + :type device_index: int + :param device_index: The index of the device for the network + interface attachment on the instance. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + params = {'NetworkInterfaceId': network_interface_id, + 'InstanceId': instance_id, + 'DeviceIndex': device_index} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('AttachNetworkInterface', params, verb='POST') + + def detach_network_interface(self, attachment_id, force=False, + dry_run=False): + """ + Detaches a network interface from an instance. + + :type attachment_id: str + :param attachment_id: The ID of the attachment. + + :type force: bool + :param force: Set to true to force a detachment. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + params = {'AttachmentId': attachment_id} + if force: + params['Force'] = 'true' + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DetachNetworkInterface', params, verb='POST') + + def delete_network_interface(self, network_interface_id, dry_run=False): + """ + Delete the specified network interface. 
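+
+        A minimal sketch (illustrative; ``conn`` is an assumed
+        :class:`boto.ec2.connection.EC2Connection`, the ENI ID is a
+        placeholder, and the interface must be detached first)::
+
+            conn.delete_network_interface('eni-12345678')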
+ + :type network_interface_id: str + :param network_interface_id: The ID of the network interface to delete. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + params = {'NetworkInterfaceId': network_interface_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteNetworkInterface', params, verb='POST') + + def get_all_instance_types(self): + """ + Get all instance_types available on this cloud (eucalyptus specific) + + :rtype: list of :class:`boto.ec2.instancetype.InstanceType` + :return: The requested InstanceType objects + """ + params = {} + return self.get_list('DescribeInstanceTypes', params, [('item', InstanceType)], verb='POST') + + def copy_image(self, source_region, source_image_id, name=None, + description=None, client_token=None, dry_run=False): + """ + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + :rtype: :class:`boto.ec2.image.CopyImage` + :return: Object containing the image_id of the copied image. + """ + params = { + 'SourceRegion': source_region, + 'SourceImageId': source_image_id, + } + if name is not None: + params['Name'] = name + if description is not None: + params['Description'] = description + if client_token is not None: + params['ClientToken'] = client_token + if dry_run: + params['DryRun'] = 'true' + return self.get_object('CopyImage', params, CopyImage, + verb='POST') + + def describe_account_attributes(self, attribute_names=None, dry_run=False): + """ + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + params = {} + if attribute_names is not None: + self.build_list_params(params, attribute_names, 'AttributeName') + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeAccountAttributes', params, + [('item', AccountAttribute)], verb='POST') + + def describe_vpc_attribute(self, vpc_id, attribute=None, dry_run=False): + """ + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + params = { + 'VpcId': vpc_id + } + if attribute is not None: + params['Attribute'] = attribute + if dry_run: + params['DryRun'] = 'true' + return self.get_object('DescribeVpcAttribute', params, + VPCAttribute, verb='POST') + + def modify_vpc_attribute(self, vpc_id, enable_dns_support=None, + enable_dns_hostnames=None, dry_run=False): + """ + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + params = { + 'VpcId': vpc_id + } + if enable_dns_support is not None: + params['EnableDnsSupport.Value'] = ( + 'true' if enable_dns_support else 'false') + if enable_dns_hostnames is not None: + params['EnableDnsHostnames.Value'] = ( + 'true' if enable_dns_hostnames else 'false') + if dry_run: + params['DryRun'] = 'true' + return self.get_status('ModifyVpcAttribute', params, verb='POST') + + def get_all_classic_link_instances(self, instance_ids=None, filters=None, + dry_run=False, max_results=None, + next_token=None): + """ + Get all of your linked EC2-Classic instances. This request only + returns information about EC2-Classic instances linked to + a VPC through ClassicLink + + :type instance_ids: list + :param instance_ids: A list of strings of instance IDs. Must be + instances linked to a VPC through ClassicLink. + + :type filters: dict + :param filters: Optional filters that can be used to limit the + results returned. 
Filters are provided in the form of a + dictionary consisting of filter names as the key and + filter values as the value. The set of allowable filter + names/values is dependent on the request being performed. + Check the EC2 API guide for details. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :type max_results: int + :param max_results: The maximum number of paginated instance + items per response. + + :rtype: list + :return: A list of :class:`boto.ec2.instance.Instance` + """ + params = {} + if instance_ids: + self.build_list_params(params, instance_ids, 'InstanceId') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + if max_results is not None: + params['MaxResults'] = max_results + if next_token: + params['NextToken'] = next_token + return self.get_list('DescribeClassicLinkInstances', params, + [('item', Instance)], verb='POST') diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/ec2object.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/ec2object.py new file mode 100644 index 0000000000000000000000000000000000000000..fa50a9fcc7e793e0c6e8a39ad3b52ba7f6692bef --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/ec2object.py @@ -0,0 +1,144 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an EC2 Object +""" +from boto.ec2.tag import TagSet + + +class EC2Object(object): + + def __init__(self, connection=None): + self.connection = connection + if self.connection and hasattr(self.connection, 'region'): + self.region = connection.region + else: + self.region = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + setattr(self, name, value) + + +class TaggedEC2Object(EC2Object): + """ + Any EC2 resource that can be tagged should be represented + by a Python object that subclasses this class. This class + has the mechanism in place to handle the tagSet element in + the Describe* responses. If tags are found, it will create + a TagSet object and allow it to parse and collect the tags + into a dict that is stored in the "tags" attribute of the + object. 
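+
+    A minimal usage sketch (illustrative; ``conn`` is assumed to be an
+    existing :class:`boto.ec2.connection.EC2Connection` and the instance
+    ID is a placeholder)::
+
+        reservations = conn.get_all_reservations(instance_ids=['i-12345678'])
+        instance = reservations[0].instances[0]  # a TaggedEC2Object subclass
+        instance.add_tag('env', 'staging')       # round-trips to EC2
+        print(instance.tags)                     # local TagSet is updated too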
+ """ + + def __init__(self, connection=None): + super(TaggedEC2Object, self).__init__(connection) + self.tags = TagSet() + + def startElement(self, name, attrs, connection): + if name == 'tagSet': + return self.tags + else: + return None + + def add_tag(self, key, value='', dry_run=False): + """ + Add a tag to this object. Tags are stored by AWS and can be used + to organize and filter resources. Adding a tag involves a round-trip + to the EC2 service. + + :type key: str + :param key: The key or name of the tag being stored. + + :type value: str + :param value: An optional value that can be stored with the tag. + If you want only the tag name and no value, the + value should be the empty string. + """ + self.add_tags({key: value}, dry_run) + + def add_tags(self, tags, dry_run=False): + """ + Add tags to this object. Tags are stored by AWS and can be used + to organize and filter resources. Adding tags involves a round-trip + to the EC2 service. + + :type tags: dict + :param tags: A dictionary of key-value pairs for the tags being stored. + If for some tags you want only the name and no value, the + corresponding value for that tag name should be an empty + string. + """ + status = self.connection.create_tags( + [self.id], + tags, + dry_run=dry_run + ) + if self.tags is None: + self.tags = TagSet() + self.tags.update(tags) + + def remove_tag(self, key, value=None, dry_run=False): + """ + Remove a tag from this object. Removing a tag involves a round-trip + to the EC2 service. + + :type key: str + :param key: The key or name of the tag being stored. + + :type value: str + :param value: An optional value that can be stored with the tag. + If a value is provided, it must match the value currently + stored in EC2. If not, the tag will not be removed. If + a value of None is provided, the tag will be + unconditionally deleted. + NOTE: There is an important distinction between a value + of '' and a value of None. + """ + self.remove_tags({key: value}, dry_run) + + def remove_tags(self, tags, dry_run=False): + """ + Removes tags from this object. Removing tags involves a round-trip + to the EC2 service. + + :type tags: dict + :param tags: A dictionary of key-value pairs for the tags being removed. + For each key, the provided value must match the value + currently stored in EC2. If not, that particular tag will + not be removed. However, if a value of None is provided, + the tag will be unconditionally deleted. + NOTE: There is an important distinction between a value of + '' and a value of None. + """ + status = self.connection.delete_tags( + [self.id], + tags, + dry_run=dry_run + ) + for key, value in tags.items(): + if key in self.tags: + if value is None or value == self.tags[key]: + del self.tags[key] diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bae45cf42bf1a7e1f4ed5248601efe3247aaa072 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/__init__.py @@ -0,0 +1,758 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. 
+# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +""" +This module provides an interface to the Elastic Compute Cloud (EC2) +load balancing service from AWS. +""" +from boto.connection import AWSQueryConnection +from boto.ec2.instanceinfo import InstanceInfo +from boto.ec2.elb.loadbalancer import LoadBalancer, LoadBalancerZones +from boto.ec2.elb.instancestate import InstanceState +from boto.ec2.elb.healthcheck import HealthCheck +from boto.regioninfo import RegionInfo, get_regions, load_regions +import boto +from boto.compat import six + +RegionData = load_regions().get('elasticloadbalancing', {}) + + +def regions(): + """ + Get all available regions for the ELB service. + + :rtype: list + :return: A list of :class:`boto.RegionInfo` instances + """ + return get_regions('elasticloadbalancing', connection_cls=ELBConnection) + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.ec2.elb.ELBConnection`. + + :param str region_name: The name of the region to connect to. + + :rtype: :class:`boto.ec2.ELBConnection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None + + +class ELBConnection(AWSQueryConnection): + + APIVersion = boto.config.get('Boto', 'elb_version', '2012-06-01') + DefaultRegionName = boto.config.get('Boto', 'elb_region_name', 'us-east-1') + DefaultRegionEndpoint = boto.config.get( + 'Boto', 'elb_region_endpoint', + 'elasticloadbalancing.us-east-1.amazonaws.com') + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + security_token=None, validate_certs=True, profile_name=None): + """ + Init method to create a new connection to EC2 Load Balancing Service. + + .. note:: The region argument is overridden by the region specified in + the boto configuration file. 
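+
+        A minimal connection sketch (illustrative; credentials are read
+        from the environment or the boto config file when not passed
+        explicitly, and the region name is a placeholder)::
+
+            import boto.ec2.elb
+
+            elb_conn = boto.ec2.elb.connect_to_region('us-east-1')
+            for lb in elb_conn.get_all_load_balancers():
+                print(lb.name, lb.dns_name)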
+ """ + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + self.region = region + super(ELBConnection, self).__init__(aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, + self.region.endpoint, debug, + https_connection_factory, path, + security_token, + validate_certs=validate_certs, + profile_name=profile_name) + + def _required_auth_capability(self): + return ['hmac-v4'] + + def build_list_params(self, params, items, label): + if isinstance(items, six.string_types): + items = [items] + for index, item in enumerate(items): + params[label % (index + 1)] = item + + def get_all_load_balancers(self, load_balancer_names=None, marker=None): + """ + Retrieve all load balancers associated with your account. + + :type load_balancer_names: list + :keyword load_balancer_names: An optional list of load balancer names. + + :type marker: string + :param marker: Use this only when paginating results and only + in follow-up request after you've received a response + where the results are truncated. Set this to the value of + the Marker element in the response you just received. + + :rtype: :py:class:`boto.resultset.ResultSet` + :return: A ResultSet containing instances of + :class:`boto.ec2.elb.loadbalancer.LoadBalancer` + """ + params = {} + if load_balancer_names: + self.build_list_params(params, load_balancer_names, + 'LoadBalancerNames.member.%d') + + if marker: + params['Marker'] = marker + + return self.get_list('DescribeLoadBalancers', params, + [('member', LoadBalancer)]) + + def create_load_balancer(self, name, zones, listeners=None, subnets=None, + security_groups=None, scheme='internet-facing', + complex_listeners=None): + """ + Create a new load balancer for your account. By default the load + balancer will be created in EC2. To create a load balancer inside a + VPC, parameter zones must be set to None and subnets must not be None. + The load balancer will be automatically created under the VPC that + contains the subnet(s) specified. + + :type name: string + :param name: The mnemonic name associated with the new load balancer + + :type zones: List of strings + :param zones: The names of the availability zone(s) to add. + + :type listeners: List of tuples + :param listeners: Each tuple contains three or four values, + (LoadBalancerPortNumber, InstancePortNumber, Protocol, + [SSLCertificateId]) where LoadBalancerPortNumber and + InstancePortNumber are integer values between 1 and 65535, + Protocol is a string containing either 'TCP', 'SSL', HTTP', or + 'HTTPS'; SSLCertificateID is the ARN of a AWS IAM + certificate, and must be specified when doing HTTPS. + + :type subnets: list of strings + :param subnets: A list of subnet IDs in your VPC to attach to + your LoadBalancer. + + :type security_groups: list of strings + :param security_groups: The security groups assigned to your + LoadBalancer within your VPC. + + :type scheme: string + :param scheme: The type of a LoadBalancer. By default, Elastic + Load Balancing creates an internet-facing LoadBalancer with + a publicly resolvable DNS name, which resolves to public IP + addresses. + + Specify the value internal for this option to create an + internal LoadBalancer with a DNS name that resolves to + private IP addresses. + + This option is only available for LoadBalancers attached + to an Amazon VPC. 
+
+        :type complex_listeners: List of tuples
+        :param complex_listeners: Each tuple contains four or five values,
+            (LoadBalancerPortNumber, InstancePortNumber, Protocol,
+            InstanceProtocol, SSLCertificateId).
+
+            Where:
+                - LoadBalancerPortNumber and InstancePortNumber are integer
+                  values between 1 and 65535
+                - Protocol and InstanceProtocol are strings containing
+                  either 'TCP', 'SSL', 'HTTP', or 'HTTPS'
+                - SSLCertificateId is the ARN of an SSL certificate loaded
+                  into AWS IAM
+
+        :rtype: :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
+        :return: The newly created
+            :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
+        """
+        if not listeners and not complex_listeners:
+            # Must specify one of the two options
+            return None
+
+        params = {'LoadBalancerName': name,
+                  'Scheme': scheme}
+
+        # Handle legacy listeners
+        if listeners:
+            for index, listener in enumerate(listeners):
+                i = index + 1
+                protocol = listener[2].upper()
+                params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
+                params['Listeners.member.%d.InstancePort' % i] = listener[1]
+                params['Listeners.member.%d.Protocol' % i] = listener[2]
+                if protocol == 'HTTPS' or protocol == 'SSL':
+                    params['Listeners.member.%d.SSLCertificateId' % i] = listener[3]
+
+        # Handle the full listeners
+        if complex_listeners:
+            for index, listener in enumerate(complex_listeners):
+                i = index + 1
+                protocol = listener[2].upper()
+                params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
+                params['Listeners.member.%d.InstancePort' % i] = listener[1]
+                params['Listeners.member.%d.Protocol' % i] = listener[2]
+                params['Listeners.member.%d.InstanceProtocol' % i] = listener[3]
+                if protocol == 'HTTPS' or protocol == 'SSL':
+                    params['Listeners.member.%d.SSLCertificateId' % i] = listener[4]
+
+        if zones:
+            self.build_list_params(params, zones, 'AvailabilityZones.member.%d')
+
+        if subnets:
+            self.build_list_params(params, subnets, 'Subnets.member.%d')
+
+        if security_groups:
+            self.build_list_params(params, security_groups,
+                                   'SecurityGroups.member.%d')
+
+        load_balancer = self.get_object('CreateLoadBalancer',
+                                        params, LoadBalancer)
+        load_balancer.name = name
+        load_balancer.listeners = listeners
+        load_balancer.availability_zones = zones
+        load_balancer.subnets = subnets
+        load_balancer.security_groups = security_groups
+        return load_balancer
+
+    def create_load_balancer_listeners(self, name, listeners=None,
+                                       complex_listeners=None):
+        """
+        Creates a Listener (or group of listeners) for an existing
+        Load Balancer.
+
+        :type name: string
+        :param name: The name of the load balancer to create the listeners for
+
+        :type listeners: List of tuples
+        :param listeners: Each tuple contains three or four values,
+            (LoadBalancerPortNumber, InstancePortNumber, Protocol,
+            [SSLCertificateId]) where LoadBalancerPortNumber and
+            InstancePortNumber are integer values between 1 and 65535,
+            Protocol is a string containing either 'TCP', 'SSL', 'HTTP', or
+            'HTTPS'; SSLCertificateId is the ARN of an AWS IAM
+            certificate, and must be specified when doing HTTPS.
+
+        :type complex_listeners: List of tuples
+        :param complex_listeners: Each tuple contains four or five values,
+            (LoadBalancerPortNumber, InstancePortNumber, Protocol,
+            InstanceProtocol, SSLCertificateId).
+
+            Where:
+                - LoadBalancerPortNumber and InstancePortNumber are integer
+                  values between 1 and 65535
+                - Protocol and InstanceProtocol are strings containing
+                  either 'TCP', 'SSL', 'HTTP', or 'HTTPS'
+                - SSLCertificateId is the ARN of an SSL certificate loaded
+                  into AWS IAM
+
+        :return: The status of the request
+        """
+        if not listeners and not complex_listeners:
+            # Must specify one of the two options
+            return None
+
+        params = {'LoadBalancerName': name}
+
+        # Handle the simple listeners
+        if listeners:
+            for index, listener in enumerate(listeners):
+                i = index + 1
+                protocol = listener[2].upper()
+                params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
+                params['Listeners.member.%d.InstancePort' % i] = listener[1]
+                params['Listeners.member.%d.Protocol' % i] = listener[2]
+                if protocol == 'HTTPS' or protocol == 'SSL':
+                    params['Listeners.member.%d.SSLCertificateId' % i] = listener[3]
+
+        # Handle the full listeners
+        if complex_listeners:
+            for index, listener in enumerate(complex_listeners):
+                i = index + 1
+                protocol = listener[2].upper()
+                params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
+                params['Listeners.member.%d.InstancePort' % i] = listener[1]
+                params['Listeners.member.%d.Protocol' % i] = listener[2]
+                params['Listeners.member.%d.InstanceProtocol' % i] = listener[3]
+                if protocol == 'HTTPS' or protocol == 'SSL':
+                    params['Listeners.member.%d.SSLCertificateId' % i] = listener[4]
+
+        return self.get_status('CreateLoadBalancerListeners', params)
+
+    def delete_load_balancer(self, name):
+        """
+        Delete a Load Balancer from your account.
+
+        :type name: string
+        :param name: The name of the Load Balancer to delete
+        """
+        params = {'LoadBalancerName': name}
+        return self.get_status('DeleteLoadBalancer', params)
+
+    def delete_load_balancer_listeners(self, name, ports):
+        """
+        Deletes a load balancer listener (or group of listeners).
+
+        :type name: string
+        :param name: The name of the load balancer to delete the
+            listeners from
+
+        :type ports: List int
+        :param ports: Each int represents the port on the ELB to be removed
+
+        :return: The status of the request
+        """
+        params = {'LoadBalancerName': name}
+        for index, port in enumerate(ports):
+            params['LoadBalancerPorts.member.%d' % (index + 1)] = port
+        return self.get_status('DeleteLoadBalancerListeners', params)
+
+    def enable_availability_zones(self, load_balancer_name, zones_to_add):
+        """
+        Add availability zones to an existing Load Balancer.
+        All zones must be in the same region as the Load Balancer.
+        Adding zones that are already registered with the Load Balancer
+        has no effect.
+
+        :type load_balancer_name: string
+        :param load_balancer_name: The name of the Load Balancer
+
+        :type zones_to_add: List of strings
+        :param zones_to_add: The name of the zone(s) to add.
+
+        :rtype: List of strings
+        :return: An updated list of zones for this Load Balancer.
+
+        """
+        params = {'LoadBalancerName': load_balancer_name}
+        self.build_list_params(params, zones_to_add,
+                               'AvailabilityZones.member.%d')
+        obj = self.get_object('EnableAvailabilityZonesForLoadBalancer',
+                              params, LoadBalancerZones)
+        return obj.zones
+
+    def disable_availability_zones(self, load_balancer_name, zones_to_remove):
+        """
+        Remove availability zones from an existing Load Balancer.
+        All zones must be in the same region as the Load Balancer.
+        Removing zones that are not registered with the Load Balancer
+        has no effect.
+        You cannot remove all zones from a Load Balancer.
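+
+        A sketch (illustrative; ``elb_conn``, the balancer name, and the
+        zone name are placeholders)::
+
+            zones = elb_conn.disable_availability_zones('my-lb',
+                                                        ['us-east-1c'])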
+ + :type load_balancer_name: string + :param load_balancer_name: The name of the Load Balancer + + :type zones: List of strings + :param zones: The name of the zone(s) to remove. + + :rtype: List of strings + :return: An updated list of zones for this Load Balancer. + + """ + params = {'LoadBalancerName': load_balancer_name} + self.build_list_params(params, zones_to_remove, + 'AvailabilityZones.member.%d') + obj = self.get_object('DisableAvailabilityZonesForLoadBalancer', + params, LoadBalancerZones) + return obj.zones + + def modify_lb_attribute(self, load_balancer_name, attribute, value): + """Changes an attribute of a Load Balancer + + :type load_balancer_name: string + :param load_balancer_name: The name of the Load Balancer + + :type attribute: string + :param attribute: The attribute you wish to change. + + * crossZoneLoadBalancing - Boolean (true) + * connectingSettings - :py:class:`ConnectionSettingAttribute` instance + * accessLog - :py:class:`AccessLogAttribute` instance + * connectionDraining - :py:class:`ConnectionDrainingAttribute` instance + + :type value: string + :param value: The new value for the attribute + + :rtype: bool + :return: Whether the operation succeeded or not + """ + + bool_reqs = ('crosszoneloadbalancing',) + if attribute.lower() in bool_reqs: + if isinstance(value, bool): + if value: + value = 'true' + else: + value = 'false' + + params = {'LoadBalancerName': load_balancer_name} + if attribute.lower() == 'crosszoneloadbalancing': + params['LoadBalancerAttributes.CrossZoneLoadBalancing.Enabled' + ] = value + elif attribute.lower() == 'accesslog': + params['LoadBalancerAttributes.AccessLog.Enabled'] = \ + value.enabled and 'true' or 'false' + params['LoadBalancerAttributes.AccessLog.S3BucketName'] = \ + value.s3_bucket_name + params['LoadBalancerAttributes.AccessLog.S3BucketPrefix'] = \ + value.s3_bucket_prefix + params['LoadBalancerAttributes.AccessLog.EmitInterval'] = \ + value.emit_interval + elif attribute.lower() == 'connectiondraining': + params['LoadBalancerAttributes.ConnectionDraining.Enabled'] = \ + value.enabled and 'true' or 'false' + params['LoadBalancerAttributes.ConnectionDraining.Timeout'] = \ + value.timeout + elif attribute.lower() == 'connectingsettings': + params['LoadBalancerAttributes.ConnectionSettings.IdleTimeout'] = \ + value.idle_timeout + else: + raise ValueError('InvalidAttribute', attribute) + return self.get_status('ModifyLoadBalancerAttributes', params, + verb='GET') + + def get_all_lb_attributes(self, load_balancer_name): + """Gets all Attributes of a Load Balancer + + :type load_balancer_name: string + :param load_balancer_name: The name of the Load Balancer + + :rtype: boto.ec2.elb.attribute.LbAttributes + :return: The attribute object of the ELB. + """ + from boto.ec2.elb.attributes import LbAttributes + params = {'LoadBalancerName': load_balancer_name} + return self.get_object('DescribeLoadBalancerAttributes', + params, LbAttributes) + + def get_lb_attribute(self, load_balancer_name, attribute): + """Gets an attribute of a Load Balancer + + This will make an EC2 call for each method call. + + :type load_balancer_name: string + :param load_balancer_name: The name of the Load Balancer + + :type attribute: string + :param attribute: The attribute you wish to see. 
+ + * accessLog - :py:class:`AccessLogAttribute` instance + * crossZoneLoadBalancing - Boolean + * connectingSettings - :py:class:`ConnectionSettingAttribute` instance + * connectionDraining - :py:class:`ConnectionDrainingAttribute` + instance + + :rtype: Attribute dependent + :return: The new value for the attribute + """ + attributes = self.get_all_lb_attributes(load_balancer_name) + if attribute.lower() == 'accesslog': + return attributes.access_log + if attribute.lower() == 'crosszoneloadbalancing': + return attributes.cross_zone_load_balancing.enabled + if attribute.lower() == 'connectiondraining': + return attributes.connection_draining + if attribute.lower() == 'connectingsettings': + return attributes.connecting_settings + return None + + def register_instances(self, load_balancer_name, instances): + """ + Add new Instances to an existing Load Balancer. + + :type load_balancer_name: string + :param load_balancer_name: The name of the Load Balancer + + :type instances: List of strings + :param instances: The instance ID's of the EC2 instances to add. + + :rtype: List of strings + :return: An updated list of instances for this Load Balancer. + + """ + params = {'LoadBalancerName': load_balancer_name} + self.build_list_params(params, instances, + 'Instances.member.%d.InstanceId') + return self.get_list('RegisterInstancesWithLoadBalancer', + params, [('member', InstanceInfo)]) + + def deregister_instances(self, load_balancer_name, instances): + """ + Remove Instances from an existing Load Balancer. + + :type load_balancer_name: string + :param load_balancer_name: The name of the Load Balancer + + :type instances: List of strings + :param instances: The instance ID's of the EC2 instances to remove. + + :rtype: List of strings + :return: An updated list of instances for this Load Balancer. + + """ + params = {'LoadBalancerName': load_balancer_name} + self.build_list_params(params, instances, + 'Instances.member.%d.InstanceId') + return self.get_list('DeregisterInstancesFromLoadBalancer', + params, [('member', InstanceInfo)]) + + def describe_instance_health(self, load_balancer_name, instances=None): + """ + Get current state of all Instances registered to an Load Balancer. + + :type load_balancer_name: string + :param load_balancer_name: The name of the Load Balancer + + :type instances: List of strings + :param instances: The instance ID's of the EC2 instances + to return status for. If not provided, + the state of all instances will be returned. + + :rtype: List of :class:`boto.ec2.elb.instancestate.InstanceState` + :return: list of state info for instances in this Load Balancer. + + """ + params = {'LoadBalancerName': load_balancer_name} + if instances: + self.build_list_params(params, instances, + 'Instances.member.%d.InstanceId') + return self.get_list('DescribeInstanceHealth', params, + [('member', InstanceState)]) + + def configure_health_check(self, name, health_check): + """ + Define a health check for the EndPoints. + + :type name: string + :param name: The mnemonic name associated with the load balancer + + :type health_check: :class:`boto.ec2.elb.healthcheck.HealthCheck` + :param health_check: A HealthCheck object populated with the desired + values. 
+
+        :rtype: :class:`boto.ec2.elb.healthcheck.HealthCheck`
+        :return: The updated :class:`boto.ec2.elb.healthcheck.HealthCheck`
+        """
+        params = {'LoadBalancerName': name,
+                  'HealthCheck.Timeout': health_check.timeout,
+                  'HealthCheck.Target': health_check.target,
+                  'HealthCheck.Interval': health_check.interval,
+                  'HealthCheck.UnhealthyThreshold': health_check.unhealthy_threshold,
+                  'HealthCheck.HealthyThreshold': health_check.healthy_threshold}
+        return self.get_object('ConfigureHealthCheck', params, HealthCheck)
+
+    def set_lb_listener_SSL_certificate(self, lb_name, lb_port,
+                                        ssl_certificate_id):
+        """
+        Sets the certificate that terminates the specified listener's SSL
+        connections.  The specified certificate replaces any prior certificate
+        that was used on the same LoadBalancer and port.
+        """
+        params = {'LoadBalancerName': lb_name,
+                  'LoadBalancerPort': lb_port,
+                  'SSLCertificateId': ssl_certificate_id}
+        return self.get_status('SetLoadBalancerListenerSSLCertificate', params)
+
+    def create_app_cookie_stickiness_policy(self, name, lb_name, policy_name):
+        """
+        Generates a stickiness policy with sticky session lifetimes that follow
+        that of an application-generated cookie.  This policy can only be
+        associated with HTTP listeners.
+
+        This policy is similar to the policy created by
+        CreateLBCookieStickinessPolicy, except that the lifetime of the special
+        Elastic Load Balancing cookie follows the lifetime of the
+        application-generated cookie specified in the policy configuration.  The
+        load balancer only inserts a new stickiness cookie when the application
+        response includes a new application cookie.
+
+        If the application cookie is explicitly removed or expires, the session
+        stops being sticky until a new application cookie is issued.
+        """
+        params = {'CookieName': name,
+                  'LoadBalancerName': lb_name,
+                  'PolicyName': policy_name}
+        return self.get_status('CreateAppCookieStickinessPolicy', params)
+
+    def create_lb_cookie_stickiness_policy(self, cookie_expiration_period,
+                                           lb_name, policy_name):
+        """
+        Generates a stickiness policy with sticky session lifetimes controlled
+        by the lifetime of the browser (user-agent) or a specified expiration
+        period.  This policy can only be associated with HTTP listeners.
+
+        When a load balancer implements this policy, the load balancer uses a
+        special cookie to track the backend server instance for each request.
+        When the load balancer receives a request, it first checks to see if
+        this cookie is present in the request.  If so, the load balancer sends
+        the request to the application server specified in the cookie.  If not,
+        the load balancer sends the request to a server that is chosen based on
+        the existing load balancing algorithm.
+
+        A cookie is inserted into the response for binding subsequent requests
+        from the same user to that server.  The validity of the cookie is based
+        on the cookie expiration time, which is specified in the policy
+        configuration.
+
+        None may be passed for cookie_expiration_period.
+        """
+        params = {'LoadBalancerName': lb_name,
+                  'PolicyName': policy_name}
+        if cookie_expiration_period is not None:
+            params['CookieExpirationPeriod'] = cookie_expiration_period
+        return self.get_status('CreateLBCookieStickinessPolicy', params)
+
+    def create_lb_policy(self, lb_name, policy_name, policy_type,
+                         policy_attributes):
+        """
+        Creates a new policy that contains the necessary attributes
+        depending on the policy type.  Policies are settings that are
+        saved for your load balancer and that can be applied to the
+        front-end listener, or the back-end application server.
+
+        """
+        params = {'LoadBalancerName': lb_name,
+                  'PolicyName': policy_name,
+                  'PolicyTypeName': policy_type}
+        if policy_attributes:
+            for index, (name, value) in enumerate(
+                    six.iteritems(policy_attributes), 1):
+                params['PolicyAttributes.member.%d.AttributeName' % index] = name
+                params['PolicyAttributes.member.%d.AttributeValue' % index] = value
+        else:
+            params['PolicyAttributes'] = ''
+        return self.get_status('CreateLoadBalancerPolicy', params)
+
+    def delete_lb_policy(self, lb_name, policy_name):
+        """
+        Deletes a policy from the LoadBalancer.  The specified policy must not
+        be enabled for any listeners.
+        """
+        params = {'LoadBalancerName': lb_name,
+                  'PolicyName': policy_name}
+        return self.get_status('DeleteLoadBalancerPolicy', params)
+
+    def set_lb_policies_of_listener(self, lb_name, lb_port, policies):
+        """
+        Associates, updates, or disables a policy with a listener on the load
+        balancer.  Currently only zero (0) or one (1) policy can be associated
+        with a listener.
+        """
+        params = {'LoadBalancerName': lb_name,
+                  'LoadBalancerPort': lb_port}
+        if policies:
+            self.build_list_params(params, policies, 'PolicyNames.member.%d')
+        else:
+            params['PolicyNames'] = ''
+        return self.get_status('SetLoadBalancerPoliciesOfListener', params)
+
+    def set_lb_policies_of_backend_server(self, lb_name, instance_port,
+                                          policies):
+        """
+        Replaces the current set of policies associated with a port on which
+        the back-end server is listening with a new set of policies.
+        """
+        params = {'LoadBalancerName': lb_name,
+                  'InstancePort': instance_port}
+        if policies:
+            self.build_list_params(params, policies, 'PolicyNames.member.%d')
+        else:
+            params['PolicyNames'] = ''
+        return self.get_status('SetLoadBalancerPoliciesForBackendServer',
+                               params)
+
+    def apply_security_groups_to_lb(self, name, security_groups):
+        """
+        Associates one or more security groups with the load balancer.
+        The provided security groups will override any currently applied
+        security groups.
+
+        :type name: string
+        :param name: The name of the Load Balancer
+
+        :type security_groups: List of strings
+        :param security_groups: The name of the security group(s) to add.
+
+        :rtype: List of strings
+        :return: An updated list of security groups for this Load Balancer.
+
+        """
+        params = {'LoadBalancerName': name}
+        self.build_list_params(params, security_groups,
+                               'SecurityGroups.member.%d')
+        return self.get_list('ApplySecurityGroupsToLoadBalancer',
+                             params, None)
+
+    def attach_lb_to_subnets(self, name, subnets):
+        """
+        Attaches load balancer to one or more subnets.
+        Attaching subnets that are already registered with the
+        Load Balancer has no effect.
+
+        :type name: string
+        :param name: The name of the Load Balancer
+
+        :type subnets: List of strings
+        :param subnets: The name of the subnet(s) to add.
+
+        :rtype: List of strings
+        :return: An updated list of subnets for this Load Balancer.
+
+        """
+        params = {'LoadBalancerName': name}
+        self.build_list_params(params, subnets,
+                               'Subnets.member.%d')
+        return self.get_list('AttachLoadBalancerToSubnets',
+                             params, None)
+
+    def detach_lb_from_subnets(self, name, subnets):
+        """
+        Detaches load balancer from one or more subnets.
+
+        :type name: string
+        :param name: The name of the Load Balancer
+
+        :type subnets: List of strings
+        :param subnets: The name of the subnet(s) to detach.
+
+        :rtype: List of strings
+        :return: An updated list of subnets for this Load Balancer.
+
+        """
+        params = {'LoadBalancerName': name}
+        self.build_list_params(params, subnets,
+                               'Subnets.member.%d')
+        return self.get_list('DetachLoadBalancerFromSubnets',
+                             params, None)
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/attributes.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/attributes.py
new file mode 100644
index 0000000000000000000000000000000000000000..605e5d54a7fe4d218c956c59cfd6381f85f9aca0
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/attributes.py
@@ -0,0 +1,154 @@
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+# Created by Chris Huegle for TellApart, Inc.
+
+class ConnectionSettingAttribute(object):
+    """
+    Represents the ConnectionSetting segment of ELB Attributes.
+    """
+    def __init__(self, connection=None):
+        self.idle_timeout = None
+
+    def __repr__(self):
+        return 'ConnectionSettingAttribute(%s)' % (
+            self.idle_timeout)
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name == 'IdleTimeout':
+            self.idle_timeout = int(value)
+
+class CrossZoneLoadBalancingAttribute(object):
+    """
+    Represents the CrossZoneLoadBalancing segment of ELB Attributes.
+    """
+    def __init__(self, connection=None):
+        self.enabled = None
+
+    def __repr__(self):
+        return 'CrossZoneLoadBalancingAttribute(%s)' % (
+            self.enabled)
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name == 'Enabled':
+            if value.lower() == 'true':
+                self.enabled = True
+            else:
+                self.enabled = False
+
+
+class AccessLogAttribute(object):
+    """
+    Represents the AccessLog segment of ELB attributes.
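+
+    These attributes are normally read via
+    :meth:`ELBConnection.get_all_lb_attributes` rather than constructed
+    by hand; a sketch (illustrative, with an assumed ``elb_conn``
+    connection and balancer name)::
+
+        attrs = elb_conn.get_all_lb_attributes('my-lb')
+        if attrs.access_log.enabled:
+            print(attrs.access_log.s3_bucket_name,
+                  attrs.access_log.emit_interval)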
+ """ + def __init__(self, connection=None): + self.enabled = None + self.s3_bucket_name = None + self.s3_bucket_prefix = None + self.emit_interval = None + + def __repr__(self): + return 'AccessLog(%s, %s, %s, %s)' % ( + self.enabled, + self.s3_bucket_name, + self.s3_bucket_prefix, + self.emit_interval + ) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Enabled': + if value.lower() == 'true': + self.enabled = True + else: + self.enabled = False + elif name == 'S3BucketName': + self.s3_bucket_name = value + elif name == 'S3BucketPrefix': + self.s3_bucket_prefix = value + elif name == 'EmitInterval': + self.emit_interval = int(value) + + +class ConnectionDrainingAttribute(object): + """ + Represents the ConnectionDraining segment of ELB attributes. + """ + def __init__(self, connection=None): + self.enabled = None + self.timeout = None + + def __repr__(self): + return 'ConnectionDraining(%s, %s)' % ( + self.enabled, + self.timeout + ) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Enabled': + if value.lower() == 'true': + self.enabled = True + else: + self.enabled = False + elif name == 'Timeout': + self.timeout = int(value) + + +class LbAttributes(object): + """ + Represents the Attributes of an Elastic Load Balancer. + """ + def __init__(self, connection=None): + self.connection = connection + self.cross_zone_load_balancing = CrossZoneLoadBalancingAttribute( + self.connection) + self.access_log = AccessLogAttribute(self.connection) + self.connection_draining = ConnectionDrainingAttribute(self.connection) + self.connecting_settings = ConnectionSettingAttribute(self.connection) + + def __repr__(self): + return 'LbAttributes(%s, %s, %s, %s)' % ( + repr(self.cross_zone_load_balancing), + repr(self.access_log), + repr(self.connection_draining), + repr(self.connecting_settings)) + + def startElement(self, name, attrs, connection): + if name == 'CrossZoneLoadBalancing': + return self.cross_zone_load_balancing + if name == 'AccessLog': + return self.access_log + if name == 'ConnectionDraining': + return self.connection_draining + if name == 'ConnectionSettings': + return self.connecting_settings + + def endElement(self, name, value, connection): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/healthcheck.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/healthcheck.py new file mode 100644 index 0000000000000000000000000000000000000000..040f9623004fa25e5578cef298c338e52feb1389 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/healthcheck.py @@ -0,0 +1,89 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class HealthCheck(object): + """ + Represents an EC2 Access Point Health Check. See + :ref:`elb-configuring-a-health-check` for a walkthrough on configuring + load balancer health checks. + """ + def __init__(self, access_point=None, interval=30, target=None, + healthy_threshold=3, timeout=5, unhealthy_threshold=5): + """ + :ivar str access_point: The name of the load balancer this + health check is associated with. + :ivar int interval: Specifies how many seconds there are between + health checks. + :ivar str target: Determines what to check on an instance. See the + Amazon HealthCheck_ documentation for possible Target values. + + .. _HealthCheck: http://docs.amazonwebservices.com/ElasticLoadBalancing/latest/APIReference/API_HealthCheck.html + """ + self.access_point = access_point + self.interval = interval + self.target = target + self.healthy_threshold = healthy_threshold + self.timeout = timeout + self.unhealthy_threshold = unhealthy_threshold + + def __repr__(self): + return 'HealthCheck:%s' % self.target + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Interval': + self.interval = int(value) + elif name == 'Target': + self.target = value + elif name == 'HealthyThreshold': + self.healthy_threshold = int(value) + elif name == 'Timeout': + self.timeout = int(value) + elif name == 'UnhealthyThreshold': + self.unhealthy_threshold = int(value) + else: + setattr(self, name, value) + + def update(self): + """ + In the case where you have accessed an existing health check on a + load balancer, this method applies this instance's health check + values to the load balancer it is attached to. + + .. note:: This method will not do anything if the :py:attr:`access_point` + attribute isn't set, as is the case with a newly instantiated + HealthCheck instance. 
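+
+        A sketch of the usual flow around this method (illustrative;
+        ``elb_conn`` and the balancer name are assumed)::
+
+            from boto.ec2.elb.healthcheck import HealthCheck
+
+            hc = HealthCheck(target='HTTP:80/ping', interval=20, timeout=3,
+                             healthy_threshold=2, unhealthy_threshold=5)
+            elb_conn.configure_health_check('my-lb', hc)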
+ """ + if not self.access_point: + return + + new_hc = self.connection.configure_health_check(self.access_point, + self) + self.interval = new_hc.interval + self.target = new_hc.target + self.healthy_threshold = new_hc.healthy_threshold + self.unhealthy_threshold = new_hc.unhealthy_threshold + self.timeout = new_hc.timeout diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/instancestate.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/instancestate.py new file mode 100644 index 0000000000000000000000000000000000000000..dd61c123ce3f28a3bee63de7e930af2a46d163be --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/instancestate.py @@ -0,0 +1,63 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class InstanceState(object): + """ + Represents the state of an EC2 Load Balancer Instance + """ + + def __init__(self, load_balancer=None, description=None, + state=None, instance_id=None, reason_code=None): + """ + :ivar boto.ec2.elb.loadbalancer.LoadBalancer load_balancer: The + load balancer this instance is registered to. + :ivar str description: A description of the instance. + :ivar str instance_id: The EC2 instance ID. + :ivar str reason_code: Provides information about the cause of + an OutOfService instance. Specifically, it indicates whether the + cause is Elastic Load Balancing or the instance behind the + LoadBalancer. + :ivar str state: Specifies the current state of the instance. 
+ """ + self.load_balancer = load_balancer + self.description = description + self.state = state + self.instance_id = instance_id + self.reason_code = reason_code + + def __repr__(self): + return 'InstanceState:(%s,%s)' % (self.instance_id, self.state) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Description': + self.description = value + elif name == 'State': + self.state = value + elif name == 'InstanceId': + self.instance_id = value + elif name == 'ReasonCode': + self.reason_code = value + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/listelement.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/listelement.py new file mode 100644 index 0000000000000000000000000000000000000000..0fe3a1e8ebda83b8be9ef96d9da7c8ad0736003e --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/listelement.py @@ -0,0 +1,36 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class ListElement(list): + """ + A :py:class:`list` subclass that has some additional methods + for interacting with Amazon's XML API. + """ + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'member': + self.append(value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/listener.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/listener.py new file mode 100644 index 0000000000000000000000000000000000000000..ed33b131a65429d635e0dcfe151d1c9c51c02b90 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/listener.py @@ -0,0 +1,87 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.ec2.elb.listelement import ListElement + + +class Listener(object): + """ + Represents an EC2 Load Balancer Listener tuple + """ + + def __init__(self, load_balancer=None, load_balancer_port=0, + instance_port=0, protocol='', ssl_certificate_id=None, instance_protocol=None): + self.load_balancer = load_balancer + self.load_balancer_port = load_balancer_port + self.instance_port = instance_port + self.protocol = protocol + self.instance_protocol = instance_protocol + self.ssl_certificate_id = ssl_certificate_id + self.policy_names = ListElement() + + def __repr__(self): + r = "(%d, %d, '%s'" % (self.load_balancer_port, self.instance_port, self.protocol) + if self.instance_protocol: + r += ", '%s'" % self.instance_protocol + if self.ssl_certificate_id: + r += ', %s' % (self.ssl_certificate_id) + r += ')' + return r + + def startElement(self, name, attrs, connection): + if name == 'PolicyNames': + return self.policy_names + return None + + def endElement(self, name, value, connection): + if name == 'LoadBalancerPort': + self.load_balancer_port = int(value) + elif name == 'InstancePort': + self.instance_port = int(value) + elif name == 'InstanceProtocol': + self.instance_protocol = value + elif name == 'Protocol': + self.protocol = value + elif name == 'SSLCertificateId': + self.ssl_certificate_id = value + else: + setattr(self, name, value) + + def get_tuple(self): + return self.load_balancer_port, self.instance_port, self.protocol + + def get_complex_tuple(self): + return self.load_balancer_port, self.instance_port, self.protocol, self.instance_protocol + + def __getitem__(self, key): + if key == 0: + return self.load_balancer_port + if key == 1: + return self.instance_port + if key == 2: + return self.protocol + if key == 3: + return self.instance_protocol + if key == 4: + return self.ssl_certificate_id + raise KeyError diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/loadbalancer.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/loadbalancer.py new file mode 100644 index 0000000000000000000000000000000000000000..501544941facfa397e722b1bf8805f4f7a50ce31 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/loadbalancer.py @@ -0,0 +1,419 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.ec2.elb.healthcheck import HealthCheck
+from boto.ec2.elb.listener import Listener
+from boto.ec2.elb.listelement import ListElement
+from boto.ec2.elb.policies import Policies, OtherPolicy
+from boto.ec2.elb.securitygroup import SecurityGroup
+from boto.ec2.instanceinfo import InstanceInfo
+from boto.resultset import ResultSet
+from boto.compat import six
+
+
+class Backend(object):
+    """Backend server description"""
+
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.instance_port = None
+        self.policies = None
+
+    def __repr__(self):
+        return 'Backend(%r:%r)' % (self.instance_port, self.policies)
+
+    def startElement(self, name, attrs, connection):
+        if name == 'PolicyNames':
+            self.policies = ResultSet([('member', OtherPolicy)])
+            return self.policies
+
+    def endElement(self, name, value, connection):
+        if name == 'InstancePort':
+            self.instance_port = int(value)
+        return
+
+
+class LoadBalancerZones(object):
+    """
+    Used to collect the zones for a Load Balancer when enable_zones
+    or disable_zones are called.
+    """
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.zones = ListElement()
+
+    def startElement(self, name, attrs, connection):
+        if name == 'AvailabilityZones':
+            return self.zones
+
+    def endElement(self, name, value, connection):
+        pass
+
+
+class LoadBalancer(object):
+    """
+    Represents an EC2 Load Balancer.
+    """
+
+    def __init__(self, connection=None, name=None, endpoints=None):
+        """
+        :ivar boto.ec2.elb.ELBConnection connection: The connection this load
+            balancer was instantiated from.
+        :ivar list listeners: A list of tuples in the form of
+            ``(<Inbound port>, <Outbound port>, <Protocol>)``
+        :ivar boto.ec2.elb.healthcheck.HealthCheck health_check: The health
+            check policy for this load balancer.
+        :ivar boto.ec2.elb.policies.Policies policies: Cookie stickiness and
+            other policies.
+        :ivar str name: The name of the Load Balancer.
+        :ivar str dns_name: The external DNS name for the balancer.
+        :ivar str created_time: A date+time string showing when the
+            load balancer was created.
+        :ivar list instances: A list of :py:class:`boto.ec2.instanceinfo.InstanceInfo`
+            instances, representing the EC2 instances this load balancer is
+            distributing requests to.
+        :ivar list availability_zones: The availability zones this balancer
+            covers.
+        :ivar str canonical_hosted_zone_name: Current CNAME for the balancer.
+        :ivar str canonical_hosted_zone_name_id: The Route 53 hosted zone
+            ID of this balancer. Needed when creating an Alias record in a
+            Route 53 hosted zone.
+        :ivar boto.ec2.elb.securitygroup.SecurityGroup source_security_group:
+            The security group that you can use as part of your inbound rules
+            for your load balancer back-end instances to disallow traffic
+            from sources other than your load balancer.
+        :ivar list subnets: A list of subnets this balancer is on.
+        :ivar list security_groups: A list of additional security groups that
+            have been applied.
+        :ivar str vpc_id: The ID of the VPC that this ELB resides within.
+        :ivar list backends: A list of :py:class:`boto.ec2.elb.loadbalancer.Backend`
+            back-end server descriptions.
+        """
+        self.connection = connection
+        self.name = name
+        self.listeners = None
+        self.health_check = None
+        self.policies = None
+        self.dns_name = None
+        self.created_time = None
+        self.instances = None
+        self.availability_zones = ListElement()
+        self.canonical_hosted_zone_name = None
+        self.canonical_hosted_zone_name_id = None
+        self.source_security_group = None
+        self.subnets = ListElement()
+        self.security_groups = ListElement()
+        self.vpc_id = None
+        self.scheme = None
+        self.backends = None
+        self._attributes = None
+
+    def __repr__(self):
+        return 'LoadBalancer:%s' % self.name
+
+    def startElement(self, name, attrs, connection):
+        if name == 'HealthCheck':
+            self.health_check = HealthCheck(self)
+            return self.health_check
+        elif name == 'ListenerDescriptions':
+            self.listeners = ResultSet([('member', Listener)])
+            return self.listeners
+        elif name == 'AvailabilityZones':
+            return self.availability_zones
+        elif name == 'Instances':
+            self.instances = ResultSet([('member', InstanceInfo)])
+            return self.instances
+        elif name == 'Policies':
+            self.policies = Policies(self)
+            return self.policies
+        elif name == 'SourceSecurityGroup':
+            self.source_security_group = SecurityGroup()
+            return self.source_security_group
+        elif name == 'Subnets':
+            return self.subnets
+        elif name == 'SecurityGroups':
+            return self.security_groups
+        elif name == 'VPCId':
+            pass
+        elif name == "BackendServerDescriptions":
+            self.backends = ResultSet([('member', Backend)])
+            return self.backends
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'LoadBalancerName':
+            self.name = value
+        elif name == 'DNSName':
+            self.dns_name = value
+        elif name == 'CreatedTime':
+            self.created_time = value
+        elif name == 'InstanceId':
+            self.instances.append(value)
+        elif name == 'CanonicalHostedZoneName':
+            self.canonical_hosted_zone_name = value
+        elif name == 'CanonicalHostedZoneNameID':
+            self.canonical_hosted_zone_name_id = value
+        elif name == 'VPCId':
+            self.vpc_id = value
+        elif name == 'Scheme':
+            self.scheme = value
+        else:
+            setattr(self, name, value)
+
+    def enable_zones(self, zones):
+        """
+        Enable availability zones for this Access Point.
+        All zones must be in the same region as the Access Point.
+
+        :type zones: string or List of strings
+        :param zones: The name of the zone(s) to add.
+
+        """
+        if isinstance(zones, six.string_types):
+            zones = [zones]
+        new_zones = self.connection.enable_availability_zones(self.name, zones)
+        self.availability_zones = new_zones
+
+    def disable_zones(self, zones):
+        """
+        Disable availability zones from this Access Point.
+
+        :type zones: string or List of strings
+        :param zones: The name of the zone(s) to remove.
+
+        """
+        if isinstance(zones, six.string_types):
+            zones = [zones]
+        new_zones = self.connection.disable_availability_zones(
+            self.name, zones)
+        self.availability_zones = new_zones
+
+    def get_attributes(self, force=False):
+        """
+        Gets the LbAttributes. The Attributes will be cached.
+
+        :type force: bool
+        :param force: Ignore cache value and reload.
+
+        :rtype: boto.ec2.elb.attributes.LbAttributes
+        :return: The LbAttributes object
+        """
+        if not self._attributes or force:
+            self._attributes = self.connection.get_all_lb_attributes(self.name)
+        return self._attributes
+
+    def is_cross_zone_load_balancing(self, force=False):
+        """
+        Identifies if the ELB is currently configured to do CrossZone Balancing.
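+
+        For example (a sketch; ``lb`` is assumed to be an existing
+        :py:class:`LoadBalancer`)::
+
+            if not lb.is_cross_zone_load_balancing():
+                lb.enable_cross_zone_load_balancing()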
+ + :type force: bool + :param force: Ignore cache value and reload. + + :rtype: bool + :return: True if balancing is enabled, False if not. + """ + return self.get_attributes(force).cross_zone_load_balancing.enabled + + def enable_cross_zone_load_balancing(self): + """ + Turns on CrossZone Load Balancing for this ELB. + + :rtype: bool + :return: True if successful, False if not. + """ + success = self.connection.modify_lb_attribute( + self.name, 'crossZoneLoadBalancing', True) + if success and self._attributes: + self._attributes.cross_zone_load_balancing.enabled = True + return success + + def disable_cross_zone_load_balancing(self): + """ + Turns off CrossZone Load Balancing for this ELB. + + :rtype: bool + :return: True if successful, False if not. + """ + success = self.connection.modify_lb_attribute( + self.name, 'crossZoneLoadBalancing', False) + if success and self._attributes: + self._attributes.cross_zone_load_balancing.enabled = False + return success + + def register_instances(self, instances): + """ + Adds instances to this load balancer. All instances must be in the same + region as the load balancer. Adding endpoints that are already + registered with the load balancer has no effect. + + :param list instances: List of instance IDs (strings) that you'd like + to add to this load balancer. + + """ + if isinstance(instances, six.string_types): + instances = [instances] + new_instances = self.connection.register_instances(self.name, + instances) + self.instances = new_instances + + def deregister_instances(self, instances): + """ + Remove instances from this load balancer. Removing instances that are + not registered with the load balancer has no effect. + + :param list instances: List of instance IDs (strings) that you'd like + to remove from this load balancer. + + """ + if isinstance(instances, six.string_types): + instances = [instances] + new_instances = self.connection.deregister_instances(self.name, + instances) + self.instances = new_instances + + def delete(self): + """ + Delete this load balancer. + """ + return self.connection.delete_load_balancer(self.name) + + def configure_health_check(self, health_check): + """ + Configures the health check behavior for the instances behind this + load balancer. See :ref:`elb-configuring-a-health-check` for a + walkthrough. + + :param boto.ec2.elb.healthcheck.HealthCheck health_check: A + HealthCheck instance that tells the load balancer how to check + its instances for health. + """ + return self.connection.configure_health_check(self.name, health_check) + + def get_instance_health(self, instances=None): + """ + Returns a list of :py:class:`boto.ec2.elb.instancestate.InstanceState` + objects, which show the health of the instances attached to this + load balancer. + + :rtype: list + :returns: A list of + :py:class:`InstanceState ` + instances, representing the instances + attached to this load balancer. 
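+
+        For example, collecting the unhealthy members (a sketch; ``lb`` is
+        assumed to have been fetched from an existing connection)::
+
+            out_of_service = [s.instance_id for s in lb.get_instance_health()
+                              if s.state == 'OutOfService']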
+ """ + return self.connection.describe_instance_health(self.name, instances) + + def create_listeners(self, listeners): + return self.connection.create_load_balancer_listeners(self.name, + listeners) + + def create_listener(self, inPort, outPort=None, proto="tcp"): + if outPort is None: + outPort = inPort + return self.create_listeners([(inPort, outPort, proto)]) + + def delete_listeners(self, listeners): + return self.connection.delete_load_balancer_listeners(self.name, + listeners) + + def delete_listener(self, inPort): + return self.delete_listeners([inPort]) + + def delete_policy(self, policy_name): + """ + Deletes a policy from the LoadBalancer. The specified policy must not + be enabled for any listeners. + """ + return self.connection.delete_lb_policy(self.name, policy_name) + + def set_policies_of_listener(self, lb_port, policies): + return self.connection.set_lb_policies_of_listener(self.name, + lb_port, + policies) + + def set_policies_of_backend_server(self, instance_port, policies): + return self.connection.set_lb_policies_of_backend_server( + self.name, instance_port, policies) + + def create_cookie_stickiness_policy(self, cookie_expiration_period, + policy_name): + return self.connection.create_lb_cookie_stickiness_policy( + cookie_expiration_period, self.name, policy_name) + + def create_app_cookie_stickiness_policy(self, name, policy_name): + return self.connection.create_app_cookie_stickiness_policy(name, + self.name, + policy_name) + + def set_listener_SSL_certificate(self, lb_port, ssl_certificate_id): + return self.connection.set_lb_listener_SSL_certificate( + self.name, lb_port, ssl_certificate_id) + + def create_lb_policy(self, policy_name, policy_type, policy_attribute): + return self.connection.create_lb_policy( + self.name, policy_name, policy_type, policy_attribute) + + def attach_subnets(self, subnets): + """ + Attaches load balancer to one or more subnets. + Attaching subnets that are already registered with the + Load Balancer has no effect. + + :type subnets: string or List of strings + :param subnets: The name of the subnet(s) to add. + + """ + if isinstance(subnets, six.string_types): + subnets = [subnets] + new_subnets = self.connection.attach_lb_to_subnets(self.name, subnets) + self.subnets = new_subnets + + def detach_subnets(self, subnets): + """ + Detaches load balancer from one or more subnets. + + :type subnets: string or List of strings + :param subnets: The name of the subnet(s) to detach. + + """ + if isinstance(subnets, six.string_types): + subnets = [subnets] + new_subnets = self.connection.detach_lb_from_subnets( + self.name, subnets) + self.subnets = new_subnets + + def apply_security_groups(self, security_groups): + """ + Associates one or more security groups with the load balancer. + The provided security groups will override any currently applied + security groups. + + :type security_groups: string or List of strings + :param security_groups: The name of the security group(s) to add. 
+ + """ + if isinstance(security_groups, six.string_types): + security_groups = [security_groups] + new_sgs = self.connection.apply_security_groups_to_lb( + self.name, security_groups) + self.security_groups = new_sgs diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/policies.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/policies.py new file mode 100644 index 0000000000000000000000000000000000000000..50fac0aff9aac0581e84cde902dbc108bdd60544 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/policies.py @@ -0,0 +1,108 @@ +# Copyright (c) 2010 Reza Lotun http://reza.lotun.name +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.resultset import ResultSet + + +class AppCookieStickinessPolicy(object): + def __init__(self, connection=None): + self.cookie_name = None + self.policy_name = None + + def __repr__(self): + return 'AppCookieStickiness(%s, %s)' % (self.policy_name, + self.cookie_name) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'CookieName': + self.cookie_name = value + elif name == 'PolicyName': + self.policy_name = value + + +class LBCookieStickinessPolicy(object): + def __init__(self, connection=None): + self.policy_name = None + self.cookie_expiration_period = None + + def __repr__(self): + return 'LBCookieStickiness(%s, %s)' % (self.policy_name, + self.cookie_expiration_period) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'CookieExpirationPeriod': + self.cookie_expiration_period = value + elif name == 'PolicyName': + self.policy_name = value + + +class OtherPolicy(object): + def __init__(self, connection=None): + self.policy_name = None + + def __repr__(self): + return 'OtherPolicy(%s)' % (self.policy_name) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + self.policy_name = value + + +class Policies(object): + """ + ELB Policies + """ + def __init__(self, connection=None): + self.connection = connection + self.app_cookie_stickiness_policies = None + self.lb_cookie_stickiness_policies = None + self.other_policies = None + + def __repr__(self): + app = 'AppCookieStickiness%s' % self.app_cookie_stickiness_policies + lb = 'LBCookieStickiness%s' % self.lb_cookie_stickiness_policies + other = 'Other%s' % self.other_policies + return 'Policies(%s,%s,%s)' % (app, lb, other) + + def startElement(self, name, attrs, connection): + if name == 
'AppCookieStickinessPolicies': + rs = ResultSet([('member', AppCookieStickinessPolicy)]) + self.app_cookie_stickiness_policies = rs + return rs + elif name == 'LBCookieStickinessPolicies': + rs = ResultSet([('member', LBCookieStickinessPolicy)]) + self.lb_cookie_stickiness_policies = rs + return rs + elif name == 'OtherPolicies': + rs = ResultSet([('member', OtherPolicy)]) + self.other_policies = rs + return rs + + def endElement(self, name, value, connection): + return diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/securitygroup.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/securitygroup.py new file mode 100644 index 0000000000000000000000000000000000000000..65f981f8e21f20f4c989731102f06a91b4e4d0c5 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/securitygroup.py @@ -0,0 +1,38 @@ +# Copyright (c) 2010 Reza Lotun http://reza.lotun.name +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class SecurityGroup(object): + def __init__(self, connection=None): + self.name = None + self.owner_alias = None + + def __repr__(self): + return 'SecurityGroup(%s, %s)' % (self.name, self.owner_alias) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'GroupName': + self.name = value + elif name == 'OwnerAlias': + self.owner_alias = value diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/group.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/group.py new file mode 100644 index 0000000000000000000000000000000000000000..99d78734212b0fe113b5d03dc974123a9f11c95d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/group.py @@ -0,0 +1,38 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class Group(object): + def __init__(self, parent=None): + self.id = None + self.name = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'groupId': + self.id = value + elif name == 'groupName': + self.name = value + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/image.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/image.py new file mode 100644 index 0000000000000000000000000000000000000000..612404f07def5e9d929764d7fac68b98841b1dd0 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/image.py @@ -0,0 +1,440 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +from boto.ec2.ec2object import EC2Object, TaggedEC2Object +from boto.ec2.blockdevicemapping import BlockDeviceMapping + + +class ProductCodes(list): + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'productCode': + self.append(value) + + +class BillingProducts(list): + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'billingProduct': + self.append(value) + + +class Image(TaggedEC2Object): + """ + Represents an EC2 Image + """ + + def __init__(self, connection=None): + super(Image, self).__init__(connection) + self.id = None + self.location = None + self.state = None + self.ownerId = None # for backwards compatibility + self.owner_id = None + self.owner_alias = None + self.is_public = False + self.architecture = None + self.platform = None + self.type = None + self.kernel_id = None + self.ramdisk_id = None + self.name = None + self.description = None + self.product_codes = ProductCodes() + self.billing_products = BillingProducts() + self.block_device_mapping = None + self.root_device_type = None + self.root_device_name = None + self.virtualization_type = None + self.hypervisor = None + self.instance_lifecycle = None + self.sriov_net_support = None + + def __repr__(self): + return 'Image:%s' % self.id + + def startElement(self, name, attrs, connection): + retval = super(Image, self).startElement(name, attrs, connection) + if retval is not None: + return retval + if name == 'blockDeviceMapping': + self.block_device_mapping = BlockDeviceMapping() + return self.block_device_mapping + elif name == 'productCodes': + return self.product_codes + elif name == 'billingProducts': + return self.billing_products + else: + return None + + def endElement(self, name, value, connection): + if name == 'imageId': + self.id = value + elif name == 'imageLocation': + self.location = value + elif name == 'imageState': + self.state = value + elif name == 'imageOwnerId': + self.ownerId = value # for backwards compatibility + self.owner_id = value + elif name == 'isPublic': + if value == 'false': + self.is_public = False + elif value == 'true': + self.is_public = True + else: + raise Exception( + 'Unexpected value of isPublic %s for image %s' % ( + value, + self.id + ) + ) + elif name == 'architecture': + self.architecture = value + elif name == 'imageType': + self.type = value + elif name == 'kernelId': + self.kernel_id = value + elif name == 'ramdiskId': + self.ramdisk_id = value + elif name == 'imageOwnerAlias': + self.owner_alias = value + elif name == 'platform': + self.platform = value + elif name == 'name': + self.name = value + elif name == 'description': + self.description = value + elif name == 'rootDeviceType': + self.root_device_type = value + elif name == 'rootDeviceName': + self.root_device_name = value + elif name == 'virtualizationType': + self.virtualization_type = value + elif name == 'hypervisor': + self.hypervisor = value + elif name == 'instanceLifecycle': + self.instance_lifecycle = value + elif name == 'sriovNetSupport': + self.sriov_net_support = value + else: + setattr(self, name, value) + + def _update(self, updated): + self.__dict__.update(updated.__dict__) + + def update(self, validate=False, dry_run=False): + """ + Update the image's state information by making a call to fetch + the current image attributes from the service. 
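+
+        For example (a sketch; ``image`` is assumed to come from a call
+        such as ``get_all_images`` on an existing EC2 connection)::
+
+            if image.update(validate=True) == 'available':
+                reservation = image.run(key_name='my-key')  # key name illustrative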
+ + :type validate: bool + :param validate: By default, if EC2 returns no data about the + image the update method returns quietly. If + the validate param is True, however, it will + raise a ValueError exception if no data is + returned from EC2. + """ + rs = self.connection.get_all_images([self.id], dry_run=dry_run) + if len(rs) > 0: + img = rs[0] + if img.id == self.id: + self._update(img) + elif validate: + raise ValueError('%s is not a valid Image ID' % self.id) + return self.state + + def run(self, min_count=1, max_count=1, key_name=None, + security_groups=None, user_data=None, + addressing_type=None, instance_type='m1.small', placement=None, + kernel_id=None, ramdisk_id=None, + monitoring_enabled=False, subnet_id=None, + block_device_map=None, + disable_api_termination=False, + instance_initiated_shutdown_behavior=None, + private_ip_address=None, + placement_group=None, security_group_ids=None, + additional_info=None, instance_profile_name=None, + instance_profile_arn=None, tenancy=None, dry_run=False): + + """ + Runs this instance. + + :type min_count: int + :param min_count: The minimum number of instances to start + + :type max_count: int + :param max_count: The maximum number of instances to start + + :type key_name: string + :param key_name: The name of the key pair with which to + launch instances. + + :type security_groups: list of strings + :param security_groups: The names of the security groups with which to + associate instances. + + :type user_data: string + :param user_data: The Base64-encoded MIME user data to be made + available to the instance(s) in this reservation. + + :type instance_type: string + :param instance_type: The type of instance to run: + + * t1.micro + * m1.small + * m1.medium + * m1.large + * m1.xlarge + * m3.medium + * m3.large + * m3.xlarge + * m3.2xlarge + * c1.medium + * c1.xlarge + * m2.xlarge + * m2.2xlarge + * m2.4xlarge + * cr1.8xlarge + * hi1.4xlarge + * hs1.8xlarge + * cc1.4xlarge + * cg1.4xlarge + * cc2.8xlarge + * g2.2xlarge + * c3.large + * c3.xlarge + * c3.2xlarge + * c3.4xlarge + * c3.8xlarge + * i2.xlarge + * i2.2xlarge + * i2.4xlarge + * i2.8xlarge + * t2.micro + * t2.small + * t2.medium + + :type placement: string + :param placement: The Availability Zone to launch the instance into. + + :type kernel_id: string + :param kernel_id: The ID of the kernel with which to launch the + instances. + + :type ramdisk_id: string + :param ramdisk_id: The ID of the RAM disk with which to launch the + instances. + + :type monitoring_enabled: bool + :param monitoring_enabled: Enable CloudWatch monitoring on + the instance. + + :type subnet_id: string + :param subnet_id: The subnet ID within which to launch the instances + for VPC. + + :type private_ip_address: string + :param private_ip_address: If you're using VPC, you can + optionally use this parameter to assign the instance a + specific available IP address from the subnet (e.g., + 10.0.0.25). + + :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping` + :param block_device_map: A BlockDeviceMapping data structure + describing the EBS volumes associated with the Image. + + :type disable_api_termination: bool + :param disable_api_termination: If True, the instances will be locked + and will not be able to be terminated via the API. + + :type instance_initiated_shutdown_behavior: string + :param instance_initiated_shutdown_behavior: Specifies whether the + instance stops or terminates on instance-initiated shutdown. 
+ Valid values are: + + * stop + * terminate + + :type placement_group: string + :param placement_group: If specified, this is the name of the placement + group in which the instance(s) will be launched. + + :type additional_info: string + :param additional_info: Specifies additional information to make + available to the instance(s). + + :type security_group_ids: list of strings + :param security_group_ids: The ID of the VPC security groups with + which to associate instances. + + :type instance_profile_name: string + :param instance_profile_name: The name of + the IAM Instance Profile (IIP) to associate with the instances. + + :type instance_profile_arn: string + :param instance_profile_arn: The Amazon resource name (ARN) of + the IAM Instance Profile (IIP) to associate with the instances. + + :type tenancy: string + :param tenancy: The tenancy of the instance you want to + launch. An instance with a tenancy of 'dedicated' runs on + single-tenant hardware and can only be launched into a + VPC. Valid values are:"default" or "dedicated". + NOTE: To use dedicated tenancy you MUST specify a VPC + subnet-ID as well. + + :rtype: Reservation + :return: The :class:`boto.ec2.instance.Reservation` associated with + the request for machines + + """ + + return self.connection.run_instances(self.id, min_count, max_count, + key_name, security_groups, + user_data, addressing_type, + instance_type, placement, + kernel_id, ramdisk_id, + monitoring_enabled, subnet_id, + block_device_map, disable_api_termination, + instance_initiated_shutdown_behavior, + private_ip_address, placement_group, + security_group_ids=security_group_ids, + additional_info=additional_info, + instance_profile_name=instance_profile_name, + instance_profile_arn=instance_profile_arn, + tenancy=tenancy, dry_run=dry_run) + + def deregister(self, delete_snapshot=False, dry_run=False): + return self.connection.deregister_image( + self.id, + delete_snapshot, + dry_run=dry_run + ) + + def get_launch_permissions(self, dry_run=False): + img_attrs = self.connection.get_image_attribute( + self.id, + 'launchPermission', + dry_run=dry_run + ) + return img_attrs.attrs + + def set_launch_permissions(self, user_ids=None, group_names=None, + dry_run=False): + return self.connection.modify_image_attribute(self.id, + 'launchPermission', + 'add', + user_ids, + group_names, + dry_run=dry_run) + + def remove_launch_permissions(self, user_ids=None, group_names=None, + dry_run=False): + return self.connection.modify_image_attribute(self.id, + 'launchPermission', + 'remove', + user_ids, + group_names, + dry_run=dry_run) + + def reset_launch_attributes(self, dry_run=False): + return self.connection.reset_image_attribute( + self.id, + 'launchPermission', + dry_run=dry_run + ) + + def get_kernel(self, dry_run=False): + img_attrs = self.connection.get_image_attribute( + self.id, + 'kernel', + dry_run=dry_run + ) + return img_attrs.kernel + + def get_ramdisk(self, dry_run=False): + img_attrs = self.connection.get_image_attribute( + self.id, + 'ramdisk', + dry_run=dry_run + ) + return img_attrs.ramdisk + + +class ImageAttribute(object): + def __init__(self, parent=None): + self.name = None + self.kernel = None + self.ramdisk = None + self.attrs = {} + + def startElement(self, name, attrs, connection): + if name == 'blockDeviceMapping': + self.attrs['block_device_mapping'] = BlockDeviceMapping() + return self.attrs['block_device_mapping'] + else: + return None + + def endElement(self, name, value, connection): + if name == 'launchPermission': + self.name = 
'launch_permission' + elif name == 'group': + if 'groups' in self.attrs: + self.attrs['groups'].append(value) + else: + self.attrs['groups'] = [value] + elif name == 'userId': + if 'user_ids' in self.attrs: + self.attrs['user_ids'].append(value) + else: + self.attrs['user_ids'] = [value] + elif name == 'productCode': + if 'product_codes' in self.attrs: + self.attrs['product_codes'].append(value) + else: + self.attrs['product_codes'] = [value] + elif name == 'imageId': + self.image_id = value + elif name == 'kernel': + self.kernel = value + elif name == 'ramdisk': + self.ramdisk = value + else: + setattr(self, name, value) + + +class CopyImage(object): + def __init__(self, parent=None): + self._parent = parent + self.image_id = None + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'imageId': + self.image_id = value diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/instance.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/instance.py new file mode 100644 index 0000000000000000000000000000000000000000..eb7c9ec615e79daa75b8f7a2df4015a6720530b7 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/instance.py @@ -0,0 +1,678 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an EC2 Instance +""" +import boto +from boto.ec2.ec2object import EC2Object, TaggedEC2Object +from boto.resultset import ResultSet +from boto.ec2.address import Address +from boto.ec2.blockdevicemapping import BlockDeviceMapping +from boto.ec2.image import ProductCodes +from boto.ec2.networkinterface import NetworkInterface +from boto.ec2.group import Group +import base64 + + +class InstanceState(object): + """ + The state of the instance. + + :ivar code: The low byte represents the state. The high byte is an + opaque internal value and should be ignored. Valid values: + + * 0 (pending) + * 16 (running) + * 32 (shutting-down) + * 48 (terminated) + * 64 (stopping) + * 80 (stopped) + + :ivar name: The name of the state of the instance. 
Valid values: + + * "pending" + * "running" + * "shutting-down" + * "terminated" + * "stopping" + * "stopped" + """ + def __init__(self, code=0, name=None): + self.code = code + self.name = name + + def __repr__(self): + return '%s(%d)' % (self.name, self.code) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'code': + self.code = int(value) + elif name == 'name': + self.name = value + else: + setattr(self, name, value) + + +class InstancePlacement(object): + """ + The location where the instance launched. + + :ivar zone: The Availability Zone of the instance. + :ivar group_name: The name of the placement group the instance is + in (for cluster compute instances). + :ivar tenancy: The tenancy of the instance (if the instance is + running within a VPC). An instance with a tenancy of dedicated + runs on single-tenant hardware. + """ + def __init__(self, zone=None, group_name=None, tenancy=None): + self.zone = zone + self.group_name = group_name + self.tenancy = tenancy + + def __repr__(self): + return self.zone + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'availabilityZone': + self.zone = value + elif name == 'groupName': + self.group_name = value + elif name == 'tenancy': + self.tenancy = value + else: + setattr(self, name, value) + + +class Reservation(EC2Object): + """ + Represents a Reservation response object. + + :ivar id: The unique ID of the Reservation. + :ivar owner_id: The unique ID of the owner of the Reservation. + :ivar groups: A list of Group objects representing the security + groups associated with launched instances. + :ivar instances: A list of Instance objects launched in this + Reservation. + """ + def __init__(self, connection=None): + super(Reservation, self).__init__(connection) + self.id = None + self.owner_id = None + self.groups = [] + self.instances = [] + + def __repr__(self): + return 'Reservation:%s' % self.id + + def startElement(self, name, attrs, connection): + if name == 'instancesSet': + self.instances = ResultSet([('item', Instance)]) + return self.instances + elif name == 'groupSet': + self.groups = ResultSet([('item', Group)]) + return self.groups + else: + return None + + def endElement(self, name, value, connection): + if name == 'reservationId': + self.id = value + elif name == 'ownerId': + self.owner_id = value + else: + setattr(self, name, value) + + def stop_all(self, dry_run=False): + for instance in self.instances: + instance.stop(dry_run=dry_run) + + +class Instance(TaggedEC2Object): + """ + Represents an instance. + + :ivar id: The unique ID of the Instance. + :ivar groups: A list of Group objects representing the security + groups associated with the instance. + :ivar public_dns_name: The public dns name of the instance. + :ivar private_dns_name: The private dns name of the instance. + :ivar state: The string representation of the instance's current state. + :ivar state_code: An integer representation of the instance's + current state. + :ivar previous_state: The string representation of the instance's + previous state. + :ivar previous_state_code: An integer representation of the + instance's current state. + :ivar key_name: The name of the SSH key associated with the instance. + :ivar instance_type: The type of instance (e.g. m1.small). + :ivar launch_time: The time the instance was launched. + :ivar image_id: The ID of the AMI used to launch this instance. 
+    :ivar placement: The availability zone in which the instance is running.
+    :ivar placement_group: The name of the placement group the instance
+        is in (for cluster compute instances).
+    :ivar placement_tenancy: The tenancy of the instance, if the instance
+        is running within a VPC. An instance with a tenancy of dedicated
+        runs on single-tenant hardware.
+    :ivar kernel: The kernel associated with the instance.
+    :ivar ramdisk: The ramdisk associated with the instance.
+    :ivar architecture: The architecture of the image (i386|x86_64).
+    :ivar hypervisor: The hypervisor used.
+    :ivar virtualization_type: The type of virtualization used.
+    :ivar product_codes: A list of product codes associated with this instance.
+    :ivar ami_launch_index: This instance's position within its launch group.
+    :ivar monitored: A boolean indicating whether monitoring is enabled or not.
+    :ivar monitoring_state: A string value that contains the actual value
+        of the monitoring element returned by EC2.
+    :ivar spot_instance_request_id: The ID of the spot instance request
+        if this is a spot instance.
+    :ivar subnet_id: The VPC Subnet ID, if running in VPC.
+    :ivar vpc_id: The VPC ID, if running in VPC.
+    :ivar private_ip_address: The private IP address of the instance.
+    :ivar ip_address: The public IP address of the instance.
+    :ivar platform: Platform of the instance (e.g. Windows)
+    :ivar root_device_name: The name of the root device.
+    :ivar root_device_type: The root device type (ebs|instance-store).
+    :ivar block_device_mapping: The Block Device Mapping for the instance.
+    :ivar state_reason: The reason for the most recent state transition.
+    :ivar groups: List of security Groups associated with the instance.
+    :ivar interfaces: List of Elastic Network Interfaces associated with
+        this instance.
+    :ivar ebs_optimized: Whether instance is using optimized EBS volumes
+        or not.
+    :ivar instance_profile: A Python dict containing the instance
+        profile id and arn associated with this instance.
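+
+    A short usage sketch (``conn`` is assumed to be an existing
+    :py:class:`boto.ec2.connection.EC2Connection`)::
+
+        for reservation in conn.get_all_reservations():
+            for instance in reservation.instances:
+                print('%s is %s' % (instance.id, instance.state))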
+ """ + + def __init__(self, connection=None): + super(Instance, self).__init__(connection) + self.id = None + self.dns_name = None + self.public_dns_name = None + self.private_dns_name = None + self.key_name = None + self.instance_type = None + self.launch_time = None + self.image_id = None + self.kernel = None + self.ramdisk = None + self.product_codes = ProductCodes() + self.ami_launch_index = None + self.monitored = False + self.monitoring_state = None + self.spot_instance_request_id = None + self.subnet_id = None + self.vpc_id = None + self.private_ip_address = None + self.ip_address = None + self.requester_id = None + self._in_monitoring_element = False + self.persistent = False + self.root_device_name = None + self.root_device_type = None + self.block_device_mapping = None + self.state_reason = None + self.group_name = None + self.client_token = None + self.eventsSet = None + self.groups = [] + self.platform = None + self.interfaces = [] + self.hypervisor = None + self.virtualization_type = None + self.architecture = None + self.instance_profile = None + self._previous_state = None + self._state = InstanceState() + self._placement = InstancePlacement() + + def __repr__(self): + return 'Instance:%s' % self.id + + @property + def state(self): + return self._state.name + + @property + def state_code(self): + return self._state.code + + @property + def previous_state(self): + if self._previous_state: + return self._previous_state.name + return None + + @property + def previous_state_code(self): + if self._previous_state: + return self._previous_state.code + return 0 + + @property + def placement(self): + return self._placement.zone + + @property + def placement_group(self): + return self._placement.group_name + + @property + def placement_tenancy(self): + return self._placement.tenancy + + def startElement(self, name, attrs, connection): + retval = super(Instance, self).startElement(name, attrs, connection) + if retval is not None: + return retval + if name == 'monitoring': + self._in_monitoring_element = True + elif name == 'blockDeviceMapping': + self.block_device_mapping = BlockDeviceMapping() + return self.block_device_mapping + elif name == 'productCodes': + return self.product_codes + elif name == 'stateReason': + self.state_reason = SubParse('stateReason') + return self.state_reason + elif name == 'groupSet': + self.groups = ResultSet([('item', Group)]) + return self.groups + elif name == "eventsSet": + self.eventsSet = SubParse('eventsSet') + return self.eventsSet + elif name == 'networkInterfaceSet': + self.interfaces = ResultSet([('item', NetworkInterface)]) + return self.interfaces + elif name == 'iamInstanceProfile': + self.instance_profile = SubParse('iamInstanceProfile') + return self.instance_profile + elif name == 'currentState': + return self._state + elif name == 'previousState': + self._previous_state = InstanceState() + return self._previous_state + elif name == 'instanceState': + return self._state + elif name == 'placement': + return self._placement + return None + + def endElement(self, name, value, connection): + if name == 'instanceId': + self.id = value + elif name == 'imageId': + self.image_id = value + elif name == 'dnsName' or name == 'publicDnsName': + self.dns_name = value # backwards compatibility + self.public_dns_name = value + elif name == 'privateDnsName': + self.private_dns_name = value + elif name == 'keyName': + self.key_name = value + elif name == 'amiLaunchIndex': + self.ami_launch_index = value + elif name == 'previousState': + 
self.previous_state = value + elif name == 'instanceType': + self.instance_type = value + elif name == 'rootDeviceName': + self.root_device_name = value + elif name == 'rootDeviceType': + self.root_device_type = value + elif name == 'launchTime': + self.launch_time = value + elif name == 'platform': + self.platform = value + elif name == 'kernelId': + self.kernel = value + elif name == 'ramdiskId': + self.ramdisk = value + elif name == 'state': + if self._in_monitoring_element: + self.monitoring_state = value + if value == 'enabled': + self.monitored = True + self._in_monitoring_element = False + elif name == 'spotInstanceRequestId': + self.spot_instance_request_id = value + elif name == 'subnetId': + self.subnet_id = value + elif name == 'vpcId': + self.vpc_id = value + elif name == 'privateIpAddress': + self.private_ip_address = value + elif name == 'ipAddress': + self.ip_address = value + elif name == 'requesterId': + self.requester_id = value + elif name == 'persistent': + if value == 'true': + self.persistent = True + else: + self.persistent = False + elif name == 'groupName': + if self._in_monitoring_element: + self.group_name = value + elif name == 'clientToken': + self.client_token = value + elif name == "eventsSet": + self.events = value + elif name == 'hypervisor': + self.hypervisor = value + elif name == 'virtualizationType': + self.virtualization_type = value + elif name == 'architecture': + self.architecture = value + elif name == 'ebsOptimized': + self.ebs_optimized = (value == 'true') + else: + setattr(self, name, value) + + def _update(self, updated): + self.__dict__.update(updated.__dict__) + + def update(self, validate=False, dry_run=False): + """ + Update the instance's state information by making a call to fetch + the current instance attributes from the service. + + :type validate: bool + :param validate: By default, if EC2 returns no data about the + instance the update method returns quietly. If + the validate param is True, however, it will + raise a ValueError exception if no data is + returned from EC2. + """ + rs = self.connection.get_all_reservations([self.id], dry_run=dry_run) + if len(rs) > 0: + r = rs[0] + for i in r.instances: + if i.id == self.id: + self._update(i) + elif validate: + raise ValueError('%s is not a valid Instance ID' % self.id) + return self.state + + def terminate(self, dry_run=False): + """ + Terminate the instance + """ + rs = self.connection.terminate_instances([self.id], dry_run=dry_run) + if len(rs) > 0: + self._update(rs[0]) + + def stop(self, force=False, dry_run=False): + """ + Stop the instance + + :type force: bool + :param force: Forces the instance to stop + + :rtype: list + :return: A list of the instances stopped + """ + rs = self.connection.stop_instances([self.id], force, dry_run=dry_run) + if len(rs) > 0: + self._update(rs[0]) + + def start(self, dry_run=False): + """ + Start the instance. + """ + rs = self.connection.start_instances([self.id], dry_run=dry_run) + if len(rs) > 0: + self._update(rs[0]) + + def reboot(self, dry_run=False): + return self.connection.reboot_instances([self.id], dry_run=dry_run) + + def get_console_output(self, dry_run=False): + """ + Retrieves the console output for the instance. 
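+
+        For example (a sketch; ``instance`` is assumed to be a running
+        :py:class:`Instance`)::
+
+            output = instance.get_console_output()
+            print(output.output)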
+ + :rtype: :class:`boto.ec2.instance.ConsoleOutput` + :return: The console output as a ConsoleOutput object + """ + return self.connection.get_console_output(self.id, dry_run=dry_run) + + def confirm_product(self, product_code, dry_run=False): + return self.connection.confirm_product_instance( + self.id, + product_code, + dry_run=dry_run + ) + + def use_ip(self, ip_address, dry_run=False): + """ + Associates an Elastic IP to the instance. + + :type ip_address: Either an instance of + :class:`boto.ec2.address.Address` or a string. + :param ip_address: The IP address to associate + with the instance. + + :rtype: bool + :return: True if successful + """ + + if isinstance(ip_address, Address): + ip_address = ip_address.public_ip + return self.connection.associate_address( + self.id, + ip_address, + dry_run=dry_run + ) + + def monitor(self, dry_run=False): + return self.connection.monitor_instance(self.id, dry_run=dry_run) + + def unmonitor(self, dry_run=False): + return self.connection.unmonitor_instance(self.id, dry_run=dry_run) + + def get_attribute(self, attribute, dry_run=False): + """ + Gets an attribute from this instance. + + :type attribute: string + :param attribute: The attribute you need information about + Valid choices are: + + * instanceType + * kernel + * ramdisk + * userData + * disableApiTermination + * instanceInitiatedShutdownBehavior + * rootDeviceName + * blockDeviceMapping + * productCodes + * sourceDestCheck + * groupSet + * ebsOptimized + + :rtype: :class:`boto.ec2.image.InstanceAttribute` + :return: An InstanceAttribute object representing the value of the + attribute requested + """ + return self.connection.get_instance_attribute( + self.id, + attribute, + dry_run=dry_run + ) + + def modify_attribute(self, attribute, value, dry_run=False): + """ + Changes an attribute of this instance + + :type attribute: string + :param attribute: The attribute you wish to change. + + * instanceType - A valid instance type (m1.small) + * kernel - Kernel ID (None) + * ramdisk - Ramdisk ID (None) + * userData - Base64 encoded String (None) + * disableApiTermination - Boolean (true) + * instanceInitiatedShutdownBehavior - stop|terminate + * sourceDestCheck - Boolean (true) + * groupSet - Set of Security Groups or IDs + * ebsOptimized - Boolean (false) + + :type value: string + :param value: The new value for the attribute + + :rtype: bool + :return: Whether the operation succeeded or not + """ + return self.connection.modify_instance_attribute( + self.id, + attribute, + value, + dry_run=dry_run + ) + + def reset_attribute(self, attribute, dry_run=False): + """ + Resets an attribute of this instance to its default value. + + :type attribute: string + :param attribute: The attribute to reset. Valid values are: + kernel|ramdisk + + :rtype: bool + :return: Whether the operation succeeded or not + """ + return self.connection.reset_instance_attribute( + self.id, + attribute, + dry_run=dry_run + ) + + def create_image(self, name, description=None, no_reboot=False, + dry_run=False): + """ + Will create an AMI from the instance in the running or stopped + state. + + :type name: string + :param name: The name of the new image + + :type description: string + :param description: An optional human-readable string describing + the contents and purpose of the AMI. + + :type no_reboot: bool + :param no_reboot: An optional flag indicating that the bundling process + should not attempt to shutdown the instance before + bundling. 
If this flag is True, the responsibility + of maintaining file system integrity is left to the + owner of the instance. + + :rtype: string + :return: The new image id + """ + return self.connection.create_image( + self.id, + name, + description, + no_reboot, + dry_run=dry_run + ) + + +class ConsoleOutput(object): + def __init__(self, parent=None): + self.parent = parent + self.instance_id = None + self.timestamp = None + self.output = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'instanceId': + self.instance_id = value + elif name == 'timestamp': + self.timestamp = value + elif name == 'output': + self.output = base64.b64decode(value) + else: + setattr(self, name, value) + + +class InstanceAttribute(dict): + ValidValues = ['instanceType', 'kernel', 'ramdisk', 'userData', + 'disableApiTermination', + 'instanceInitiatedShutdownBehavior', + 'rootDeviceName', 'blockDeviceMapping', 'sourceDestCheck', + 'groupSet'] + + def __init__(self, parent=None): + dict.__init__(self) + self.instance_id = None + self.request_id = None + self._current_value = None + + def startElement(self, name, attrs, connection): + if name == 'blockDeviceMapping': + self[name] = BlockDeviceMapping() + return self[name] + elif name == 'groupSet': + self[name] = ResultSet([('item', Group)]) + return self[name] + else: + return None + + def endElement(self, name, value, connection): + if name == 'instanceId': + self.instance_id = value + elif name == 'requestId': + self.request_id = value + elif name == 'value': + if value == 'true': + value = True + elif value == 'false': + value = False + self._current_value = value + elif name in self.ValidValues: + self[name] = self._current_value + + +class SubParse(dict): + def __init__(self, section, parent=None): + dict.__init__(self) + self.section = section + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name != self.section: + self[name] = value diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/instanceinfo.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/instanceinfo.py new file mode 100644 index 0000000000000000000000000000000000000000..afa8b9cba03e26b036af8ac590dfd2c318be8cfa --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/instanceinfo.py @@ -0,0 +1,49 @@ +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
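A minimal usage sketch of the Instance helpers above (update, get_attribute, stop); the region name and instance ID are illustrative placeholders, not part of the patch, and credentials are assumed to come from boto's standard configuration:

import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')
reservations = conn.get_all_reservations(instance_ids=['i-12345678'])
instance = reservations[0].instances[0]

instance.update()                         # refresh cached state from EC2
attr = instance.get_attribute('instanceType')
print(attr['instanceType'])               # InstanceAttribute behaves like a dict
if instance.state == 'running':
    instance.stop()                       # updates this object in place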
+ + +class InstanceInfo(object): + """ + Represents an EC2 Instance status response from CloudWatch + """ + + def __init__(self, connection=None, id=None, state=None): + """ + :ivar str id: The instance's EC2 ID. + :ivar str state: Specifies the current status of the instance. + """ + self.connection = connection + self.id = id + self.state = state + + def __repr__(self): + return 'InstanceInfo:%s' % self.id + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'instanceId' or name == 'InstanceId': + self.id = value + elif name == 'state': + self.state = value + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/instancestatus.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/instancestatus.py new file mode 100644 index 0000000000000000000000000000000000000000..b09b55ee8034236dade0fa4b123483a07e59973b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/instancestatus.py @@ -0,0 +1,212 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class Details(dict): + """ + A dict object that contains name/value pairs which provide + more detailed information about the status of the system + or the instance. + """ + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'name': + self._name = value + elif name == 'status': + self[self._name] = value + else: + setattr(self, name, value) + + +class Event(object): + """ + A status event for an instance. + + :ivar code: A string indicating the event type. + :ivar description: A string describing the reason for the event. + :ivar not_before: A datestring describing the earliest time for + the event. + :ivar not_after: A datestring describing the latest time for + the event. 
+ """ + + def __init__(self, code=None, description=None, + not_before=None, not_after=None): + self.code = code + self.description = description + self.not_before = not_before + self.not_after = not_after + + def __repr__(self): + return 'Event:%s' % self.code + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'code': + self.code = value + elif name == 'description': + self.description = value + elif name == 'notBefore': + self.not_before = value + elif name == 'notAfter': + self.not_after = value + else: + setattr(self, name, value) + + +class Status(object): + """ + A generic Status object used for system status and instance status. + + :ivar status: A string indicating overall status. + :ivar details: A dict containing name-value pairs which provide + more details about the current status. + """ + + def __init__(self, status=None, details=None): + self.status = status + if not details: + details = Details() + self.details = details + + def __repr__(self): + return 'Status:%s' % self.status + + def startElement(self, name, attrs, connection): + if name == 'details': + return self.details + return None + + def endElement(self, name, value, connection): + if name == 'status': + self.status = value + else: + setattr(self, name, value) + + +class EventSet(list): + + def startElement(self, name, attrs, connection): + if name == 'item': + event = Event() + self.append(event) + return event + else: + return None + + def endElement(self, name, value, connection): + setattr(self, name, value) + + +class InstanceStatus(object): + """ + Represents an EC2 Instance status as reported by + DescribeInstanceStatus request. + + :ivar id: The instance identifier. + :ivar zone: The availability zone of the instance. + :ivar events: A list of events relevant to the instance. + :ivar state_code: An integer representing the current state + of the instance. + :ivar state_name: A string describing the current state + of the instance. + :ivar system_status: A Status object that reports impaired + functionality that stems from issues related to the systems + that support an instance, such as such as hardware failures + and network connectivity problems. + :ivar instance_status: A Status object that reports impaired + functionality that arises from problems internal to the instance. + """ + + def __init__(self, id=None, zone=None, events=None, + state_code=None, state_name=None): + self.id = id + self.zone = zone + self.events = events + self.state_code = state_code + self.state_name = state_name + self.system_status = Status() + self.instance_status = Status() + + def __repr__(self): + return 'InstanceStatus:%s' % self.id + + def startElement(self, name, attrs, connection): + if name == 'eventsSet': + self.events = EventSet() + return self.events + elif name == 'systemStatus': + return self.system_status + elif name == 'instanceStatus': + return self.instance_status + else: + return None + + def endElement(self, name, value, connection): + if name == 'instanceId': + self.id = value + elif name == 'availabilityZone': + self.zone = value + elif name == 'code': + self.state_code = int(value) + elif name == 'name': + self.state_name = value + else: + setattr(self, name, value) + + +class InstanceStatusSet(list): + """ + A list object that contains the results of a call to + DescribeInstanceStatus request. Each element of the + list will be an InstanceStatus object. 
+ + :ivar next_token: If the response was truncated by + the EC2 service, the next_token attribute of the + object will contain the string that needs to be + passed in to the next request to retrieve the next + set of results. + """ + + def __init__(self, connection=None): + list.__init__(self) + self.connection = connection + self.next_token = None + + def startElement(self, name, attrs, connection): + if name == 'item': + status = InstanceStatus() + self.append(status) + return status + else: + return None + + def endElement(self, name, value, connection): + if name == 'nextToken': + self.next_token = value + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/instancetype.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/instancetype.py new file mode 100644 index 0000000000000000000000000000000000000000..6197a3363bf5ad950f4b5f0adcf38f6482bffb2b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/instancetype.py @@ -0,0 +1,59 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
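A short sketch of how the status classes above are consumed through EC2Connection.get_all_instance_status(); the region name is a placeholder:

import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')
statuses = conn.get_all_instance_status()        # an InstanceStatusSet
for status in statuses:
    print(status.id, status.state_name,
          status.system_status.status, status.instance_status.status)
    for event in status.events or []:            # events is None when absent
        print('  event:', event.code, event.not_before)
if statuses.next_token:                          # page through truncated results
    statuses = conn.get_all_instance_status(next_token=statuses.next_token)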
+ + +from boto.ec2.ec2object import EC2Object + + +class InstanceType(EC2Object): + """ + Represents an EC2 VM Type + + :ivar name: The name of the vm type + :ivar cores: The number of cpu cores for this vm type + :ivar memory: The amount of memory in megabytes for this vm type + :ivar disk: The amount of disk space in gigabytes for this vm type + """ + + def __init__(self, connection=None, name=None, cores=None, + memory=None, disk=None): + super(InstanceType, self).__init__(connection) + self.connection = connection + self.name = name + self.cores = cores + self.memory = memory + self.disk = disk + + def __repr__(self): + return 'InstanceType:%s-%s,%s,%s' % (self.name, self.cores, + self.memory, self.disk) + + def endElement(self, name, value, connection): + if name == 'name': + self.name = value + elif name == 'cpu': + self.cores = value + elif name == 'disk': + self.disk = value + elif name == 'memory': + self.memory = value + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/keypair.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/keypair.py new file mode 100644 index 0000000000000000000000000000000000000000..623fb409f8d2afa35aea148bf371f17239470aae --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/keypair.py @@ -0,0 +1,111 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an EC2 Keypair +""" + +import os +from boto.ec2.ec2object import EC2Object +from boto.exception import BotoClientError + + +class KeyPair(EC2Object): + + def __init__(self, connection=None): + super(KeyPair, self).__init__(connection) + self.name = None + self.fingerprint = None + self.material = None + + def __repr__(self): + return 'KeyPair:%s' % self.name + + def endElement(self, name, value, connection): + if name == 'keyName': + self.name = value + elif name == 'keyFingerprint': + self.fingerprint = value + elif name == 'keyMaterial': + self.material = value + else: + setattr(self, name, value) + + def delete(self, dry_run=False): + """ + Delete the KeyPair. + + :rtype: bool + :return: True if successful, otherwise False. + """ + return self.connection.delete_key_pair(self.name, dry_run=dry_run) + + def save(self, directory_path): + """ + Save the material (the unencrypted PEM encoded RSA private key) + of a newly created KeyPair to a local file. + + :type directory_path: string + :param directory_path: The fully qualified path to the directory + in which the keypair will be saved. 
The
+            keypair file will be named using the name
+            of the keypair as the base name and .pem
+            for the file extension.  If a file of that
+            name already exists in the directory, an
+            exception will be raised and the old file
+            will not be overwritten.
+
+        :rtype: bool
+        :return: True if successful.
+        """
+        if self.material:
+            directory_path = os.path.expanduser(directory_path)
+            file_path = os.path.join(directory_path, '%s.pem' % self.name)
+            if os.path.exists(file_path):
+                raise BotoClientError('%s already exists, it will not be overwritten' % file_path)
+            fp = open(file_path, 'wb')
+            fp.write(self.material)
+            fp.close()
+            os.chmod(file_path, 0o600)
+            return True
+        else:
+            raise BotoClientError('KeyPair contains no material')
+
+    def copy_to_region(self, region, dry_run=False):
+        """
+        Create a new key pair of the same name in another region.
+        Note that the new key pair will use a different ssh
+        cert than this key pair.  After doing the copy,
+        you will need to save the material associated with the
+        new key pair (use the save method) to a local file.
+
+        :type region: :class:`boto.ec2.regioninfo.RegionInfo`
+        :param region: The region to which this key pair will be copied.
+
+        :rtype: :class:`boto.ec2.keypair.KeyPair`
+        :return: The new key pair
+        """
+        if region.name == self.region:
+            raise BotoClientError('Unable to copy to the same Region')
+        conn_params = self.connection.get_params()
+        rconn = region.connect(**conn_params)
+        kp = rconn.create_key_pair(self.name, dry_run=dry_run)
+        return kp
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/launchspecification.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/launchspecification.py
new file mode 100644
index 0000000000000000000000000000000000000000..f145ac4799e10e7ef37b0fa353570eaef0ba4d7a
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/launchspecification.py
@@ -0,0 +1,105 @@
+# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents a launch specification for Spot instances.
+""" + +from boto.ec2.ec2object import EC2Object +from boto.resultset import ResultSet +from boto.ec2.blockdevicemapping import BlockDeviceMapping +from boto.ec2.group import Group +from boto.ec2.instance import SubParse + + +class GroupList(list): + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'groupId': + self.append(value) + + +class LaunchSpecification(EC2Object): + + def __init__(self, connection=None): + super(LaunchSpecification, self).__init__(connection) + self.key_name = None + self.instance_type = None + self.image_id = None + self.groups = [] + self.placement = None + self.kernel = None + self.ramdisk = None + self.monitored = False + self.subnet_id = None + self._in_monitoring_element = False + self.block_device_mapping = None + self.instance_profile = None + self.ebs_optimized = False + + def __repr__(self): + return 'LaunchSpecification(%s)' % self.image_id + + def startElement(self, name, attrs, connection): + if name == 'groupSet': + self.groups = ResultSet([('item', Group)]) + return self.groups + elif name == 'monitoring': + self._in_monitoring_element = True + elif name == 'blockDeviceMapping': + self.block_device_mapping = BlockDeviceMapping() + return self.block_device_mapping + elif name == 'iamInstanceProfile': + self.instance_profile = SubParse('iamInstanceProfile') + return self.instance_profile + else: + return None + + def endElement(self, name, value, connection): + if name == 'imageId': + self.image_id = value + elif name == 'keyName': + self.key_name = value + elif name == 'instanceType': + self.instance_type = value + elif name == 'availabilityZone': + self.placement = value + elif name == 'placement': + pass + elif name == 'kernelId': + self.kernel = value + elif name == 'ramdiskId': + self.ramdisk = value + elif name == 'subnetId': + self.subnet_id = value + elif name == 'state': + if self._in_monitoring_element: + if value == 'enabled': + self.monitored = True + self._in_monitoring_element = False + elif name == 'ebsOptimized': + self.ebs_optimized = (value == 'true') + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/networkinterface.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/networkinterface.py new file mode 100644 index 0000000000000000000000000000000000000000..9bbeb7715eb10433471103e9f3655bc235cb5408 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/networkinterface.py @@ -0,0 +1,351 @@ +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an EC2 Elastic Network Interface +""" +from boto.exception import BotoClientError +from boto.ec2.ec2object import TaggedEC2Object +from boto.resultset import ResultSet +from boto.ec2.group import Group + + +class Attachment(object): + """ + :ivar id: The ID of the attachment. + :ivar instance_id: The ID of the instance. + :ivar device_index: The index of this device. + :ivar status: The status of the device. + :ivar attach_time: The time the device was attached. + :ivar delete_on_termination: Whether the device will be deleted + when the instance is terminated. + """ + + def __init__(self): + self.id = None + self.instance_id = None + self.instance_owner_id = None + self.device_index = 0 + self.status = None + self.attach_time = None + self.delete_on_termination = False + + def __repr__(self): + return 'Attachment:%s' % self.id + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'attachmentId': + self.id = value + elif name == 'instanceId': + self.instance_id = value + elif name == 'deviceIndex': + self.device_index = int(value) + elif name == 'instanceOwnerId': + self.instance_owner_id = value + elif name == 'status': + self.status = value + elif name == 'attachTime': + self.attach_time = value + elif name == 'deleteOnTermination': + if value.lower() == 'true': + self.delete_on_termination = True + else: + self.delete_on_termination = False + else: + setattr(self, name, value) + + +class NetworkInterface(TaggedEC2Object): + """ + An Elastic Network Interface. + + :ivar id: The ID of the ENI. + :ivar subnet_id: The ID of the VPC subnet. + :ivar vpc_id: The ID of the VPC. + :ivar description: The description. + :ivar owner_id: The ID of the owner of the ENI. + :ivar requester_managed: + :ivar status: The interface's status (available|in-use). + :ivar mac_address: The MAC address of the interface. + :ivar private_ip_address: The IP address of the interface within + the subnet. + :ivar source_dest_check: Flag to indicate whether to validate + network traffic to or from this network interface. + :ivar groups: List of security groups associated with the interface. + :ivar attachment: The attachment object. + :ivar private_ip_addresses: A list of PrivateIPAddress objects. 
+ """ + + def __init__(self, connection=None): + super(NetworkInterface, self).__init__(connection) + self.id = None + self.subnet_id = None + self.vpc_id = None + self.availability_zone = None + self.description = None + self.owner_id = None + self.requester_managed = False + self.status = None + self.mac_address = None + self.private_ip_address = None + self.source_dest_check = None + self.groups = [] + self.attachment = None + self.private_ip_addresses = [] + + def __repr__(self): + return 'NetworkInterface:%s' % self.id + + def startElement(self, name, attrs, connection): + retval = super(NetworkInterface, self).startElement(name, attrs, connection) + if retval is not None: + return retval + if name == 'groupSet': + self.groups = ResultSet([('item', Group)]) + return self.groups + elif name == 'attachment': + self.attachment = Attachment() + return self.attachment + elif name == 'privateIpAddressesSet': + self.private_ip_addresses = ResultSet([('item', PrivateIPAddress)]) + return self.private_ip_addresses + else: + return None + + def endElement(self, name, value, connection): + if name == 'networkInterfaceId': + self.id = value + elif name == 'subnetId': + self.subnet_id = value + elif name == 'vpcId': + self.vpc_id = value + elif name == 'availabilityZone': + self.availability_zone = value + elif name == 'description': + self.description = value + elif name == 'ownerId': + self.owner_id = value + elif name == 'requesterManaged': + if value.lower() == 'true': + self.requester_managed = True + else: + self.requester_managed = False + elif name == 'status': + self.status = value + elif name == 'macAddress': + self.mac_address = value + elif name == 'privateIpAddress': + self.private_ip_address = value + elif name == 'sourceDestCheck': + if value.lower() == 'true': + self.source_dest_check = True + else: + self.source_dest_check = False + else: + setattr(self, name, value) + + def _update(self, updated): + self.__dict__.update(updated.__dict__) + + def update(self, validate=False, dry_run=False): + """ + Update the data associated with this ENI by querying EC2. + + :type validate: bool + :param validate: By default, if EC2 returns no data about the + ENI the update method returns quietly. If + the validate param is True, however, it will + raise a ValueError exception if no data is + returned from EC2. + """ + rs = self.connection.get_all_network_interfaces( + [self.id], + dry_run=dry_run + ) + if len(rs) > 0: + self._update(rs[0]) + elif validate: + raise ValueError('%s is not a valid ENI ID' % self.id) + return self.status + + def attach(self, instance_id, device_index, dry_run=False): + """ + Attach this ENI to an EC2 instance. + + :type instance_id: str + :param instance_id: The ID of the EC2 instance to which it will + be attached. + + :type device_index: int + :param device_index: The interface nunber, N, on the instance (eg. ethN) + + :rtype: bool + :return: True if successful + """ + return self.connection.attach_network_interface( + self.id, + instance_id, + device_index, + dry_run=dry_run + ) + + def detach(self, force=False, dry_run=False): + """ + Detach this ENI from an EC2 instance. + + :type force: bool + :param force: Forces detachment if the previous detachment + attempt did not occur cleanly. 
+ + :rtype: bool + :return: True if successful + """ + attachment_id = getattr(self.attachment, 'id', None) + + return self.connection.detach_network_interface( + attachment_id, + force, + dry_run=dry_run + ) + + def delete(self, dry_run=False): + return self.connection.delete_network_interface( + self.id, + dry_run=dry_run + ) + + +class PrivateIPAddress(object): + def __init__(self, connection=None, private_ip_address=None, + primary=None): + self.connection = connection + self.private_ip_address = private_ip_address + self.primary = primary + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'privateIpAddress': + self.private_ip_address = value + elif name == 'primary': + self.primary = True if value.lower() == 'true' else False + + def __repr__(self): + return "PrivateIPAddress(%s, primary=%s)" % (self.private_ip_address, + self.primary) + + +class NetworkInterfaceCollection(list): + def __init__(self, *interfaces): + self.extend(interfaces) + + def build_list_params(self, params, prefix=''): + for i, spec in enumerate(self): + full_prefix = '%sNetworkInterface.%s.' % (prefix, i) + if spec.network_interface_id is not None: + params[full_prefix + 'NetworkInterfaceId'] = \ + str(spec.network_interface_id) + if spec.device_index is not None: + params[full_prefix + 'DeviceIndex'] = \ + str(spec.device_index) + else: + params[full_prefix + 'DeviceIndex'] = 0 + if spec.subnet_id is not None: + params[full_prefix + 'SubnetId'] = str(spec.subnet_id) + if spec.description is not None: + params[full_prefix + 'Description'] = str(spec.description) + if spec.delete_on_termination is not None: + params[full_prefix + 'DeleteOnTermination'] = \ + 'true' if spec.delete_on_termination else 'false' + if spec.secondary_private_ip_address_count is not None: + params[full_prefix + 'SecondaryPrivateIpAddressCount'] = \ + str(spec.secondary_private_ip_address_count) + if spec.private_ip_address is not None: + params[full_prefix + 'PrivateIpAddress'] = \ + str(spec.private_ip_address) + if spec.groups is not None: + for j, group_id in enumerate(spec.groups): + query_param_key = '%sSecurityGroupId.%s' % (full_prefix, j) + params[query_param_key] = str(group_id) + if spec.private_ip_addresses is not None: + for k, ip_addr in enumerate(spec.private_ip_addresses): + query_param_key_prefix = ( + '%sPrivateIpAddresses.%s' % (full_prefix, k)) + params[query_param_key_prefix + '.PrivateIpAddress'] = \ + str(ip_addr.private_ip_address) + if ip_addr.primary is not None: + params[query_param_key_prefix + '.Primary'] = \ + 'true' if ip_addr.primary else 'false' + + # Associating Public IPs have special logic around them: + # + # * Only assignable on an device_index of ``0`` + # * Only on one interface + # * Only if there are no other interfaces being created + # * Only if it's a new interface (which we can't really guard + # against) + # + # More details on http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-RunInstances.html + if spec.associate_public_ip_address is not None: + if not params[full_prefix + 'DeviceIndex'] in (0, '0'): + raise BotoClientError( + "Only the interface with device index of 0 can " + \ + "be provided when using " + \ + "'associate_public_ip_address'." + ) + + if len(self) > 1: + raise BotoClientError( + "Only one interface can be provided when using " + \ + "'associate_public_ip_address'." 
+ ) + + key = full_prefix + 'AssociatePublicIpAddress' + + if spec.associate_public_ip_address: + params[key] = 'true' + else: + params[key] = 'false' + + +class NetworkInterfaceSpecification(object): + def __init__(self, network_interface_id=None, device_index=None, + subnet_id=None, description=None, private_ip_address=None, + groups=None, delete_on_termination=None, + private_ip_addresses=None, + secondary_private_ip_address_count=None, + associate_public_ip_address=None): + self.network_interface_id = network_interface_id + self.device_index = device_index + self.subnet_id = subnet_id + self.description = description + self.private_ip_address = private_ip_address + self.groups = groups + self.delete_on_termination = delete_on_termination + self.private_ip_addresses = private_ip_addresses + self.secondary_private_ip_address_count = \ + secondary_private_ip_address_count + self.associate_public_ip_address = associate_public_ip_address diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/placementgroup.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/placementgroup.py new file mode 100644 index 0000000000000000000000000000000000000000..0c2596616d6726e70c1ba0cd6920b8063d8203d2 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/placementgroup.py @@ -0,0 +1,53 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+""" +Represents an EC2 Placement Group +""" +from boto.ec2.ec2object import EC2Object +from boto.exception import BotoClientError + + +class PlacementGroup(EC2Object): + + def __init__(self, connection=None, name=None, strategy=None, state=None): + super(PlacementGroup, self).__init__(connection) + self.name = name + self.strategy = strategy + self.state = state + + def __repr__(self): + return 'PlacementGroup:%s' % self.name + + def endElement(self, name, value, connection): + if name == 'groupName': + self.name = value + elif name == 'strategy': + self.strategy = value + elif name == 'state': + self.state = value + else: + setattr(self, name, value) + + def delete(self, dry_run=False): + return self.connection.delete_placement_group( + self.name, + dry_run=dry_run + ) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/regioninfo.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/regioninfo.py new file mode 100644 index 0000000000000000000000000000000000000000..21a56fb92704965a738cea08efb51dc56026a9af --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/regioninfo.py @@ -0,0 +1,36 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +from boto.regioninfo import RegionInfo + + +class EC2RegionInfo(RegionInfo): + """ + Represents an EC2 Region + """ + + def __init__(self, connection=None, name=None, endpoint=None, + connection_cls=None): + from boto.ec2.connection import EC2Connection + super(EC2RegionInfo, self).__init__(connection, name, endpoint, + EC2Connection) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/reservedinstance.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/reservedinstance.py new file mode 100644 index 0000000000000000000000000000000000000000..5ccc008e1b84909362903d101dec2d8c202d009b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/reservedinstance.py @@ -0,0 +1,352 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
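A sketch of the region plumbing that EC2RegionInfo above takes part in; boto.ec2.regions() yields RegionInfo objects whose connect() returns an EC2Connection:

import boto.ec2

for region in boto.ec2.regions():
    print(region.name, region.endpoint)     # e.g. us-west-2 ec2.us-west-2.amazonaws.com

conn = boto.ec2.get_region('us-west-2').connect()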
+from boto.resultset import ResultSet +from boto.ec2.ec2object import EC2Object +from boto.utils import parse_ts + + +class ReservedInstancesOffering(EC2Object): + + def __init__(self, connection=None, id=None, instance_type=None, + availability_zone=None, duration=None, fixed_price=None, + usage_price=None, description=None, instance_tenancy=None, + currency_code=None, offering_type=None, + recurring_charges=None, pricing_details=None): + super(ReservedInstancesOffering, self).__init__(connection) + self.id = id + self.instance_type = instance_type + self.availability_zone = availability_zone + self.duration = duration + self.fixed_price = fixed_price + self.usage_price = usage_price + self.description = description + self.instance_tenancy = instance_tenancy + self.currency_code = currency_code + self.offering_type = offering_type + self.recurring_charges = recurring_charges + self.pricing_details = pricing_details + + def __repr__(self): + return 'ReservedInstanceOffering:%s' % self.id + + def startElement(self, name, attrs, connection): + if name == 'recurringCharges': + self.recurring_charges = ResultSet([('item', RecurringCharge)]) + return self.recurring_charges + elif name == 'pricingDetailsSet': + self.pricing_details = ResultSet([('item', PricingDetail)]) + return self.pricing_details + return None + + def endElement(self, name, value, connection): + if name == 'reservedInstancesOfferingId': + self.id = value + elif name == 'instanceType': + self.instance_type = value + elif name == 'availabilityZone': + self.availability_zone = value + elif name == 'duration': + self.duration = int(value) + elif name == 'fixedPrice': + self.fixed_price = value + elif name == 'usagePrice': + self.usage_price = value + elif name == 'productDescription': + self.description = value + elif name == 'instanceTenancy': + self.instance_tenancy = value + elif name == 'currencyCode': + self.currency_code = value + elif name == 'offeringType': + self.offering_type = value + elif name == 'marketplace': + self.marketplace = True if value == 'true' else False + + def describe(self): + print('ID=%s' % self.id) + print('\tInstance Type=%s' % self.instance_type) + print('\tZone=%s' % self.availability_zone) + print('\tDuration=%s' % self.duration) + print('\tFixed Price=%s' % self.fixed_price) + print('\tUsage Price=%s' % self.usage_price) + print('\tDescription=%s' % self.description) + + def purchase(self, instance_count=1, dry_run=False): + return self.connection.purchase_reserved_instance_offering( + self.id, + instance_count, + dry_run=dry_run + ) + + +class RecurringCharge(object): + def __init__(self, connection=None, frequency=None, amount=None): + self.frequency = frequency + self.amount = amount + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + setattr(self, name, value) + + +class PricingDetail(object): + def __init__(self, connection=None, price=None, count=None): + self.price = price + self.count = count + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + setattr(self, name, value) + + +class ReservedInstance(ReservedInstancesOffering): + + def __init__(self, connection=None, id=None, instance_type=None, + availability_zone=None, duration=None, fixed_price=None, + usage_price=None, description=None, + instance_count=None, state=None): + super(ReservedInstance, self).__init__(connection, id, instance_type, + availability_zone, duration, + fixed_price, usage_price, + 
description)
        self.instance_count = instance_count
+        self.state = state
+        self.start = None
+        self.end = None
+
+    def __repr__(self):
+        return 'ReservedInstance:%s' % self.id
+
+    def endElement(self, name, value, connection):
+        if name == 'reservedInstancesId':
+            self.id = value
+        elif name == 'instanceCount':
+            self.instance_count = int(value)
+        elif name == 'state':
+            self.state = value
+        elif name == 'start':
+            self.start = value
+        elif name == 'end':
+            self.end = value
+        else:
+            super(ReservedInstance, self).endElement(name, value, connection)
+
+
+class ReservedInstanceListing(EC2Object):
+    def __init__(self, connection=None, listing_id=None, id=None,
+                 create_date=None, update_date=None,
+                 status=None, status_message=None, client_token=None):
+        self.connection = connection
+        self.listing_id = listing_id
+        self.id = id
+        self.create_date = create_date
+        self.update_date = update_date
+        self.status = status
+        self.status_message = status_message
+        self.client_token = client_token
+
+    def startElement(self, name, attrs, connection):
+        if name == 'instanceCounts':
+            self.instance_counts = ResultSet([('item', InstanceCount)])
+            return self.instance_counts
+        elif name == 'priceSchedules':
+            self.price_schedules = ResultSet([('item', PriceSchedule)])
+            return self.price_schedules
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'reservedInstancesListingId':
+            self.listing_id = value
+        elif name == 'reservedInstancesId':
+            self.id = value
+        elif name == 'createDate':
+            self.create_date = value
+        elif name == 'updateDate':
+            self.update_date = value
+        elif name == 'status':
+            self.status = value
+        elif name == 'statusMessage':
+            self.status_message = value
+        else:
+            setattr(self, name, value)
+
+
+class InstanceCount(object):
+    def __init__(self, connection=None, state=None, instance_count=None):
+        self.state = state
+        self.instance_count = instance_count
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'state':
+            self.state = value
+        elif name == 'instanceCount':
+            self.instance_count = int(value)
+        else:
+            setattr(self, name, value)
+
+
+class PriceSchedule(object):
+    def __init__(self, connection=None, term=None, price=None,
+                 currency_code=None, active=None):
+        self.connection = connection
+        self.term = term
+        self.price = price
+        self.currency_code = currency_code
+        self.active = active
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'term':
+            self.term = int(value)
+        elif name == 'price':
+            self.price = value
+        elif name == 'currencyCode':
+            self.currency_code = value
+        elif name == 'active':
+            self.active = True if value == 'true' else False
+        else:
+            setattr(self, name, value)
+
+
+class ReservedInstancesConfiguration(object):
+    def __init__(self, connection=None, availability_zone=None, platform=None,
+                 instance_count=None, instance_type=None):
+        self.connection = connection
+        self.availability_zone = availability_zone
+        self.platform = platform
+        self.instance_count = instance_count
+        self.instance_type = instance_type
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'availabilityZone':
+            self.availability_zone = value
+        elif name == 'platform':
+            self.platform = value
+        elif name == 'instanceCount':
+            self.instance_count = int(value)
+        elif name == 'instanceType':
+            self.instance_type = value
+        else:
setattr(self, name, value) + + +class ModifyReservedInstancesResult(object): + def __init__(self, connection=None, modification_id=None): + self.connection = connection + self.modification_id = modification_id + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'reservedInstancesModificationId': + self.modification_id = value + else: + setattr(self, name, value) + + +class ModificationResult(object): + def __init__(self, connection=None, modification_id=None, + availability_zone=None, platform=None, instance_count=None, + instance_type=None): + self.connection = connection + self.modification_id = modification_id + self.availability_zone = availability_zone + self.platform = platform + self.instance_count = instance_count + self.instance_type = instance_type + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'reservedInstancesModificationId': + self.modification_id = value + elif name == 'availabilityZone': + self.availability_zone = value + elif name == 'platform': + self.platform = value + elif name == 'instanceCount': + self.instance_count = int(value) + elif name == 'instanceType': + self.instance_type = value + else: + setattr(self, name, value) + + +class ReservedInstancesModification(object): + def __init__(self, connection=None, modification_id=None, + reserved_instances=None, modification_results=None, + create_date=None, update_date=None, effective_date=None, + status=None, status_message=None, client_token=None): + self.connection = connection + self.modification_id = modification_id + self.reserved_instances = reserved_instances + self.modification_results = modification_results + self.create_date = create_date + self.update_date = update_date + self.effective_date = effective_date + self.status = status + self.status_message = status_message + self.client_token = client_token + + def startElement(self, name, attrs, connection): + if name == 'reservedInstancesSet': + self.reserved_instances = ResultSet([ + ('item', ReservedInstance) + ]) + return self.reserved_instances + elif name == 'modificationResultSet': + self.modification_results = ResultSet([ + ('item', ModificationResult) + ]) + return self.modification_results + return None + + def endElement(self, name, value, connection): + if name == 'reservedInstancesModificationId': + self.modification_id = value + elif name == 'createDate': + self.create_date = parse_ts(value) + elif name == 'updateDate': + self.update_date = parse_ts(value) + elif name == 'effectiveDate': + self.effective_date = parse_ts(value) + elif name == 'status': + self.status = value + elif name == 'statusMessage': + self.status_message = value + elif name == 'clientToken': + self.client_token = value + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/securitygroup.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/securitygroup.py new file mode 100644 index 0000000000000000000000000000000000000000..dec49cb278dc499bc8e0348ec09814d84c44a9e8 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/securitygroup.py @@ -0,0 +1,392 @@ +# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011, Eucalyptus Systems, Inc. 
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents an EC2 Security Group
+"""
+from boto.ec2.ec2object import TaggedEC2Object
+from boto.exception import BotoClientError
+
+
+class SecurityGroup(TaggedEC2Object):
+
+    def __init__(self, connection=None, owner_id=None,
+                 name=None, description=None, id=None):
+        super(SecurityGroup, self).__init__(connection)
+        self.id = id
+        self.owner_id = owner_id
+        self.name = name
+        self.description = description
+        self.vpc_id = None
+        self.rules = IPPermissionsList()
+        self.rules_egress = IPPermissionsList()
+
+    def __repr__(self):
+        return 'SecurityGroup:%s' % self.name
+
+    def startElement(self, name, attrs, connection):
+        retval = super(SecurityGroup, self).startElement(name, attrs, connection)
+        if retval is not None:
+            return retval
+        if name == 'ipPermissions':
+            return self.rules
+        elif name == 'ipPermissionsEgress':
+            return self.rules_egress
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'ownerId':
+            self.owner_id = value
+        elif name == 'groupId':
+            self.id = value
+        elif name == 'groupName':
+            self.name = value
+        elif name == 'vpcId':
+            self.vpc_id = value
+        elif name == 'groupDescription':
+            self.description = value
+        elif name == 'ipRanges':
+            pass
+        elif name == 'return':
+            if value == 'false':
+                self.status = False
+            elif value == 'true':
+                self.status = True
+            else:
+                raise Exception(
+                    'Unexpected value of status %s for group %s' % (
+                        value,
+                        self.name
+                    )
+                )
+        else:
+            setattr(self, name, value)
+
+    def delete(self, dry_run=False):
+        if self.vpc_id:
+            return self.connection.delete_security_group(
+                group_id=self.id,
+                dry_run=dry_run
+            )
+        else:
+            return self.connection.delete_security_group(
+                self.name,
+                dry_run=dry_run
+            )
+
+    def add_rule(self, ip_protocol, from_port, to_port,
+                 src_group_name, src_group_owner_id, cidr_ip,
+                 src_group_group_id, dry_run=False):
+        """
+        Add a rule to the SecurityGroup object.  Note that this method
+        only changes the local version of the object.  No information
+        is sent to EC2.
+        """
+        rule = IPPermissions(self)
+        rule.ip_protocol = ip_protocol
+        rule.from_port = from_port
+        rule.to_port = to_port
+        self.rules.append(rule)
+        rule.add_grant(
+            src_group_name,
+            src_group_owner_id,
+            cidr_ip,
+            src_group_group_id,
+            dry_run=dry_run
+        )
+
+    def remove_rule(self, ip_protocol, from_port, to_port,
+                    src_group_name, src_group_owner_id, cidr_ip,
+                    src_group_group_id, dry_run=False):
+        """
+        Remove a rule from the SecurityGroup object.
Note that this method + only changes the local version of the object. No information + is sent to EC2. + """ + if not self.rules: + raise ValueError("The security group has no rules") + + target_rule = None + for rule in self.rules: + if rule.ip_protocol == ip_protocol: + if rule.from_port == from_port: + if rule.to_port == to_port: + target_rule = rule + target_grant = None + for grant in rule.grants: + if grant.name == src_group_name or grant.group_id == src_group_group_id: + if grant.owner_id == src_group_owner_id: + if grant.cidr_ip == cidr_ip: + target_grant = grant + if target_grant: + rule.grants.remove(target_grant) + if len(rule.grants) == 0: + self.rules.remove(target_rule) + + def authorize(self, ip_protocol=None, from_port=None, to_port=None, + cidr_ip=None, src_group=None, dry_run=False): + """ + Add a new rule to this security group. + You need to pass in either src_group_name + OR ip_protocol, from_port, to_port, + and cidr_ip. In other words, either you are authorizing another + group or you are authorizing some ip-based rule. + + :type ip_protocol: string + :param ip_protocol: Either tcp | udp | icmp + + :type from_port: int + :param from_port: The beginning port number you are enabling + + :type to_port: int + :param to_port: The ending port number you are enabling + + :type cidr_ip: string or list of strings + :param cidr_ip: The CIDR block you are providing access to. + See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing + + :type src_group: :class:`boto.ec2.securitygroup.SecurityGroup` or + :class:`boto.ec2.securitygroup.GroupOrCIDR` + :param src_group: The Security Group you are granting access to. + + :rtype: bool + :return: True if successful. + """ + group_name = None + if not self.vpc_id: + group_name = self.name + group_id = None + if self.vpc_id: + group_id = self.id + src_group_name = None + src_group_owner_id = None + src_group_group_id = None + if src_group: + cidr_ip = None + src_group_owner_id = src_group.owner_id + if not self.vpc_id: + src_group_name = src_group.name + else: + if hasattr(src_group, 'group_id'): + src_group_group_id = src_group.group_id + else: + src_group_group_id = src_group.id + status = self.connection.authorize_security_group(group_name, + src_group_name, + src_group_owner_id, + ip_protocol, + from_port, + to_port, + cidr_ip, + group_id, + src_group_group_id, + dry_run=dry_run) + if status: + if not isinstance(cidr_ip, list): + cidr_ip = [cidr_ip] + for single_cidr_ip in cidr_ip: + self.add_rule(ip_protocol, from_port, to_port, src_group_name, + src_group_owner_id, single_cidr_ip, + src_group_group_id, dry_run=dry_run) + return status + + def revoke(self, ip_protocol=None, from_port=None, to_port=None, + cidr_ip=None, src_group=None, dry_run=False): + group_name = None + if not self.vpc_id: + group_name = self.name + group_id = None + if self.vpc_id: + group_id = self.id + src_group_name = None + src_group_owner_id = None + src_group_group_id = None + if src_group: + cidr_ip = None + src_group_owner_id = src_group.owner_id + if not self.vpc_id: + src_group_name = src_group.name + else: + if hasattr(src_group, 'group_id'): + src_group_group_id = src_group.group_id + else: + src_group_group_id = src_group.id + status = self.connection.revoke_security_group(group_name, + src_group_name, + src_group_owner_id, + ip_protocol, + from_port, + to_port, + cidr_ip, + group_id, + src_group_group_id, + dry_run=dry_run) + if status: + self.remove_rule(ip_protocol, from_port, to_port, src_group_name, + src_group_owner_id, cidr_ip, 
src_group_group_id, + dry_run=dry_run) + return status + + def copy_to_region(self, region, name=None, dry_run=False): + """ + Create a copy of this security group in another region. + Note that the new security group will be a separate entity + and will not stay in sync automatically after the copy + operation. + + :type region: :class:`boto.ec2.regioninfo.RegionInfo` + :param region: The region to which this security group will be copied. + + :type name: string + :param name: The name of the copy. If not supplied, the copy + will have the same name as this security group. + + :rtype: :class:`boto.ec2.securitygroup.SecurityGroup` + :return: The new security group. + """ + if region.name == self.region: + raise BotoClientError('Unable to copy to the same Region') + conn_params = self.connection.get_params() + rconn = region.connect(**conn_params) + sg = rconn.create_security_group( + name or self.name, + self.description, + dry_run=dry_run + ) + source_groups = [] + for rule in self.rules: + for grant in rule.grants: + grant_nom = grant.name or grant.group_id + if grant_nom: + if grant_nom not in source_groups: + source_groups.append(grant_nom) + sg.authorize(None, None, None, None, grant, + dry_run=dry_run) + else: + sg.authorize(rule.ip_protocol, rule.from_port, rule.to_port, + grant.cidr_ip, dry_run=dry_run) + return sg + + def instances(self, dry_run=False): + """ + Find all of the current instances that are running within this + security group. + + :rtype: list of :class:`boto.ec2.instance.Instance` + :return: A list of Instance objects + """ + rs = [] + if self.vpc_id: + rs.extend(self.connection.get_all_reservations( + filters={'instance.group-id': self.id}, + dry_run=dry_run + )) + else: + rs.extend(self.connection.get_all_reservations( + filters={'group-id': self.id}, + dry_run=dry_run + )) + instances = [i for r in rs for i in r.instances] + return instances + + +class IPPermissionsList(list): + + def startElement(self, name, attrs, connection): + if name == 'item': + self.append(IPPermissions(self)) + return self[-1] + return None + + def endElement(self, name, value, connection): + pass + + +class IPPermissions(object): + + def __init__(self, parent=None): + self.parent = parent + self.ip_protocol = None + self.from_port = None + self.to_port = None + self.grants = [] + + def __repr__(self): + return 'IPPermissions:%s(%s-%s)' % (self.ip_protocol, + self.from_port, self.to_port) + + def startElement(self, name, attrs, connection): + if name == 'item': + self.grants.append(GroupOrCIDR(self)) + return self.grants[-1] + return None + + def endElement(self, name, value, connection): + if name == 'ipProtocol': + self.ip_protocol = value + elif name == 'fromPort': + self.from_port = value + elif name == 'toPort': + self.to_port = value + else: + setattr(self, name, value) + + def add_grant(self, name=None, owner_id=None, cidr_ip=None, group_id=None, + dry_run=False): + grant = GroupOrCIDR(self) + grant.owner_id = owner_id + grant.group_id = group_id + grant.name = name + grant.cidr_ip = cidr_ip + self.grants.append(grant) + return grant + + +class GroupOrCIDR(object): + + def __init__(self, parent=None): + self.owner_id = None + self.group_id = None + self.name = None + self.cidr_ip = None + + def __repr__(self): + if self.cidr_ip: + return '%s' % self.cidr_ip + else: + return '%s-%s' % (self.name or self.group_id, self.owner_id) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'userId': + self.owner_id 
= value
+        elif name == 'groupId':
+            self.group_id = value
+        elif name == 'groupName':
+            self.name = value
+        elif name == 'cidrIp':
+            self.cidr_ip = value
+        else:
+            setattr(self, name, value)
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/snapshot.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/snapshot.py
new file mode 100644
index 0000000000000000000000000000000000000000..eaf7164cf9856616724ec6019af9c3026d1c24ef
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/snapshot.py
@@ -0,0 +1,187 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents an EC2 Elastic Block Store Snapshot
+"""
+from boto.ec2.ec2object import TaggedEC2Object
+from boto.ec2.zone import Zone
+
+
+class Snapshot(TaggedEC2Object):
+    AttrName = 'createVolumePermission'
+
+    def __init__(self, connection=None):
+        super(Snapshot, self).__init__(connection)
+        self.id = None
+        self.volume_id = None
+        self.status = None
+        self.progress = None
+        self.start_time = None
+        self.owner_id = None
+        self.owner_alias = None
+        self.volume_size = None
+        self.description = None
+        self.encrypted = None
+
+    def __repr__(self):
+        return 'Snapshot:%s' % self.id
+
+    def endElement(self, name, value, connection):
+        if name == 'snapshotId':
+            self.id = value
+        elif name == 'volumeId':
+            self.volume_id = value
+        elif name == 'status':
+            self.status = value
+        elif name == 'startTime':
+            self.start_time = value
+        elif name == 'ownerId':
+            self.owner_id = value
+        elif name == 'ownerAlias':
+            self.owner_alias = value
+        elif name == 'volumeSize':
+            try:
+                self.volume_size = int(value)
+            except ValueError:
+                self.volume_size = value
+        elif name == 'description':
+            self.description = value
+        elif name == 'encrypted':
+            self.encrypted = (value.lower() == 'true')
+        else:
+            setattr(self, name, value)
+
+    def _update(self, updated):
+        self.progress = updated.progress
+        self.status = updated.status
+
+    def update(self, validate=False, dry_run=False):
+        """
+        Update the data associated with this snapshot by querying EC2.
+
+        :type validate: bool
+        :param validate: By default, if EC2 returns no data about the
+                         snapshot the update method returns quietly. If
+                         the validate param is True, however, it will
+                         raise a ValueError exception if no data is
+                         returned from EC2.
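+
+        Example (a minimal sketch; the region and snapshot ID below are
+        illustrative):
+
+            >>> import boto.ec2
+            >>> conn = boto.ec2.connect_to_region('us-east-1')
+            >>> snap = conn.get_all_snapshots(['snap-12345678'])[0]
+            >>> progress = snap.update(validate=True)  # e.g. '100%'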
+ """ + rs = self.connection.get_all_snapshots([self.id], dry_run=dry_run) + if len(rs) > 0: + self._update(rs[0]) + elif validate: + raise ValueError('%s is not a valid Snapshot ID' % self.id) + return self.progress + + def delete(self, dry_run=False): + return self.connection.delete_snapshot(self.id, dry_run=dry_run) + + def get_permissions(self, dry_run=False): + attrs = self.connection.get_snapshot_attribute( + self.id, + self.AttrName, + dry_run=dry_run + ) + return attrs.attrs + + def share(self, user_ids=None, groups=None, dry_run=False): + return self.connection.modify_snapshot_attribute(self.id, + self.AttrName, + 'add', + user_ids, + groups, + dry_run=dry_run) + + def unshare(self, user_ids=None, groups=None, dry_run=False): + return self.connection.modify_snapshot_attribute(self.id, + self.AttrName, + 'remove', + user_ids, + groups, + dry_run=dry_run) + + def reset_permissions(self, dry_run=False): + return self.connection.reset_snapshot_attribute( + self.id, + self.AttrName, + dry_run=dry_run + ) + + def create_volume(self, zone, size=None, volume_type=None, iops=None, + dry_run=False): + """ + Create a new EBS Volume from this Snapshot + + :type zone: string or :class:`boto.ec2.zone.Zone` + :param zone: The availability zone in which the Volume will be created. + + :type size: int + :param size: The size of the new volume, in GiB. (optional). Defaults to + the size of the snapshot. + + :type volume_type: string + :param volume_type: The type of the volume. (optional). Valid + values are: standard | io1 | gp2. + + :type iops: int + :param iops: The provisioned IOPs you want to associate with + this volume. (optional) + """ + if isinstance(zone, Zone): + zone = zone.name + return self.connection.create_volume( + size, + zone, + self.id, + volume_type, + iops, + self.encrypted, + dry_run=dry_run + ) + + +class SnapshotAttribute(object): + def __init__(self, parent=None): + self.snapshot_id = None + self.attrs = {} + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'createVolumePermission': + self.name = 'create_volume_permission' + elif name == 'group': + if 'groups' in self.attrs: + self.attrs['groups'].append(value) + else: + self.attrs['groups'] = [value] + elif name == 'userId': + if 'user_ids' in self.attrs: + self.attrs['user_ids'].append(value) + else: + self.attrs['user_ids'] = [value] + elif name == 'snapshotId': + self.snapshot_id = value + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/spotdatafeedsubscription.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/spotdatafeedsubscription.py new file mode 100644 index 0000000000000000000000000000000000000000..d0e0be8d3c11b62e5a9849b8ad1bbca58102457a --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/spotdatafeedsubscription.py @@ -0,0 +1,65 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an EC2 Spot Instance Datafeed Subscription +""" +from boto.ec2.ec2object import EC2Object +from boto.ec2.spotinstancerequest import SpotInstanceStateFault + + +class SpotDatafeedSubscription(EC2Object): + + def __init__(self, connection=None, owner_id=None, + bucket=None, prefix=None, state=None, fault=None): + super(SpotDatafeedSubscription, self).__init__(connection) + self.owner_id = owner_id + self.bucket = bucket + self.prefix = prefix + self.state = state + self.fault = fault + + def __repr__(self): + return 'SpotDatafeedSubscription:%s' % self.bucket + + def startElement(self, name, attrs, connection): + if name == 'fault': + self.fault = SpotInstanceStateFault() + return self.fault + else: + return None + + def endElement(self, name, value, connection): + if name == 'ownerId': + self.owner_id = value + elif name == 'bucket': + self.bucket = value + elif name == 'prefix': + self.prefix = value + elif name == 'state': + self.state = value + else: + setattr(self, name, value) + + def delete(self, dry_run=False): + return self.connection.delete_spot_datafeed_subscription( + dry_run=dry_run + ) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/spotinstancerequest.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/spotinstancerequest.py new file mode 100644 index 0000000000000000000000000000000000000000..da087fefdbef68bef6b4daf055713bd53160f85a --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/spotinstancerequest.py @@ -0,0 +1,192 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an EC2 Spot Instance Request +""" + +from boto.ec2.ec2object import TaggedEC2Object +from boto.ec2.launchspecification import LaunchSpecification + + +class SpotInstanceStateFault(object): + """ + The fault codes for the Spot Instance request, if any. + + :ivar code: The reason code for the Spot Instance state change. + :ivar message: The message for the Spot Instance state change. 
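+
+    Example (a minimal sketch; the request ID below is illustrative and
+    `conn` is assumed to be a connected EC2Connection):
+
+        >>> req = conn.get_all_spot_instance_requests(['sir-12345678'])[0]
+        >>> if req.fault is not None:
+        ...     print('%s: %s' % (req.fault.code, req.fault.message))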
+ """ + + def __init__(self, code=None, message=None): + self.code = code + self.message = message + + def __repr__(self): + return '(%s, %s)' % (self.code, self.message) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'code': + self.code = value + elif name == 'message': + self.message = value + setattr(self, name, value) + + +class SpotInstanceStatus(object): + """ + Contains the status of a Spot Instance Request. + + :ivar code: Status code of the request. + :ivar message: The description for the status code for the Spot request. + :ivar update_time: Time the status was stated. + """ + + def __init__(self, code=None, update_time=None, message=None): + self.code = code + self.update_time = update_time + self.message = message + + def __repr__(self): + return '' % self.code + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'code': + self.code = value + elif name == 'message': + self.message = value + elif name == 'updateTime': + self.update_time = value + + +class SpotInstanceRequest(TaggedEC2Object): + """ + + :ivar id: The ID of the Spot Instance Request. + :ivar price: The maximum hourly price for any Spot Instance launched to + fulfill the request. + :ivar type: The Spot Instance request type. + :ivar state: The state of the Spot Instance request. + :ivar fault: The fault codes for the Spot Instance request, if any. + :ivar valid_from: The start date of the request. If this is a one-time + request, the request becomes active at this date and time and remains + active until all instances launch, the request expires, or the request is + canceled. If the request is persistent, the request becomes active at this + date and time and remains active until it expires or is canceled. + :ivar valid_until: The end date of the request. If this is a one-time + request, the request remains active until all instances launch, the request + is canceled, or this date is reached. If the request is persistent, it + remains active until it is canceled or this date is reached. + :ivar launch_group: The instance launch group. Launch groups are Spot + Instances that launch together and terminate together. + :ivar launched_availability_zone: foo + :ivar product_description: The Availability Zone in which the bid is + launched. + :ivar availability_zone_group: The Availability Zone group. If you specify + the same Availability Zone group for all Spot Instance requests, all Spot + Instances are launched in the same Availability Zone. + :ivar create_time: The time stamp when the Spot Instance request was + created. + :ivar launch_specification: Additional information for launching instances. + :ivar instance_id: The instance ID, if an instance has been launched to + fulfill the Spot Instance request. + :ivar status: The status code and status message describing the Spot + Instance request. 
+ + """ + + def __init__(self, connection=None): + super(SpotInstanceRequest, self).__init__(connection) + self.id = None + self.price = None + self.type = None + self.state = None + self.fault = None + self.valid_from = None + self.valid_until = None + self.launch_group = None + self.launched_availability_zone = None + self.product_description = None + self.availability_zone_group = None + self.create_time = None + self.launch_specification = None + self.instance_id = None + self.status = None + + def __repr__(self): + return 'SpotInstanceRequest:%s' % self.id + + def startElement(self, name, attrs, connection): + retval = super(SpotInstanceRequest, self).startElement(name, attrs, + connection) + if retval is not None: + return retval + if name == 'launchSpecification': + self.launch_specification = LaunchSpecification(connection) + return self.launch_specification + elif name == 'fault': + self.fault = SpotInstanceStateFault() + return self.fault + elif name == 'status': + self.status = SpotInstanceStatus() + return self.status + else: + return None + + def endElement(self, name, value, connection): + if name == 'spotInstanceRequestId': + self.id = value + elif name == 'spotPrice': + self.price = float(value) + elif name == 'type': + self.type = value + elif name == 'state': + self.state = value + elif name == 'validFrom': + self.valid_from = value + elif name == 'validUntil': + self.valid_until = value + elif name == 'launchGroup': + self.launch_group = value + elif name == 'availabilityZoneGroup': + self.availability_zone_group = value + elif name == 'launchedAvailabilityZone': + self.launched_availability_zone = value + elif name == 'instanceId': + self.instance_id = value + elif name == 'createTime': + self.create_time = value + elif name == 'productDescription': + self.product_description = value + else: + setattr(self, name, value) + + def cancel(self, dry_run=False): + self.connection.cancel_spot_instance_requests( + [self.id], + dry_run=dry_run + ) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/spotpricehistory.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/spotpricehistory.py new file mode 100644 index 0000000000000000000000000000000000000000..ac125de6830002a4c342c64852e7c257362ad6f3 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/spotpricehistory.py @@ -0,0 +1,54 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
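+#
+# Example usage (a minimal sketch; the region and filter values below are
+# illustrative, and AWS credentials are assumed to be configured):
+#
+#     import boto.ec2
+#     conn = boto.ec2.connect_to_region('us-east-1')
+#     history = conn.get_spot_price_history(instance_type='m1.small',
+#                                           product_description='Linux/UNIX')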
+
+"""
+Represents an EC2 Spot Price History record
+"""
+
+from boto.ec2.ec2object import EC2Object
+
+
+class SpotPriceHistory(EC2Object):
+
+    def __init__(self, connection=None):
+        super(SpotPriceHistory, self).__init__(connection)
+        self.price = 0.0
+        self.instance_type = None
+        self.product_description = None
+        self.timestamp = None
+        self.availability_zone = None
+
+    def __repr__(self):
+        return 'SpotPriceHistory(%s):%.2f' % (self.instance_type, self.price)
+
+    def endElement(self, name, value, connection):
+        if name == 'instanceType':
+            self.instance_type = value
+        elif name == 'spotPrice':
+            self.price = float(value)
+        elif name == 'productDescription':
+            self.product_description = value
+        elif name == 'timestamp':
+            self.timestamp = value
+        elif name == 'availabilityZone':
+            self.availability_zone = value
+        else:
+            setattr(self, name, value)
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/tag.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/tag.py
new file mode 100644
index 0000000000000000000000000000000000000000..deb2c788000a4b5bd848cdd082dccec4237e01bc
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/tag.py
@@ -0,0 +1,84 @@
+# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+class TagSet(dict):
+    """
+    A TagSet is used to collect the tags associated with a particular
+    EC2 resource. Not all resources can be tagged but for those that
+    can, this dict object will be used to collect those values. See
+    :class:`boto.ec2.ec2object.TaggedEC2Object` for more details.
+    """
+
+    def __init__(self, connection=None):
+        self.connection = connection
+        self._current_key = None
+        self._current_value = None
+
+    def startElement(self, name, attrs, connection):
+        if name == 'item':
+            self._current_key = None
+            self._current_value = None
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'key':
+            self._current_key = value
+        elif name == 'value':
+            self._current_value = value
+        elif name == 'item':
+            self[self._current_key] = self._current_value
+
+
+class Tag(object):
+    """
+    A Tag is used when creating or listing all tags related to
+    an AWS account. It records not only the key and value but
+    also the ID of the resource to which the tag is attached
+    as well as the type of the resource.
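+
+    Example (a minimal sketch; the region and filter below are
+    illustrative):
+
+        >>> import boto.ec2
+        >>> conn = boto.ec2.connect_to_region('us-east-1')
+        >>> for tag in conn.get_all_tags(filters={'resource-type': 'volume'}):
+        ...     print('%s %s=%s' % (tag.res_id, tag.name, tag.value))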
+ """ + + def __init__(self, connection=None, res_id=None, res_type=None, + name=None, value=None): + self.connection = connection + self.res_id = res_id + self.res_type = res_type + self.name = name + self.value = value + + def __repr__(self): + return 'Tag:%s' % self.name + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'resourceId': + self.res_id = value + elif name == 'resourceType': + self.res_type = value + elif name == 'key': + self.name = value + elif name == 'value': + self.value = value + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/volume.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/volume.py new file mode 100644 index 0000000000000000000000000000000000000000..c40062b37ce43f5aea6952d896314b4aea102414 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/volume.py @@ -0,0 +1,315 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an EC2 Elastic Block Storage Volume +""" +from boto.resultset import ResultSet +from boto.ec2.tag import Tag +from boto.ec2.ec2object import TaggedEC2Object + + +class Volume(TaggedEC2Object): + """ + Represents an EBS volume. + + :ivar id: The unique ID of the volume. + :ivar create_time: The timestamp of when the volume was created. + :ivar status: The status of the volume. + :ivar size: The size (in GB) of the volume. + :ivar snapshot_id: The ID of the snapshot this volume was created + from, if applicable. + :ivar attach_data: An AttachmentSet object. + :ivar zone: The availability zone this volume is in. + :ivar type: The type of volume (standard or consistent-iops) + :ivar iops: If this volume is of type consistent-iops, this is + the number of IOPS provisioned (10-300). + :ivar encrypted: True if this volume is encrypted. 
+ """ + + def __init__(self, connection=None): + super(Volume, self).__init__(connection) + self.id = None + self.create_time = None + self.status = None + self.size = None + self.snapshot_id = None + self.attach_data = None + self.zone = None + self.type = None + self.iops = None + self.encrypted = None + + def __repr__(self): + return 'Volume:%s' % self.id + + def startElement(self, name, attrs, connection): + retval = super(Volume, self).startElement(name, attrs, connection) + if retval is not None: + return retval + if name == 'attachmentSet': + self.attach_data = AttachmentSet() + return self.attach_data + elif name == 'tagSet': + self.tags = ResultSet([('item', Tag)]) + return self.tags + else: + return None + + def endElement(self, name, value, connection): + if name == 'volumeId': + self.id = value + elif name == 'createTime': + self.create_time = value + elif name == 'status': + if value != '': + self.status = value + elif name == 'size': + self.size = int(value) + elif name == 'snapshotId': + self.snapshot_id = value + elif name == 'availabilityZone': + self.zone = value + elif name == 'volumeType': + self.type = value + elif name == 'iops': + self.iops = int(value) + elif name == 'encrypted': + self.encrypted = (value.lower() == 'true') + else: + setattr(self, name, value) + + def _update(self, updated): + self.__dict__.update(updated.__dict__) + + def update(self, validate=False, dry_run=False): + """ + Update the data associated with this volume by querying EC2. + + :type validate: bool + :param validate: By default, if EC2 returns no data about the + volume the update method returns quietly. If + the validate param is True, however, it will + raise a ValueError exception if no data is + returned from EC2. + """ + # Check the resultset since Eucalyptus ignores the volumeId param + unfiltered_rs = self.connection.get_all_volumes( + [self.id], + dry_run=dry_run + ) + rs = [x for x in unfiltered_rs if x.id == self.id] + if len(rs) > 0: + self._update(rs[0]) + elif validate: + raise ValueError('%s is not a valid Volume ID' % self.id) + return self.status + + def delete(self, dry_run=False): + """ + Delete this EBS volume. + + :rtype: bool + :return: True if successful + """ + return self.connection.delete_volume(self.id, dry_run=dry_run) + + def attach(self, instance_id, device, dry_run=False): + """ + Attach this EBS volume to an EC2 instance. + + :type instance_id: str + :param instance_id: The ID of the EC2 instance to which it will + be attached. + + :type device: str + :param device: The device on the instance through which the + volume will be exposed (e.g. /dev/sdh) + + :rtype: bool + :return: True if successful + """ + return self.connection.attach_volume( + self.id, + instance_id, + device, + dry_run=dry_run + ) + + def detach(self, force=False, dry_run=False): + """ + Detach this EBS volume from an EC2 instance. + + :type force: bool + :param force: Forces detachment if the previous detachment + attempt did not occur cleanly. This option can lead to + data loss or a corrupted file system. Use this option only + as a last resort to detach a volume from a failed + instance. The instance will not have an opportunity to + flush file system caches nor file system meta data. If you + use this option, you must perform file system check and + repair procedures. 
+ + :rtype: bool + :return: True if successful + """ + instance_id = None + if self.attach_data: + instance_id = self.attach_data.instance_id + device = None + if self.attach_data: + device = self.attach_data.device + return self.connection.detach_volume( + self.id, + instance_id, + device, + force, + dry_run=dry_run + ) + + def create_snapshot(self, description=None, dry_run=False): + """ + Create a snapshot of this EBS Volume. + + :type description: str + :param description: A description of the snapshot. + Limited to 256 characters. + + :rtype: :class:`boto.ec2.snapshot.Snapshot` + :return: The created Snapshot object + """ + return self.connection.create_snapshot( + self.id, + description, + dry_run=dry_run + ) + + def volume_state(self): + """ + Returns the state of the volume. Same value as the status attribute. + """ + return self.status + + def attachment_state(self): + """ + Get the attachment state. + """ + state = None + if self.attach_data: + state = self.attach_data.status + return state + + def snapshots(self, owner=None, restorable_by=None, dry_run=False): + """ + Get all snapshots related to this volume. Note that this requires + that all available snapshots for the account be retrieved from EC2 + first and then the list is filtered client-side to contain only + those for this volume. + + :type owner: str + :param owner: If present, only the snapshots owned by the + specified user will be returned. Valid values are: + + * self + * amazon + * AWS Account ID + + :type restorable_by: str + :param restorable_by: If present, only the snapshots that + are restorable by the specified account id will be returned. + + :rtype: list of L{boto.ec2.snapshot.Snapshot} + :return: The requested Snapshot objects + + """ + rs = self.connection.get_all_snapshots( + owner=owner, + restorable_by=restorable_by, + dry_run=dry_run + ) + mine = [] + for snap in rs: + if snap.volume_id == self.id: + mine.append(snap) + return mine + + +class AttachmentSet(object): + """ + Represents an EBS attachmentset. + + :ivar id: The unique ID of the volume. 
+ :ivar instance_id: The unique ID of the attached instance + :ivar status: The status of the attachment + :ivar attach_time: Attached since + :ivar device: The device the instance has mapped + """ + def __init__(self): + self.id = None + self.instance_id = None + self.status = None + self.attach_time = None + self.device = None + + def __repr__(self): + return 'AttachmentSet:%s' % self.id + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'volumeId': + self.id = value + elif name == 'instanceId': + self.instance_id = value + elif name == 'status': + self.status = value + elif name == 'attachTime': + self.attach_time = value + elif name == 'device': + self.device = value + else: + setattr(self, name, value) + + +class VolumeAttribute(object): + def __init__(self, parent=None): + self.id = None + self._key_name = None + self.attrs = {} + + def startElement(self, name, attrs, connection): + if name == 'autoEnableIO': + self._key_name = name + return None + + def endElement(self, name, value, connection): + if name == 'value': + if value.lower() == 'true': + self.attrs[self._key_name] = True + else: + self.attrs[self._key_name] = False + elif name == 'volumeId': + self.id = value + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/volumestatus.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/volumestatus.py new file mode 100644 index 0000000000000000000000000000000000000000..78de2bb04fd0bb82e0b51f7cfae48adaa0e1cdbc --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/volumestatus.py @@ -0,0 +1,205 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.ec2.instancestatus import Status, Details + + +class Event(object): + """ + A status event for an instance. + + :ivar type: The type of the event. + :ivar id: The ID of the event. + :ivar description: A string describing the reason for the event. + :ivar not_before: A datestring describing the earliest time for + the event. + :ivar not_after: A datestring describing the latest time for + the event. 
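+
+    Example (a minimal sketch; `conn` is assumed to be a connected
+    EC2Connection):
+
+        >>> for vs in conn.get_all_volume_status():
+        ...     for event in (vs.events or []):
+        ...         print('%s %s' % (event.type, event.description))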
+ """ + + def __init__(self, type=None, id=None, description=None, + not_before=None, not_after=None): + self.type = type + self.id = id + self.description = description + self.not_before = not_before + self.not_after = not_after + + def __repr__(self): + return 'Event:%s' % self.type + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'eventType': + self.type = value + elif name == 'eventId': + self.id = value + elif name == 'description': + self.description = value + elif name == 'notBefore': + self.not_before = value + elif name == 'notAfter': + self.not_after = value + else: + setattr(self, name, value) + + +class EventSet(list): + + def startElement(self, name, attrs, connection): + if name == 'item': + event = Event() + self.append(event) + return event + else: + return None + + def endElement(self, name, value, connection): + setattr(self, name, value) + + +class Action(object): + """ + An action for an instance. + + :ivar code: The code for the type of the action. + :ivar id: The ID of the event. + :ivar type: The type of the event. + :ivar description: A description of the action. + """ + + def __init__(self, code=None, id=None, description=None, type=None): + self.code = code + self.id = id + self.type = type + self.description = description + + def __repr__(self): + return 'Action:%s' % self.code + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'eventType': + self.type = value + elif name == 'eventId': + self.id = value + elif name == 'description': + self.description = value + elif name == 'code': + self.code = value + else: + setattr(self, name, value) + + +class ActionSet(list): + + def startElement(self, name, attrs, connection): + if name == 'item': + action = Action() + self.append(action) + return action + else: + return None + + def endElement(self, name, value, connection): + setattr(self, name, value) + + +class VolumeStatus(object): + """ + Represents an EC2 Volume status as reported by + DescribeVolumeStatus request. + + :ivar id: The volume identifier. + :ivar zone: The availability zone of the volume + :ivar volume_status: A Status object that reports impaired + functionality that arises from problems internal to the instance. + :ivar events: A list of events relevant to the instance. + :ivar actions: A list of events relevant to the instance. + """ + + def __init__(self, id=None, zone=None): + self.id = id + self.zone = zone + self.volume_status = Status() + self.events = None + self.actions = None + + def __repr__(self): + return 'VolumeStatus:%s' % self.id + + def startElement(self, name, attrs, connection): + if name == 'eventsSet': + self.events = EventSet() + return self.events + elif name == 'actionsSet': + self.actions = ActionSet() + return self.actions + elif name == 'volumeStatus': + return self.volume_status + else: + return None + + def endElement(self, name, value, connection): + if name == 'volumeId': + self.id = value + elif name == 'availabilityZone': + self.zone = value + else: + setattr(self, name, value) + + +class VolumeStatusSet(list): + """ + A list object that contains the results of a call to + DescribeVolumeStatus request. Each element of the + list will be an VolumeStatus object. 
+ + :ivar next_token: If the response was truncated by + the EC2 service, the next_token attribute of the + object will contain the string that needs to be + passed in to the next request to retrieve the next + set of results. + """ + + def __init__(self, connection=None): + list.__init__(self) + self.connection = connection + self.next_token = None + + def startElement(self, name, attrs, connection): + if name == 'item': + status = VolumeStatus() + self.append(status) + return status + else: + return None + + def endElement(self, name, value, connection): + if name == 'NextToken': + self.next_token = value + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2/zone.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2/zone.py new file mode 100644 index 0000000000000000000000000000000000000000..85ed10224b3058cadb6972ec9d4694cd9e4225e9 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2/zone.py @@ -0,0 +1,78 @@ +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an EC2 Availability Zone +""" +from boto.ec2.ec2object import EC2Object + + +class MessageSet(list): + """ + A list object that contains messages associated with + an availability zone. + """ + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'message': + self.append(value) + else: + setattr(self, name, value) + + +class Zone(EC2Object): + """ + Represents an Availability Zone. + + :ivar name: The name of the zone. + :ivar state: The current state of the zone. + :ivar region_name: The name of the region the zone is associated with. + :ivar messages: A list of messages related to the zone. 
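+
+    Example (a minimal sketch; the region below is illustrative):
+
+        >>> import boto.ec2
+        >>> conn = boto.ec2.connect_to_region('us-east-1')
+        >>> names = [z.name for z in conn.get_all_zones()]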
+ """ + + def __init__(self, connection=None): + super(Zone, self).__init__(connection) + self.name = None + self.state = None + self.region_name = None + self.messages = None + + def __repr__(self): + return 'Zone:%s' % self.name + + def startElement(self, name, attrs, connection): + if name == 'messageSet': + self.messages = MessageSet() + return self.messages + return None + + def endElement(self, name, value, connection): + if name == 'zoneName': + self.name = value + elif name == 'zoneState': + self.state = value + elif name == 'regionName': + self.region_name = value + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2containerservice/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2containerservice/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a8946a0ee73a48f1f1e95330a57559e5b3139837 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2containerservice/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the Amazon EC2 Container Service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.ec2containerservice import EC2ContainerServiceConnection + return get_regions('', connection_cls=EC2ContainerServiceConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2containerservice/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2containerservice/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..4ad32aeaa96b920c79b522b0a0b7b8d35a095ef5 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2containerservice/exceptions.py @@ -0,0 +1,31 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.exception import BotoServerError + + +class ServerException(BotoServerError): + pass + + +class ClientException(BotoServerError): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ec2containerservice/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/ec2containerservice/layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..4168bdd01769203fdd6e5fbe8f413cc61d5fdc46 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ec2containerservice/layer1.py @@ -0,0 +1,748 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.ec2containerservice import exceptions + + +class EC2ContainerServiceConnection(AWSQueryConnection): + """ + Amazon EC2 Container Service (Amazon ECS) is a highly scalable, + fast, container management service that makes it easy to run, + stop, and manage Docker containers on a cluster of Amazon EC2 + instances. Amazon ECS lets you launch and stop container-enabled + applications with simple API calls, allows you to get the state of + your cluster from a centralized service, and gives you access to + many familiar Amazon EC2 features like security groups, Amazon EBS + volumes, and IAM roles. 
+ + You can use Amazon ECS to schedule the placement of containers + across your cluster based on your resource needs, isolation + policies, and availability requirements. Amazon EC2 Container + Service eliminates the need for you to operate your own cluster + management and configuration management systems or worry about + scaling your management infrastructure. + """ + APIVersion = "2014-11-13" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "ecs.us-east-1.amazonaws.com" + ResponseError = JSONResponseError + + _faults = { + "ServerException": exceptions.ServerException, + "ClientException": exceptions.ClientException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(EC2ContainerServiceConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def create_cluster(self, cluster_name=None): + """ + Creates a new Amazon ECS cluster. By default, your account + will receive a `default` cluster when you launch your first + container instance. However, you can create your own cluster + with a unique name with the `CreateCluster` action. + + During the preview, each account is limited to two clusters. + + :type cluster_name: string + :param cluster_name: The name of your cluster. If you do not specify a + name for your cluster, you will create a cluster named `default`. + + """ + params = {} + if cluster_name is not None: + params['clusterName'] = cluster_name + return self._make_request( + action='CreateCluster', + verb='POST', + path='/', params=params) + + def delete_cluster(self, cluster): + """ + Deletes the specified cluster. You must deregister all + container instances from this cluster before you may delete + it. You can list the container instances in a cluster with + ListContainerInstances and deregister them with + DeregisterContainerInstance. + + :type cluster: string + :param cluster: The cluster you want to delete. + + """ + params = {'cluster': cluster, } + return self._make_request( + action='DeleteCluster', + verb='POST', + path='/', params=params) + + def deregister_container_instance(self, container_instance, cluster=None, + force=None): + """ + Deregisters an Amazon ECS container instance from the + specified cluster. This instance will no longer be available + to run tasks. + + :type cluster: string + :param cluster: The short name or full Amazon Resource Name (ARN) of + the cluster that hosts the container instance you want to + deregister. If you do not specify a cluster, the default cluster is + assumed. + + :type container_instance: string + :param container_instance: The container instance UUID or full Amazon + Resource Name (ARN) of the container instance you want to + deregister. The ARN contains the `arn:aws:ecs` namespace, followed + by the region of the container instance, the AWS account ID of the + container instance owner, the `container-instance` namespace, and + then the container instance UUID. For example, arn:aws:ecs: region + : aws_account_id :container-instance/ container_instance_UUID . + + :type force: boolean + :param force: Force the deregistration of the container instance. 
You + can use the `force` parameter if you have several tasks running on + a container instance and you don't want to run `StopTask` for each + task before deregistering the container instance. + + """ + params = {'containerInstance': container_instance, } + if cluster is not None: + params['cluster'] = cluster + if force is not None: + params['force'] = str( + force).lower() + return self._make_request( + action='DeregisterContainerInstance', + verb='POST', + path='/', params=params) + + def deregister_task_definition(self, task_definition): + """ + Deregisters the specified task definition. You will no longer + be able to run tasks from this definition after + deregistration. + + :type task_definition: string + :param task_definition: The `family` and `revision` ( + `family:revision`) or full Amazon Resource Name (ARN) of the task + definition that you want to deregister. + + """ + params = {'taskDefinition': task_definition, } + return self._make_request( + action='DeregisterTaskDefinition', + verb='POST', + path='/', params=params) + + def describe_clusters(self, clusters=None): + """ + Describes one or more of your clusters. + + :type clusters: list + :param clusters: A space-separated list of cluster names or full + cluster Amazon Resource Name (ARN) entries. If you do not specify a + cluster, the default cluster is assumed. + + """ + params = {} + if clusters is not None: + self.build_list_params(params, + clusters, + 'clusters.member') + return self._make_request( + action='DescribeClusters', + verb='POST', + path='/', params=params) + + def describe_container_instances(self, container_instances, cluster=None): + """ + Describes Amazon EC2 Container Service container instances. + Returns metadata about registered and remaining resources on + each container instance requested. + + :type cluster: string + :param cluster: The short name or full Amazon Resource Name (ARN) of + the cluster that hosts the container instances you want to + describe. If you do not specify a cluster, the default cluster is + assumed. + + :type container_instances: list + :param container_instances: A space-separated list of container + instance UUIDs or full Amazon Resource Name (ARN) entries. + + """ + params = {} + self.build_list_params(params, + container_instances, + 'containerInstances.member') + if cluster is not None: + params['cluster'] = cluster + return self._make_request( + action='DescribeContainerInstances', + verb='POST', + path='/', params=params) + + def describe_task_definition(self, task_definition): + """ + Describes a task definition. + + :type task_definition: string + :param task_definition: The `family` and `revision` ( + `family:revision`) or full Amazon Resource Name (ARN) of the task + definition that you want to describe. + + """ + params = {'taskDefinition': task_definition, } + return self._make_request( + action='DescribeTaskDefinition', + verb='POST', + path='/', params=params) + + def describe_tasks(self, tasks, cluster=None): + """ + Describes a specified task or tasks. + + :type cluster: string + :param cluster: The short name or full Amazon Resource Name (ARN) of + the cluster that hosts the task you want to describe. If you do not + specify a cluster, the default cluster is assumed. + + :type tasks: list + :param tasks: A space-separated list of task UUIDs or full Amazon + Resource Name (ARN) entries. 
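+
+        Example (a minimal sketch; the region and task ARN below are
+        illustrative):
+
+            >>> import boto.ec2containerservice
+            >>> ecs = boto.ec2containerservice.connect_to_region('us-east-1')
+            >>> resp = ecs.describe_tasks(
+            ...     ['arn:aws:ecs:us-east-1:123456789012:task/abcd1234'])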
+
+        """
+        params = {}
+        self.build_list_params(params,
+                               tasks,
+                               'tasks.member')
+        if cluster is not None:
+            params['cluster'] = cluster
+        return self._make_request(
+            action='DescribeTasks',
+            verb='POST',
+            path='/', params=params)
+
+    def discover_poll_endpoint(self, container_instance=None):
+        """
+        This action is only used by the Amazon EC2 Container Service
+        agent, and it is not intended for use outside of the agent.
+
+
+        Returns an endpoint for the Amazon EC2 Container Service agent
+        to poll for updates.
+
+        :type container_instance: string
+        :param container_instance: The container instance UUID or full Amazon
+            Resource Name (ARN) of the container instance. The ARN contains the
+            `arn:aws:ecs` namespace, followed by the region of the container
+            instance, the AWS account ID of the container instance owner, the
+            `container-instance` namespace, and then the container instance
+            UUID. For example,
+            `arn:aws:ecs:region:aws_account_id:container-instance/container_instance_UUID`.
+
+        """
+        params = {}
+        if container_instance is not None:
+            params['containerInstance'] = container_instance
+        return self._make_request(
+            action='DiscoverPollEndpoint',
+            verb='POST',
+            path='/', params=params)
+
+    def list_clusters(self, next_token=None, max_results=None):
+        """
+        Returns a list of existing clusters.
+
+        :type next_token: string
+        :param next_token: The `nextToken` value returned from a previous
+            paginated `ListClusters` request where `maxResults` was used and
+            the results exceeded the value of that parameter. Pagination
+            continues from the end of the previous results that returned the
+            `nextToken` value. This value is `null` when there are no more
+            results to return.
+
+        :type max_results: integer
+        :param max_results: The maximum number of cluster results returned by
+            `ListClusters` in paginated output. When this parameter is used,
+            `ListClusters` only returns `maxResults` results in a single page
+            along with a `nextToken` response element. The remaining results of
+            the initial request can be seen by sending another `ListClusters`
+            request with the returned `nextToken` value. This value can be
+            between 1 and 100. If this parameter is not used, then
+            `ListClusters` returns up to 100 results and a `nextToken` value if
+            applicable.
+
+        """
+        params = {}
+        if next_token is not None:
+            params['nextToken'] = next_token
+        if max_results is not None:
+            params['maxResults'] = max_results
+        return self._make_request(
+            action='ListClusters',
+            verb='POST',
+            path='/', params=params)
+
+    def list_container_instances(self, cluster=None, next_token=None,
+                                 max_results=None):
+        """
+        Returns a list of container instances in a specified cluster.
+
+        :type cluster: string
+        :param cluster: The short name or full Amazon Resource Name (ARN) of
+            the cluster that hosts the container instances you want to list. If
+            you do not specify a cluster, the default cluster is assumed.
+
+        :type next_token: string
+        :param next_token: The `nextToken` value returned from a previous
+            paginated `ListContainerInstances` request where `maxResults` was
+            used and the results exceeded the value of that parameter.
+            Pagination continues from the end of the previous results that
+            returned the `nextToken` value. This value is `null` when there are
+            no more results to return.
+
+        :type max_results: integer
+        :param max_results: The maximum number of container instance results
+            returned by `ListContainerInstances` in paginated output. When this
+            parameter is used, `ListContainerInstances` only returns
+            `maxResults` results in a single page along with a `nextToken`
+            response element. The remaining results of the initial request can
+            be seen by sending another `ListContainerInstances` request with
+            the returned `nextToken` value. This value can be between 1 and
+            100. If this parameter is not used, then `ListContainerInstances`
+            returns up to 100 results and a `nextToken` value if applicable.
+
+        """
+        params = {}
+        if cluster is not None:
+            params['cluster'] = cluster
+        if next_token is not None:
+            params['nextToken'] = next_token
+        if max_results is not None:
+            params['maxResults'] = max_results
+        return self._make_request(
+            action='ListContainerInstances',
+            verb='POST',
+            path='/', params=params)
+
+    def list_task_definitions(self, family_prefix=None, next_token=None,
+                              max_results=None):
+        """
+        Returns a list of task definitions that are registered to your
+        account. You can filter the results by family name with the
+        `familyPrefix` parameter.
+
+        :type family_prefix: string
+        :param family_prefix: The name of the family that you want to filter
+            the `ListTaskDefinitions` results with. Specifying a `familyPrefix`
+            will limit the listed task definitions to definitions that belong
+            to that family.
+
+        :type next_token: string
+        :param next_token: The `nextToken` value returned from a previous
+            paginated `ListTaskDefinitions` request where `maxResults` was used
+            and the results exceeded the value of that parameter. Pagination
+            continues from the end of the previous results that returned the
+            `nextToken` value. This value is `null` when there are no more
+            results to return.
+
+        :type max_results: integer
+        :param max_results: The maximum number of task definition results
+            returned by `ListTaskDefinitions` in paginated output. When this
+            parameter is used, `ListTaskDefinitions` only returns `maxResults`
+            results in a single page along with a `nextToken` response element.
+            The remaining results of the initial request can be seen by sending
+            another `ListTaskDefinitions` request with the returned `nextToken`
+            value. This value can be between 1 and 100. If this parameter is
+            not used, then `ListTaskDefinitions` returns up to 100 results and
+            a `nextToken` value if applicable.
+
+        """
+        params = {}
+        if family_prefix is not None:
+            params['familyPrefix'] = family_prefix
+        if next_token is not None:
+            params['nextToken'] = next_token
+        if max_results is not None:
+            params['maxResults'] = max_results
+        return self._make_request(
+            action='ListTaskDefinitions',
+            verb='POST',
+            path='/', params=params)
+
+    def list_tasks(self, cluster=None, container_instance=None, family=None,
+                   next_token=None, max_results=None):
+        """
+        Returns a list of tasks for a specified cluster. You can
+        filter the results by family name or by a particular container
+        instance with the `family` and `containerInstance` parameters.
+
+        :type cluster: string
+        :param cluster: The short name or full Amazon Resource Name (ARN) of
+            the cluster that hosts the tasks you want to list. If you do not
+            specify a cluster, the default cluster is assumed.
+
+        :type container_instance: string
+        :param container_instance: The container instance UUID or full Amazon
+            Resource Name (ARN) of the container instance that you want to
+            filter the `ListTasks` results with. Specifying a
+            `containerInstance` will limit the results to tasks that belong to
+            that container instance.
+
+        :type family: string
+        :param family: The name of the family that you want to filter the
+            `ListTasks` results with. Specifying a `family` will limit the
+            results to tasks that belong to that family.
+
+        :type next_token: string
+        :param next_token: The `nextToken` value returned from a previous
+            paginated `ListTasks` request where `maxResults` was used and the
+            results exceeded the value of that parameter. Pagination continues
+            from the end of the previous results that returned the `nextToken`
+            value. This value is `null` when there are no more results to
+            return.
+
+        :type max_results: integer
+        :param max_results: The maximum number of task results returned by
+            `ListTasks` in paginated output. When this parameter is used,
+            `ListTasks` only returns `maxResults` results in a single page
+            along with a `nextToken` response element. The remaining results of
+            the initial request can be seen by sending another `ListTasks`
+            request with the returned `nextToken` value. This value can be
+            between 1 and 100. If this parameter is not used, then `ListTasks`
+            returns up to 100 results and a `nextToken` value if applicable.
+
+        """
+        params = {}
+        if cluster is not None:
+            params['cluster'] = cluster
+        if container_instance is not None:
+            params['containerInstance'] = container_instance
+        if family is not None:
+            params['family'] = family
+        if next_token is not None:
+            params['nextToken'] = next_token
+        if max_results is not None:
+            params['maxResults'] = max_results
+        return self._make_request(
+            action='ListTasks',
+            verb='POST',
+            path='/', params=params)
+
+    def register_container_instance(self, cluster=None,
+                                    instance_identity_document=None,
+                                    instance_identity_document_signature=None,
+                                    total_resources=None):
+        """
+        This action is only used by the Amazon EC2 Container Service
+        agent, and it is not intended for use outside of the agent.
+
+
+        Registers an Amazon EC2 instance into the specified cluster.
+        This instance will become available to place containers on.
+
+        :type cluster: string
+        :param cluster: The short name or full Amazon Resource Name (ARN) of
+            the cluster that you want to register your container instance with.
+            If you do not specify a cluster, the default cluster is assumed.
+
+        :type instance_identity_document: string
+        :param instance_identity_document:
+
+        :type instance_identity_document_signature: string
+        :param instance_identity_document_signature:
+
+        :type total_resources: list
+        :param total_resources:
+
+        """
+        params = {}
+        if cluster is not None:
+            params['cluster'] = cluster
+        if instance_identity_document is not None:
+            params['instanceIdentityDocument'] = instance_identity_document
+        if instance_identity_document_signature is not None:
+            params['instanceIdentityDocumentSignature'] = instance_identity_document_signature
+        if total_resources is not None:
+            self.build_complex_list_params(
+                params, total_resources,
+                'totalResources.member',
+                ('name', 'type', 'doubleValue', 'longValue', 'integerValue', 'stringSetValue'))
+        return self._make_request(
+            action='RegisterContainerInstance',
+            verb='POST',
+            path='/', params=params)
+
+    def register_task_definition(self, family, container_definitions):
+        """
+        Registers a new task definition from the supplied `family` and
+        `containerDefinitions`.
+
+        :type family: string
+        :param family: You can specify a `family` for a task definition, which
+            allows you to track multiple versions of the same task definition.
+            You can think of the `family` as a name for your task definition.
+
+        :type container_definitions: list
+        :param container_definitions: A list of container definitions in JSON
+            format that describe the different containers that make up your
+            task.
+
+        """
+        params = {'family': family, }
+        self.build_complex_list_params(
+            params, container_definitions,
+            'containerDefinitions.member',
+            ('name', 'image', 'cpu', 'memory', 'links', 'portMappings', 'essential', 'entryPoint', 'command', 'environment'))
+        return self._make_request(
+            action='RegisterTaskDefinition',
+            verb='POST',
+            path='/', params=params)
+
+    def run_task(self, task_definition, cluster=None, overrides=None,
+                 count=None):
+        """
+        Start a task using random placement and the default Amazon ECS
+        scheduler. If you want to use your own scheduler or place a
+        task on a specific container instance, use `StartTask`
+        instead.
+
+        :type cluster: string
+        :param cluster: The short name or full Amazon Resource Name (ARN) of
+            the cluster that you want to run your task on. If you do not
+            specify a cluster, the default cluster is assumed.
+
+        :type task_definition: string
+        :param task_definition: The `family` and `revision` (
+            `family:revision`) or full Amazon Resource Name (ARN) of the task
+            definition that you want to run.
+
+        :type overrides: dict
+        :param overrides:
+
+        :type count: integer
+        :param count: The number of instances of the specified task that you
+            would like to place on your cluster.
+
+        """
+        params = {'taskDefinition': task_definition, }
+        if cluster is not None:
+            params['cluster'] = cluster
+        if overrides is not None:
+            params['overrides'] = overrides
+        if count is not None:
+            params['count'] = count
+        return self._make_request(
+            action='RunTask',
+            verb='POST',
+            path='/', params=params)
+
+    def start_task(self, task_definition, container_instances, cluster=None,
+                   overrides=None):
+        """
+        Starts a new task from the specified task definition on the
+        specified container instance or instances. If you want to use
+        the default Amazon ECS scheduler to place your task, use
+        `RunTask` instead.
+
+        :type cluster: string
+        :param cluster: The short name or full Amazon Resource Name (ARN) of
+            the cluster that you want to start your task on. If you do not
+            specify a cluster, the default cluster is assumed.
+
+        :type task_definition: string
+        :param task_definition: The `family` and `revision` (
+            `family:revision`) or full Amazon Resource Name (ARN) of the task
+            definition that you want to start.
+
+        :type overrides: dict
+        :param overrides:
+
+        :type container_instances: list
+        :param container_instances: The container instance UUIDs or full Amazon
+            Resource Name (ARN) entries for the container instances on which
+            you would like to place your task.
+
+        """
+        params = {'taskDefinition': task_definition, }
+        self.build_list_params(params,
+                               container_instances,
+                               'containerInstances.member')
+        if cluster is not None:
+            params['cluster'] = cluster
+        if overrides is not None:
+            params['overrides'] = overrides
+        return self._make_request(
+            action='StartTask',
+            verb='POST',
+            path='/', params=params)
+
+    def stop_task(self, task, cluster=None):
+        """
+        Stops a running task.
+
+        :type cluster: string
+        :param cluster: The short name or full Amazon Resource Name (ARN) of
+            the cluster that hosts the task you want to stop. If you do not
+            specify a cluster, the default cluster is assumed.
+
+        :type task: string
+        :param task: The task UUID or full Amazon Resource Name (ARN) entry of
+            the task you would like to stop.
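+
+        Example (an illustrative sketch, not taken from the service
+        documentation; the region name and task ARN are placeholders,
+        and configured AWS credentials are assumed):
+
+            >>> import boto.ec2containerservice
+            >>> conn = boto.ec2containerservice.connect_to_region('us-east-1')
+            >>> conn.stop_task('arn:aws:ecs:us-east-1:123456789012:task/EXAMPLE')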
+ + """ + params = {'task': task, } + if cluster is not None: + params['cluster'] = cluster + return self._make_request( + action='StopTask', + verb='POST', + path='/', params=params) + + def submit_container_state_change(self, cluster=None, task=None, + container_name=None, status=None, + exit_code=None, reason=None, + network_bindings=None): + """ + This action is only used by the Amazon EC2 Container Service + agent, and it is not intended for use outside of the agent. + + + Sent to acknowledge that a container changed states. + + :type cluster: string + :param cluster: The short name or full Amazon Resource Name (ARN) of + the cluster that hosts the container. + + :type task: string + :param task: The task UUID or full Amazon Resource Name (ARN) of the + task that hosts the container. + + :type container_name: string + :param container_name: The name of the container. + + :type status: string + :param status: The status of the state change request. + + :type exit_code: integer + :param exit_code: The exit code returned for the state change request. + + :type reason: string + :param reason: The reason for the state change request. + + :type network_bindings: list + :param network_bindings: The network bindings of the container. + + """ + params = {} + if cluster is not None: + params['cluster'] = cluster + if task is not None: + params['task'] = task + if container_name is not None: + params['containerName'] = container_name + if status is not None: + params['status'] = status + if exit_code is not None: + params['exitCode'] = exit_code + if reason is not None: + params['reason'] = reason + if network_bindings is not None: + self.build_complex_list_params( + params, network_bindings, + 'networkBindings.member', + ('bindIP', 'containerPort', 'hostPort')) + return self._make_request( + action='SubmitContainerStateChange', + verb='POST', + path='/', params=params) + + def submit_task_state_change(self, cluster=None, task=None, status=None, + reason=None): + """ + This action is only used by the Amazon EC2 Container Service + agent, and it is not intended for use outside of the agent. + + + Sent to acknowledge that a task changed states. + + :type cluster: string + :param cluster: The short name or full Amazon Resource Name (ARN) of + the cluster that hosts the task. + + :type task: string + :param task: The task UUID or full Amazon Resource Name (ARN) of the + task in the state change request. + + :type status: string + :param status: The status of the state change request. + + :type reason: string + :param reason: The reason for the state change request. 
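+
+        Example (hypothetical values, with `conn` an
+        EC2ContainerServiceConnection; in practice only the ECS agent
+        itself sends these state changes):
+
+            >>> conn.submit_task_state_change(
+            ...     cluster='default',
+            ...     task='arn:aws:ecs:us-east-1:123456789012:task/EXAMPLE',
+            ...     status='STOPPED', reason='host maintenance')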
+ + """ + params = {} + if cluster is not None: + params['cluster'] = cluster + if task is not None: + params['task'] = task + if status is not None: + params['status'] = status + if reason is not None: + params['reason'] = reason + return self._make_request( + action='SubmitTaskStateChange', + verb='POST', + path='/', params=params) + + def _make_request(self, action, verb, path, params): + params['ContentType'] = 'JSON' + response = self.make_request(action=action, verb='POST', + path='/', params=params) + body = response.read().decode('utf-8') + boto.log.debug(body) + if response.status == 200: + return json.loads(body) + else: + json_body = json.loads(body) + fault_name = json_body.get('Error', {}).get('Code', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ecs/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/ecs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..46db50650efb3afd1112a121fe93b93b5982692c --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ecs/__init__.py @@ -0,0 +1,105 @@ +# Copyright (c) 2010 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import boto +from boto.connection import AWSQueryConnection, AWSAuthConnection +from boto.exception import BotoServerError +import time +import urllib +import xml.sax +from boto.ecs.item import ItemSet +from boto import handler + +class ECSConnection(AWSQueryConnection): + """ + ECommerce Connection + + For more information on how to use this module see: + + http://blog.coredumped.org/2010/09/search-for-books-on-amazon-using-boto.html + """ + + APIVersion = '2010-11-01' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, host='ecs.amazonaws.com', + debug=0, https_connection_factory=None, path='/', + security_token=None, profile_name=None): + super(ECSConnection, self).__init__(aws_access_key_id, aws_secret_access_key, + is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, + host, debug, https_connection_factory, path, + security_token=security_token, + profile_name=profile_name) + + def _required_auth_capability(self): + return ['ecs'] + + def get_response(self, action, params, page=0, itemSet=None): + """ + Utility method to handle calls to ECS and parsing of responses. 
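+
+        Returns an ItemSet (or the passed-in itemSet) populated from the
+        parsed XML response; raises BotoServerError if the HTTP status
+        is not 200 or the response is marked invalid.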
+ """ + params['Service'] = "AWSECommerceService" + params['Operation'] = action + if page: + params['ItemPage'] = page + response = self.make_request(None, params, "/onca/xml") + body = response.read().decode('utf-8') + boto.log.debug(body) + + if response.status != 200: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise BotoServerError(response.status, response.reason, body) + + if itemSet is None: + rs = ItemSet(self, action, params, page) + else: + rs = itemSet + h = handler.XmlHandler(rs, self) + xml.sax.parseString(body.encode('utf-8'), h) + if not rs.is_valid: + raise BotoServerError(response.status, '{Code}: {Message}'.format(**rs.errors[0])) + return rs + + # + # Group methods + # + + def item_search(self, search_index, **params): + """ + Returns items that satisfy the search criteria, including one or more search + indices. + + For a full list of search terms, + :see: http://docs.amazonwebservices.com/AWSECommerceService/2010-09-01/DG/index.html?ItemSearch.html + """ + params['SearchIndex'] = search_index + return self.get_response('ItemSearch', params) + + def item_lookup(self, **params): + """ + Returns items that satisfy the lookup query. + + For a full list of parameters, see: + http://s3.amazonaws.com/awsdocs/Associates/2011-08-01/prod-adv-api-dg-2011-08-01.pdf + """ + return self.get_response('ItemLookup', params) \ No newline at end of file diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ecs/item.py b/desktop/core/ext-py/boto-2.38.0/boto/ecs/item.py new file mode 100644 index 0000000000000000000000000000000000000000..79177a31d4a0354fd38e170d2aeca1f8bef163f6 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ecs/item.py @@ -0,0 +1,164 @@ +# Copyright (c) 2010 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
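+
+# Usage sketch (hypothetical; a configured Amazon Associates access key
+# is assumed, and the attribute path below is illustrative). Iterating
+# over the ItemSet returned by ECSConnection.item_search() fetches
+# further result pages transparently via the paging logic below:
+#
+#     from boto.ecs import ECSConnection
+#     conn = ECSConnection()
+#     for item in conn.item_search('Books', Keywords='python'):
+#         print(item.ItemAttributes.Title)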
+
+
+import xml.sax
+import cgi
+from boto.compat import six, StringIO
+
+class ResponseGroup(xml.sax.ContentHandler):
+    """A Generic "Response Group", which can
+    be anything from the entire list of Items to
+    specific response elements within an item"""
+
+    def __init__(self, connection=None, nodename=None):
+        """Initialize this Item"""
+        self._connection = connection
+        self._nodename = nodename
+        self._nodepath = []
+        self._curobj = None
+        self._xml = StringIO()
+
+    def __repr__(self):
+        return '<%s: %s>' % (self.__class__.__name__, self.__dict__)
+
+    #
+    # Attribute Functions
+    #
+    def get(self, name):
+        return self.__dict__.get(name)
+
+    def set(self, name, value):
+        self.__dict__[name] = value
+
+    def to_xml(self):
+        return "<%s>%s</%s>" % (self._nodename, self._xml.getvalue(), self._nodename)
+
+    #
+    # XML Parser functions
+    #
+    def startElement(self, name, attrs, connection):
+        self._xml.write("<%s>" % name)
+        self._nodepath.append(name)
+        if len(self._nodepath) == 1:
+            obj = ResponseGroup(self._connection)
+            self.set(name, obj)
+            self._curobj = obj
+        elif self._curobj:
+            self._curobj.startElement(name, attrs, connection)
+        return None
+
+    def endElement(self, name, value, connection):
+        self._xml.write("%s</%s>" % (cgi.escape(value).replace("&amp;", "&"), name))
+        if len(self._nodepath) == 0:
+            return
+        obj = None
+        curval = self.get(name)
+        if len(self._nodepath) == 1:
+            if value or not curval:
+                self.set(name, value)
+            if self._curobj:
+                self._curobj = None
+        #elif len(self._nodepath) == 2:
+            #self._curobj = None
+        elif self._curobj:
+            self._curobj.endElement(name, value, connection)
+        self._nodepath.pop()
+        return None
+
+
+class Item(ResponseGroup):
+    """A single Item"""
+
+    def __init__(self, connection=None):
+        """Initialize this Item"""
+        ResponseGroup.__init__(self, connection, "Item")
+
+class ItemSet(ResponseGroup):
+    """A special ResponseGroup that has built-in paging, and
+    only creates new Items on the "Item" tag"""
+
+    def __init__(self, connection, action, params, page=0):
+        ResponseGroup.__init__(self, connection, "Items")
+        self.objs = []
+        self.iter = None
+        self.page = page
+        self.action = action
+        self.params = params
+        self.curItem = None
+        self.total_results = 0
+        self.total_pages = 0
+        self.is_valid = False
+        self.errors = []
+
+    def startElement(self, name, attrs, connection):
+        if name == "Item":
+            self.curItem = Item(self._connection)
+        elif self.curItem is not None:
+            self.curItem.startElement(name, attrs, connection)
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'TotalResults':
+            self.total_results = value
+        elif name == 'TotalPages':
+            self.total_pages = value
+        elif name == 'IsValid':
+            if value == 'True':
+                self.is_valid = True
+        elif name == 'Code':
+            self.errors.append({'Code': value, 'Message': None})
+        elif name == 'Message':
+            self.errors[-1]['Message'] = value
+        elif name == 'Item':
+            self.objs.append(self.curItem)
+            self._xml.write(self.curItem.to_xml())
+            self.curItem = None
+        elif self.curItem is not None:
+            self.curItem.endElement(name, value, connection)
+        return None
+
+    def __next__(self):
+        """Special paging functionality"""
+        if self.iter is None:
+            self.iter = iter(self.objs)
+        try:
+            return next(self.iter)
+        except StopIteration:
+            self.iter = None
+            self.objs = []
+            if int(self.page) < int(self.total_pages):
+                self.page += 1
+                self._connection.get_response(self.action, self.params, self.page, self)
+                return next(self)
+            else:
+                raise
+
+    next = __next__
+
+    def __iter__(self):
+        return self
+
+    def
to_xml(self): + """Override to first fetch everything""" + for item in self: + pass + return ResponseGroup.to_xml(self) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/elasticache/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/elasticache/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..73d28c9f9665ffd21d4b6e788013f8473c13c6a4 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/elasticache/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the AWS ElastiCache service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.elasticache.layer1 import ElastiCacheConnection + return get_regions('elasticache', connection_cls=ElastiCacheConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/elasticache/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/elasticache/layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..62bdefd18797bcfd5c88d405e75a9626dbc7c3ac --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/elasticache/layer1.py @@ -0,0 +1,1664 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo + + +class ElastiCacheConnection(AWSQueryConnection): + """ + Amazon ElastiCache + Amazon ElastiCache is a web service that makes it easier to set + up, operate, and scale a distributed cache in the cloud. + + With ElastiCache, customers gain all of the benefits of a high- + performance, in-memory cache with far less of the administrative + burden of launching and managing a distributed cache. The service + makes set-up, scaling, and cluster failure handling much simpler + than in a self-managed cache deployment. + + In addition, through integration with Amazon CloudWatch, customers + get enhanced visibility into the key performance statistics + associated with their cache and can receive alarms if a part of + their cache runs hot. + """ + APIVersion = "2013-06-15" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "elasticache.us-east-1.amazonaws.com" + + def __init__(self, **kwargs): + region = kwargs.get('region') + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + else: + del kwargs['region'] + kwargs['host'] = region.endpoint + super(ElastiCacheConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def authorize_cache_security_group_ingress(self, + cache_security_group_name, + ec2_security_group_name, + ec2_security_group_owner_id): + """ + The AuthorizeCacheSecurityGroupIngress operation allows + network ingress to a cache security group. Applications using + ElastiCache must be running on Amazon EC2, and Amazon EC2 + security groups are used as the authorization mechanism. + You cannot authorize ingress from an Amazon EC2 security group + in one Region to an ElastiCache cluster in another Region. + + :type cache_security_group_name: string + :param cache_security_group_name: The cache security group which will + allow network ingress. + + :type ec2_security_group_name: string + :param ec2_security_group_name: The Amazon EC2 security group to be + authorized for ingress to the cache security group. + + :type ec2_security_group_owner_id: string + :param ec2_security_group_owner_id: The AWS account number of the + Amazon EC2 security group owner. Note that this is not the same + thing as an AWS access key ID - you must provide a valid AWS + account number for this parameter. 
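+
+        Example (an illustrative sketch; the security group names and
+        the account number are placeholders, and configured AWS
+        credentials are assumed):
+
+            >>> from boto.elasticache.layer1 import ElastiCacheConnection
+            >>> conn = ElastiCacheConnection()
+            >>> conn.authorize_cache_security_group_ingress(
+            ...     'mycachesecuritygroup', 'myec2securitygroup',
+            ...     '123456789012')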
+ + """ + params = { + 'CacheSecurityGroupName': cache_security_group_name, + 'EC2SecurityGroupName': ec2_security_group_name, + 'EC2SecurityGroupOwnerId': ec2_security_group_owner_id, + } + return self._make_request( + action='AuthorizeCacheSecurityGroupIngress', + verb='POST', + path='/', params=params) + + def create_cache_cluster(self, cache_cluster_id, num_cache_nodes=None, + cache_node_type=None, engine=None, + replication_group_id=None, engine_version=None, + cache_parameter_group_name=None, + cache_subnet_group_name=None, + cache_security_group_names=None, + security_group_ids=None, snapshot_arns=None, + preferred_availability_zone=None, + preferred_maintenance_window=None, port=None, + notification_topic_arn=None, + auto_minor_version_upgrade=None): + """ + The CreateCacheCluster operation creates a new cache cluster. + All nodes in the cache cluster run the same protocol-compliant + cache engine software - either Memcached or Redis. + + :type cache_cluster_id: string + :param cache_cluster_id: + The cache cluster identifier. This parameter is stored as a lowercase + string. + + Constraints: + + + + Must contain from 1 to 20 alphanumeric characters or hyphens. + + First character must be a letter. + + Cannot end with a hyphen or contain two consecutive hyphens. + + :type replication_group_id: string + :param replication_group_id: The replication group to which this cache + cluster should belong. If this parameter is specified, the cache + cluster will be added to the specified replication group as a read + replica; otherwise, the cache cluster will be a standalone primary + that is not part of any replication group. + + :type num_cache_nodes: integer + :param num_cache_nodes: The initial number of cache nodes that the + cache cluster will have. + For a Memcached cluster, valid values are between 1 and 20. If you need + to exceed this limit, please fill out the ElastiCache Limit + Increase Request form at ``_ . + + For Redis, only single-node cache clusters are supported at this time, + so the value for this parameter must be 1. + + :type cache_node_type: string + :param cache_node_type: The compute and memory capacity of the nodes in + the cache cluster. + Valid values for Memcached: + + `cache.t1.micro` | `cache.m1.small` | `cache.m1.medium` | + `cache.m1.large` | `cache.m1.xlarge` | `cache.m3.xlarge` | + `cache.m3.2xlarge` | `cache.m2.xlarge` | `cache.m2.2xlarge` | + `cache.m2.4xlarge` | `cache.c1.xlarge` + + Valid values for Redis: + + `cache.t1.micro` | `cache.m1.small` | `cache.m1.medium` | + `cache.m1.large` | `cache.m1.xlarge` | `cache.m2.xlarge` | + `cache.m2.2xlarge` | `cache.m2.4xlarge` | `cache.c1.xlarge` + + For a complete listing of cache node types and specifications, see `. + + :type engine: string + :param engine: The name of the cache engine to be used for this cache + cluster. + Valid values for this parameter are: + + `memcached` | `redis` + + :type engine_version: string + :param engine_version: The version number of the cache engine to be + used for this cluster. To view the supported cache engine versions, + use the DescribeCacheEngineVersions operation. + + :type cache_parameter_group_name: string + :param cache_parameter_group_name: The name of the cache parameter + group to associate with this cache cluster. If this argument is + omitted, the default cache parameter group for the specified engine + will be used. + + :type cache_subnet_group_name: string + :param cache_subnet_group_name: The name of the cache subnet group to + be used for the cache cluster. 
+ Use this parameter only when you are creating a cluster in an Amazon + Virtual Private Cloud (VPC). + + :type cache_security_group_names: list + :param cache_security_group_names: A list of cache security group names + to associate with this cache cluster. + Use this parameter only when you are creating a cluster outside of an + Amazon Virtual Private Cloud (VPC). + + :type security_group_ids: list + :param security_group_ids: One or more VPC security groups associated + with the cache cluster. + Use this parameter only when you are creating a cluster in an Amazon + Virtual Private Cloud (VPC). + + :type snapshot_arns: list + :param snapshot_arns: A single-element string list containing an Amazon + Resource Name (ARN) that uniquely identifies a Redis RDB snapshot + file stored in Amazon S3. The snapshot file will be used to + populate the Redis cache in the new cache cluster. The Amazon S3 + object name in the ARN cannot contain any commas. + Here is an example of an Amazon S3 ARN: + `arn:aws:s3:::my_bucket/snapshot1.rdb` + + **Note:** This parameter is only valid if the `Engine` parameter is + `redis`. + + :type preferred_availability_zone: string + :param preferred_availability_zone: The EC2 Availability Zone in which + the cache cluster will be created. + All cache nodes belonging to a cache cluster are placed in the + preferred availability zone. + + Default: System chosen availability zone. + + :type preferred_maintenance_window: string + :param preferred_maintenance_window: The weekly time range (in UTC) + during which system maintenance can occur. + Example: `sun:05:00-sun:09:00` + + :type port: integer + :param port: The port number on which each of the cache nodes will + accept connections. + + :type notification_topic_arn: string + :param notification_topic_arn: + The Amazon Resource Name (ARN) of the Amazon Simple Notification + Service (SNS) topic to which notifications will be sent. + + The Amazon SNS topic owner must be the same as the cache cluster owner. + + :type auto_minor_version_upgrade: boolean + :param auto_minor_version_upgrade: Determines whether minor engine + upgrades will be applied automatically to the cache cluster during + the maintenance window. A value of `True` allows these upgrades to + occur; `False` disables automatic upgrades. 
+ Default: `True` + + """ + params = { + 'CacheClusterId': cache_cluster_id, + } + if num_cache_nodes is not None: + params['NumCacheNodes'] = num_cache_nodes + if cache_node_type is not None: + params['CacheNodeType'] = cache_node_type + if engine is not None: + params['Engine'] = engine + if replication_group_id is not None: + params['ReplicationGroupId'] = replication_group_id + if engine_version is not None: + params['EngineVersion'] = engine_version + if cache_parameter_group_name is not None: + params['CacheParameterGroupName'] = cache_parameter_group_name + if cache_subnet_group_name is not None: + params['CacheSubnetGroupName'] = cache_subnet_group_name + if cache_security_group_names is not None: + self.build_list_params(params, + cache_security_group_names, + 'CacheSecurityGroupNames.member') + if security_group_ids is not None: + self.build_list_params(params, + security_group_ids, + 'SecurityGroupIds.member') + if snapshot_arns is not None: + self.build_list_params(params, + snapshot_arns, + 'SnapshotArns.member') + if preferred_availability_zone is not None: + params['PreferredAvailabilityZone'] = preferred_availability_zone + if preferred_maintenance_window is not None: + params['PreferredMaintenanceWindow'] = preferred_maintenance_window + if port is not None: + params['Port'] = port + if notification_topic_arn is not None: + params['NotificationTopicArn'] = notification_topic_arn + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str( + auto_minor_version_upgrade).lower() + return self._make_request( + action='CreateCacheCluster', + verb='POST', + path='/', params=params) + + def create_cache_parameter_group(self, cache_parameter_group_name, + cache_parameter_group_family, + description): + """ + The CreateCacheParameterGroup operation creates a new cache + parameter group. A cache parameter group is a collection of + parameters that you apply to all of the nodes in a cache + cluster. + + :type cache_parameter_group_name: string + :param cache_parameter_group_name: A user-specified name for the cache + parameter group. + + :type cache_parameter_group_family: string + :param cache_parameter_group_family: The name of the cache parameter + group family the cache parameter group can be used with. + Valid values are: `memcached1.4` | `redis2.6` + + :type description: string + :param description: A user-specified description for the cache + parameter group. + + """ + params = { + 'CacheParameterGroupName': cache_parameter_group_name, + 'CacheParameterGroupFamily': cache_parameter_group_family, + 'Description': description, + } + return self._make_request( + action='CreateCacheParameterGroup', + verb='POST', + path='/', params=params) + + def create_cache_security_group(self, cache_security_group_name, + description): + """ + The CreateCacheSecurityGroup operation creates a new cache + security group. Use a cache security group to control access + to one or more cache clusters. + + Cache security groups are only used when you are creating a + cluster outside of an Amazon Virtual Private Cloud (VPC). If + you are creating a cluster inside of a VPC, use a cache subnet + group instead. For more information, see + CreateCacheSubnetGroup . + + :type cache_security_group_name: string + :param cache_security_group_name: A name for the cache security group. + This value is stored as a lowercase string. + Constraints: Must contain no more than 255 alphanumeric characters. + Must not be the word "Default". 
+
+        Example: `mysecuritygroup`
+
+        :type description: string
+        :param description: A description for the cache security group.
+
+        """
+        params = {
+            'CacheSecurityGroupName': cache_security_group_name,
+            'Description': description,
+        }
+        return self._make_request(
+            action='CreateCacheSecurityGroup',
+            verb='POST',
+            path='/', params=params)
+
+    def create_cache_subnet_group(self, cache_subnet_group_name,
+                                  cache_subnet_group_description, subnet_ids):
+        """
+        The CreateCacheSubnetGroup operation creates a new cache
+        subnet group.
+
+        Use this parameter only when you are creating a cluster in an
+        Amazon Virtual Private Cloud (VPC).
+
+        :type cache_subnet_group_name: string
+        :param cache_subnet_group_name: A name for the cache subnet group. This
+            value is stored as a lowercase string.
+        Constraints: Must contain no more than 255 alphanumeric characters or
+            hyphens.
+
+        Example: `mysubnetgroup`
+
+        :type cache_subnet_group_description: string
+        :param cache_subnet_group_description: A description for the cache
+            subnet group.
+
+        :type subnet_ids: list
+        :param subnet_ids: A list of VPC subnet IDs for the cache subnet group.
+
+        """
+        params = {
+            'CacheSubnetGroupName': cache_subnet_group_name,
+            'CacheSubnetGroupDescription': cache_subnet_group_description,
+        }
+        self.build_list_params(params,
+                               subnet_ids,
+                               'SubnetIds.member')
+        return self._make_request(
+            action='CreateCacheSubnetGroup',
+            verb='POST',
+            path='/', params=params)
+
+    def create_replication_group(self, replication_group_id,
+                                 primary_cluster_id,
+                                 replication_group_description):
+        """
+        The CreateReplicationGroup operation creates a replication
+        group. A replication group is a collection of cache clusters,
+        where one of the clusters is a read/write primary and the
+        other clusters are read-only replicas. Writes to the primary
+        are automatically propagated to the replicas.
+
+        When you create a replication group, you must specify an
+        existing cache cluster that is in the primary role. When the
+        replication group has been successfully created, you can add
+        one or more read replicas to it, up to a total of five read
+        replicas.
+
+        :type replication_group_id: string
+        :param replication_group_id:
+            The replication group identifier. This parameter is stored as a
+            lowercase string.
+
+        Constraints:
+
+
+        + Must contain from 1 to 20 alphanumeric characters or hyphens.
+        + First character must be a letter.
+        + Cannot end with a hyphen or contain two consecutive hyphens.
+
+        :type primary_cluster_id: string
+        :param primary_cluster_id: The identifier of the cache cluster that
+            will serve as the primary for this replication group. This cache
+            cluster must already exist and have a status of available .
+
+        :type replication_group_description: string
+        :param replication_group_description: A user-specified description for
+            the replication group.
+
+        """
+        params = {
+            'ReplicationGroupId': replication_group_id,
+            'PrimaryClusterId': primary_cluster_id,
+            'ReplicationGroupDescription': replication_group_description,
+        }
+        return self._make_request(
+            action='CreateReplicationGroup',
+            verb='POST',
+            path='/', params=params)
+
+    def delete_cache_cluster(self, cache_cluster_id):
+        """
+        The DeleteCacheCluster operation deletes a previously
+        provisioned cache cluster. DeleteCacheCluster deletes all
+        associated cache nodes, node endpoints and the cache cluster
+        itself.
When you receive a successful response from this + operation, Amazon ElastiCache immediately begins deleting the + cache cluster; you cannot cancel or revert this operation. + + :type cache_cluster_id: string + :param cache_cluster_id: The cache cluster identifier for the cluster + to be deleted. This parameter is not case sensitive. + + """ + params = {'CacheClusterId': cache_cluster_id, } + return self._make_request( + action='DeleteCacheCluster', + verb='POST', + path='/', params=params) + + def delete_cache_parameter_group(self, cache_parameter_group_name): + """ + The DeleteCacheParameterGroup operation deletes the specified + cache parameter group. You cannot delete a cache parameter + group if it is associated with any cache clusters. + + :type cache_parameter_group_name: string + :param cache_parameter_group_name: + The name of the cache parameter group to delete. + + The specified cache security group must not be associated with any + cache clusters. + + """ + params = { + 'CacheParameterGroupName': cache_parameter_group_name, + } + return self._make_request( + action='DeleteCacheParameterGroup', + verb='POST', + path='/', params=params) + + def delete_cache_security_group(self, cache_security_group_name): + """ + The DeleteCacheSecurityGroup operation deletes a cache + security group. + You cannot delete a cache security group if it is associated + with any cache clusters. + + :type cache_security_group_name: string + :param cache_security_group_name: + The name of the cache security group to delete. + + You cannot delete the default security group. + + """ + params = { + 'CacheSecurityGroupName': cache_security_group_name, + } + return self._make_request( + action='DeleteCacheSecurityGroup', + verb='POST', + path='/', params=params) + + def delete_cache_subnet_group(self, cache_subnet_group_name): + """ + The DeleteCacheSubnetGroup operation deletes a cache subnet + group. + You cannot delete a cache subnet group if it is associated + with any cache clusters. + + :type cache_subnet_group_name: string + :param cache_subnet_group_name: The name of the cache subnet group to + delete. + Constraints: Must contain no more than 255 alphanumeric characters or + hyphens. + + """ + params = {'CacheSubnetGroupName': cache_subnet_group_name, } + return self._make_request( + action='DeleteCacheSubnetGroup', + verb='POST', + path='/', params=params) + + def delete_replication_group(self, replication_group_id): + """ + The DeleteReplicationGroup operation deletes an existing + replication group. DeleteReplicationGroup deletes the primary + cache cluster and all of the read replicas in the replication + group. When you receive a successful response from this + operation, Amazon ElastiCache immediately begins deleting the + entire replication group; you cannot cancel or revert this + operation. + + :type replication_group_id: string + :param replication_group_id: The identifier for the replication group + to be deleted. This parameter is not case sensitive. + + """ + params = {'ReplicationGroupId': replication_group_id, } + return self._make_request( + action='DeleteReplicationGroup', + verb='POST', + path='/', params=params) + + def describe_cache_clusters(self, cache_cluster_id=None, + max_records=None, marker=None, + show_cache_node_info=None): + """ + The DescribeCacheClusters operation returns information about + all provisioned cache clusters if no cache cluster identifier + is specified, or about a specific cache cluster if a cache + cluster identifier is supplied. 
+
+
+        By default, abbreviated information about the cache
+        cluster(s) will be returned. You can use the optional
+        ShowDetails flag to retrieve detailed information about the
+        cache nodes associated with the cache clusters. These details
+        include the DNS address and port for the cache node endpoint.
+
+        If the cluster is in the CREATING state, only cluster level
+        information will be displayed until all of the nodes are
+        successfully provisioned.
+
+        If the cluster is in the DELETING state, only cluster level
+        information will be displayed.
+
+        If cache nodes are currently being added to the cache cluster,
+        node endpoint information and creation time for the additional
+        nodes will not be displayed until they are completely
+        provisioned. When the cache cluster state is available , the
+        cluster is ready for use.
+
+        If cache nodes are currently being removed from the cache
+        cluster, no endpoint information for the removed nodes is
+        displayed.
+
+        :type cache_cluster_id: string
+        :param cache_cluster_id: The user-supplied cluster identifier. If this
+            parameter is specified, only information about that specific cache
+            cluster is returned. This parameter isn't case sensitive.
+
+        :type max_records: integer
+        :param max_records: The maximum number of records to include in the
+            response. If more records exist than the specified `MaxRecords`
+            value, a marker is included in the response so that the remaining
+            results can be retrieved.
+        Default: 100
+
+        Constraints: minimum 20; maximum 100.
+
+        :type marker: string
+        :param marker: An optional marker returned from a prior request. Use
+            this marker for pagination of results from this operation. If this
+            parameter is specified, the response includes only records beyond
+            the marker, up to the value specified by MaxRecords .
+
+        :type show_cache_node_info: boolean
+        :param show_cache_node_info: An optional flag that can be included in
+            the DescribeCacheClusters request to retrieve information about the
+            individual cache nodes.
+
+        """
+        params = {}
+        if cache_cluster_id is not None:
+            params['CacheClusterId'] = cache_cluster_id
+        if max_records is not None:
+            params['MaxRecords'] = max_records
+        if marker is not None:
+            params['Marker'] = marker
+        if show_cache_node_info is not None:
+            params['ShowCacheNodeInfo'] = str(
+                show_cache_node_info).lower()
+        return self._make_request(
+            action='DescribeCacheClusters',
+            verb='POST',
+            path='/', params=params)
+
+    def describe_cache_engine_versions(self, engine=None,
+                                       engine_version=None,
+                                       cache_parameter_group_family=None,
+                                       max_records=None, marker=None,
+                                       default_only=None):
+        """
+        The DescribeCacheEngineVersions operation returns a list of
+        the available cache engines and their versions.
+
+        :type engine: string
+        :param engine: The cache engine to return. Valid values: `memcached` |
+            `redis`
+
+        :type engine_version: string
+        :param engine_version: The cache engine version to return.
+        Example: `1.4.14`
+
+        :type cache_parameter_group_family: string
+        :param cache_parameter_group_family:
+            The name of a specific cache parameter group family to return details
+            for.
+
+        Constraints:
+
+
+        + Must be 1 to 255 alphanumeric characters
+        + First character must be a letter
+        + Cannot end with a hyphen or contain two consecutive hyphens
+
+        :type max_records: integer
+        :param max_records: The maximum number of records to include in the
+            response.
If more records exist than the specified `MaxRecords` + value, a marker is included in the response so that the remaining + results can be retrieved. + Default: 100 + + Constraints: minimum 20; maximum 100. + + :type marker: string + :param marker: An optional marker returned from a prior request. Use + this marker for pagination of results from this operation. If this + parameter is specified, the response includes only records beyond + the marker, up to the value specified by MaxRecords . + + :type default_only: boolean + :param default_only: If true , specifies that only the default version + of the specified engine or engine and major version combination is + to be returned. + + """ + params = {} + if engine is not None: + params['Engine'] = engine + if engine_version is not None: + params['EngineVersion'] = engine_version + if cache_parameter_group_family is not None: + params['CacheParameterGroupFamily'] = cache_parameter_group_family + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + if default_only is not None: + params['DefaultOnly'] = str( + default_only).lower() + return self._make_request( + action='DescribeCacheEngineVersions', + verb='POST', + path='/', params=params) + + def describe_cache_parameter_groups(self, + cache_parameter_group_name=None, + max_records=None, marker=None): + """ + The DescribeCacheParameterGroups operation returns a list of + cache parameter group descriptions. If a cache parameter group + name is specified, the list will contain only the descriptions + for that group. + + :type cache_parameter_group_name: string + :param cache_parameter_group_name: The name of a specific cache + parameter group to return details for. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a marker is included in the response so that the remaining + results can be retrieved. + Default: 100 + + Constraints: minimum 20; maximum 100. + + :type marker: string + :param marker: An optional marker returned from a prior request. Use + this marker for pagination of results from this operation. If this + parameter is specified, the response includes only records beyond + the marker, up to the value specified by MaxRecords . + + """ + params = {} + if cache_parameter_group_name is not None: + params['CacheParameterGroupName'] = cache_parameter_group_name + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeCacheParameterGroups', + verb='POST', + path='/', params=params) + + def describe_cache_parameters(self, cache_parameter_group_name, + source=None, max_records=None, marker=None): + """ + The DescribeCacheParameters operation returns the detailed + parameter list for a particular cache parameter group. + + :type cache_parameter_group_name: string + :param cache_parameter_group_name: The name of a specific cache + parameter group to return details for. + + :type source: string + :param source: The parameter types to return. + Valid values: `user` | `system` | `engine-default` + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a marker is included in the response so that the remaining + results can be retrieved. 
+ Default: 100 + + Constraints: minimum 20; maximum 100. + + :type marker: string + :param marker: An optional marker returned from a prior request. Use + this marker for pagination of results from this operation. If this + parameter is specified, the response includes only records beyond + the marker, up to the value specified by MaxRecords . + + """ + params = { + 'CacheParameterGroupName': cache_parameter_group_name, + } + if source is not None: + params['Source'] = source + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeCacheParameters', + verb='POST', + path='/', params=params) + + def describe_cache_security_groups(self, cache_security_group_name=None, + max_records=None, marker=None): + """ + The DescribeCacheSecurityGroups operation returns a list of + cache security group descriptions. If a cache security group + name is specified, the list will contain only the description + of that group. + + :type cache_security_group_name: string + :param cache_security_group_name: The name of the cache security group + to return details for. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a marker is included in the response so that the remaining + results can be retrieved. + Default: 100 + + Constraints: minimum 20; maximum 100. + + :type marker: string + :param marker: An optional marker returned from a prior request. Use + this marker for pagination of results from this operation. If this + parameter is specified, the response includes only records beyond + the marker, up to the value specified by MaxRecords . + + """ + params = {} + if cache_security_group_name is not None: + params['CacheSecurityGroupName'] = cache_security_group_name + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeCacheSecurityGroups', + verb='POST', + path='/', params=params) + + def describe_cache_subnet_groups(self, cache_subnet_group_name=None, + max_records=None, marker=None): + """ + The DescribeCacheSubnetGroups operation returns a list of + cache subnet group descriptions. If a subnet group name is + specified, the list will contain only the description of that + group. + + :type cache_subnet_group_name: string + :param cache_subnet_group_name: The name of the cache subnet group to + return details for. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a marker is included in the response so that the remaining + results can be retrieved. + Default: 100 + + Constraints: minimum 20; maximum 100. + + :type marker: string + :param marker: An optional marker returned from a prior request. Use + this marker for pagination of results from this operation. If this + parameter is specified, the response includes only records beyond + the marker, up to the value specified by MaxRecords . 
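+
+        Example (a pagination sketch with placeholder values, assuming
+        `conn` is an ElastiCacheConnection; the marker returned in one
+        response is passed back in to fetch the next page):
+
+            >>> first_page = conn.describe_cache_subnet_groups(max_records=20)
+            >>> # feed the Marker from first_page back in via the marker
+            >>> # argument to retrieve the next page of results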
+ + """ + params = {} + if cache_subnet_group_name is not None: + params['CacheSubnetGroupName'] = cache_subnet_group_name + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeCacheSubnetGroups', + verb='POST', + path='/', params=params) + + def describe_engine_default_parameters(self, + cache_parameter_group_family, + max_records=None, marker=None): + """ + The DescribeEngineDefaultParameters operation returns the + default engine and system parameter information for the + specified cache engine. + + :type cache_parameter_group_family: string + :param cache_parameter_group_family: The name of the cache parameter + group family. Valid values are: `memcached1.4` | `redis2.6` + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a marker is included in the response so that the remaining + results can be retrieved. + Default: 100 + + Constraints: minimum 20; maximum 100. + + :type marker: string + :param marker: An optional marker returned from a prior request. Use + this marker for pagination of results from this operation. If this + parameter is specified, the response includes only records beyond + the marker, up to the value specified by MaxRecords . + + """ + params = { + 'CacheParameterGroupFamily': cache_parameter_group_family, + } + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeEngineDefaultParameters', + verb='POST', + path='/', params=params) + + def describe_events(self, source_identifier=None, source_type=None, + start_time=None, end_time=None, duration=None, + max_records=None, marker=None): + """ + The DescribeEvents operation returns events related to cache + clusters, cache security groups, and cache parameter groups. + You can obtain events specific to a particular cache cluster, + cache security group, or cache parameter group by providing + the name as a parameter. + + By default, only the events occurring within the last hour are + returned; however, you can retrieve up to 14 days' worth of + events if necessary. + + :type source_identifier: string + :param source_identifier: The identifier of the event source for which + events will be returned. If not specified, then all sources are + included in the response. + + :type source_type: string + :param source_type: The event source to retrieve events for. If no + value is specified, all events are returned. + Valid values are: `cache-cluster` | `cache-parameter-group` | `cache- + security-group` | `cache-subnet-group` + + :type start_time: timestamp + :param start_time: The beginning of the time interval to retrieve + events for, specified in ISO 8601 format. + + :type end_time: timestamp + :param end_time: The end of the time interval for which to retrieve + events, specified in ISO 8601 format. + + :type duration: integer + :param duration: The number of minutes' worth of events to retrieve. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a marker is included in the response so that the remaining + results can be retrieved. + Default: 100 + + Constraints: minimum 20; maximum 100. 
+ + :type marker: string + :param marker: An optional marker returned from a prior request. Use + this marker for pagination of results from this operation. If this + parameter is specified, the response includes only records beyond + the marker, up to the value specified by MaxRecords . + + """ + params = {} + if source_identifier is not None: + params['SourceIdentifier'] = source_identifier + if source_type is not None: + params['SourceType'] = source_type + if start_time is not None: + params['StartTime'] = start_time + if end_time is not None: + params['EndTime'] = end_time + if duration is not None: + params['Duration'] = duration + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeEvents', + verb='POST', + path='/', params=params) + + def describe_replication_groups(self, replication_group_id=None, + max_records=None, marker=None): + """ + The DescribeReplicationGroups operation returns information + about a particular replication group. If no identifier is + specified, DescribeReplicationGroups returns information about + all replication groups. + + :type replication_group_id: string + :param replication_group_id: The identifier for the replication group + to be described. This parameter is not case sensitive. + If you do not specify this parameter, information about all replication + groups is returned. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a marker is included in the response so that the remaining + results can be retrieved. + Default: 100 + + Constraints: minimum 20; maximum 100. + + :type marker: string + :param marker: An optional marker returned from a prior request. Use + this marker for pagination of results from this operation. If this + parameter is specified, the response includes only records beyond + the marker, up to the value specified by MaxRecords . + + """ + params = {} + if replication_group_id is not None: + params['ReplicationGroupId'] = replication_group_id + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeReplicationGroups', + verb='POST', + path='/', params=params) + + def describe_reserved_cache_nodes(self, reserved_cache_node_id=None, + reserved_cache_nodes_offering_id=None, + cache_node_type=None, duration=None, + product_description=None, + offering_type=None, max_records=None, + marker=None): + """ + The DescribeReservedCacheNodes operation returns information + about reserved cache nodes for this account, or about a + specified reserved cache node. + + :type reserved_cache_node_id: string + :param reserved_cache_node_id: The reserved cache node identifier + filter value. Use this parameter to show only the reservation that + matches the specified reservation ID. + + :type reserved_cache_nodes_offering_id: string + :param reserved_cache_nodes_offering_id: The offering identifier filter + value. Use this parameter to show only purchased reservations + matching the specified offering identifier. + + :type cache_node_type: string + :param cache_node_type: The cache node type filter value. Use this + parameter to show only those reservations matching the specified + cache node type. + + :type duration: string + :param duration: The duration filter value, specified in years or + seconds. 
Use this parameter to show only reservations for this + duration. + Valid Values: `1 | 3 | 31536000 | 94608000` + + :type product_description: string + :param product_description: The product description filter value. Use + this parameter to show only those reservations matching the + specified product description. + + :type offering_type: string + :param offering_type: The offering type filter value. Use this + parameter to show only the available offerings matching the + specified offering type. + Valid values: `"Light Utilization" | "Medium Utilization" | "Heavy + Utilization" ` + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a marker is included in the response so that the remaining + results can be retrieved. + Default: 100 + + Constraints: minimum 20; maximum 100. + + :type marker: string + :param marker: An optional marker returned from a prior request. Use + this marker for pagination of results from this operation. If this + parameter is specified, the response includes only records beyond + the marker, up to the value specified by MaxRecords . + + """ + params = {} + if reserved_cache_node_id is not None: + params['ReservedCacheNodeId'] = reserved_cache_node_id + if reserved_cache_nodes_offering_id is not None: + params['ReservedCacheNodesOfferingId'] = reserved_cache_nodes_offering_id + if cache_node_type is not None: + params['CacheNodeType'] = cache_node_type + if duration is not None: + params['Duration'] = duration + if product_description is not None: + params['ProductDescription'] = product_description + if offering_type is not None: + params['OfferingType'] = offering_type + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeReservedCacheNodes', + verb='POST', + path='/', params=params) + + def describe_reserved_cache_nodes_offerings(self, + reserved_cache_nodes_offering_id=None, + cache_node_type=None, + duration=None, + product_description=None, + offering_type=None, + max_records=None, + marker=None): + """ + The DescribeReservedCacheNodesOfferings operation lists + available reserved cache node offerings. + + :type reserved_cache_nodes_offering_id: string + :param reserved_cache_nodes_offering_id: The offering identifier filter + value. Use this parameter to show only the available offering that + matches the specified reservation identifier. + Example: `438012d3-4052-4cc7-b2e3-8d3372e0e706` + + :type cache_node_type: string + :param cache_node_type: The cache node type filter value. Use this + parameter to show only the available offerings matching the + specified cache node type. + + :type duration: string + :param duration: Duration filter value, specified in years or seconds. + Use this parameter to show only reservations for a given duration. + Valid Values: `1 | 3 | 31536000 | 94608000` + + :type product_description: string + :param product_description: The product description filter value. Use + this parameter to show only the available offerings matching the + specified product description. + + :type offering_type: string + :param offering_type: The offering type filter value. Use this + parameter to show only the available offerings matching the + specified offering type. 
+ Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy + Utilization" ` + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a marker is included in the response so that the remaining + results can be retrieved. + Default: 100 + + Constraints: minimum 20; maximum 100. + + :type marker: string + :param marker: An optional marker returned from a prior request. Use + this marker for pagination of results from this operation. If this + parameter is specified, the response includes only records beyond + the marker, up to the value specified by MaxRecords . + + """ + params = {} + if reserved_cache_nodes_offering_id is not None: + params['ReservedCacheNodesOfferingId'] = reserved_cache_nodes_offering_id + if cache_node_type is not None: + params['CacheNodeType'] = cache_node_type + if duration is not None: + params['Duration'] = duration + if product_description is not None: + params['ProductDescription'] = product_description + if offering_type is not None: + params['OfferingType'] = offering_type + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeReservedCacheNodesOfferings', + verb='POST', + path='/', params=params) + + def modify_cache_cluster(self, cache_cluster_id, num_cache_nodes=None, + cache_node_ids_to_remove=None, + cache_security_group_names=None, + security_group_ids=None, + preferred_maintenance_window=None, + notification_topic_arn=None, + cache_parameter_group_name=None, + notification_topic_status=None, + apply_immediately=None, engine_version=None, + auto_minor_version_upgrade=None): + """ + The ModifyCacheCluster operation modifies the settings for a + cache cluster. You can use this operation to change one or + more cluster configuration parameters by specifying the + parameters and the new values. + + :type cache_cluster_id: string + :param cache_cluster_id: The cache cluster identifier. This value is + stored as a lowercase string. + + :type num_cache_nodes: integer + :param num_cache_nodes: The number of cache nodes that the cache + cluster should have. If the value for NumCacheNodes is greater than + the existing number of cache nodes, then more nodes will be added. + If the value is less than the existing number of cache nodes, then + cache nodes will be removed. + If you are removing cache nodes, you must use the CacheNodeIdsToRemove + parameter to provide the IDs of the specific cache nodes to be + removed. + + :type cache_node_ids_to_remove: list + :param cache_node_ids_to_remove: A list of cache node IDs to be + removed. A node ID is a numeric identifier (0001, 0002, etc.). This + parameter is only valid when NumCacheNodes is less than the + existing number of cache nodes. The number of cache node IDs + supplied in this parameter must match the difference between the + existing number of cache nodes in the cluster and the value of + NumCacheNodes in the request. + + :type cache_security_group_names: list + :param cache_security_group_names: A list of cache security group names + to authorize on this cache cluster. This change is asynchronously + applied as soon as possible. + This parameter can be used only with clusters that are created outside + of an Amazon Virtual Private Cloud (VPC). + + Constraints: Must contain no more than 255 alphanumeric characters. + Must not be "Default". 
+ + :type security_group_ids: list + :param security_group_ids: Specifies the VPC Security Groups associated + with the cache cluster. + This parameter can be used only with clusters that are created in an + Amazon Virtual Private Cloud (VPC). + + :type preferred_maintenance_window: string + :param preferred_maintenance_window: The weekly time range (in UTC) + during which system maintenance can occur. Note that system + maintenance may result in an outage. This change is made + immediately. If you are moving this window to the current time, + there must be at least 120 minutes between the current time and end + of the window to ensure that pending changes are applied. + + :type notification_topic_arn: string + :param notification_topic_arn: + The Amazon Resource Name (ARN) of the SNS topic to which notifications + will be sent. + + The SNS topic owner must be same as the cache cluster owner. + + :type cache_parameter_group_name: string + :param cache_parameter_group_name: The name of the cache parameter + group to apply to this cache cluster. This change is asynchronously + applied as soon as possible for parameters when the + ApplyImmediately parameter is specified as true for this request. + + :type notification_topic_status: string + :param notification_topic_status: The status of the Amazon SNS + notification topic. Notifications are sent only if the status is + active . + Valid values: `active` | `inactive` + + :type apply_immediately: boolean + :param apply_immediately: If `True`, this parameter causes the + modifications in this request and any pending modifications to be + applied, asynchronously and as soon as possible, regardless of the + PreferredMaintenanceWindow setting for the cache cluster. + If `False`, then changes to the cache cluster are applied on the next + maintenance reboot, or the next failure reboot, whichever occurs + first. + + Valid values: `True` | `False` + + Default: `False` + + :type engine_version: string + :param engine_version: The upgraded version of the cache engine to be + run on the cache cluster nodes. + + :type auto_minor_version_upgrade: boolean + :param auto_minor_version_upgrade: If `True`, then minor engine + upgrades will be applied automatically to the cache cluster during + the maintenance window. 
+ Valid values: `True` | `False` + + Default: `True` + + """ + params = {'CacheClusterId': cache_cluster_id, } + if num_cache_nodes is not None: + params['NumCacheNodes'] = num_cache_nodes + if cache_node_ids_to_remove is not None: + self.build_list_params(params, + cache_node_ids_to_remove, + 'CacheNodeIdsToRemove.member') + if cache_security_group_names is not None: + self.build_list_params(params, + cache_security_group_names, + 'CacheSecurityGroupNames.member') + if security_group_ids is not None: + self.build_list_params(params, + security_group_ids, + 'SecurityGroupIds.member') + if preferred_maintenance_window is not None: + params['PreferredMaintenanceWindow'] = preferred_maintenance_window + if notification_topic_arn is not None: + params['NotificationTopicArn'] = notification_topic_arn + if cache_parameter_group_name is not None: + params['CacheParameterGroupName'] = cache_parameter_group_name + if notification_topic_status is not None: + params['NotificationTopicStatus'] = notification_topic_status + if apply_immediately is not None: + params['ApplyImmediately'] = str( + apply_immediately).lower() + if engine_version is not None: + params['EngineVersion'] = engine_version + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str( + auto_minor_version_upgrade).lower() + return self._make_request( + action='ModifyCacheCluster', + verb='POST', + path='/', params=params) + + def modify_cache_parameter_group(self, cache_parameter_group_name, + parameter_name_values): + """ + The ModifyCacheParameterGroup operation modifies the + parameters of a cache parameter group. You can modify up to 20 + parameters in a single request by submitting a list parameter + name and value pairs. + + :type cache_parameter_group_name: string + :param cache_parameter_group_name: The name of the cache parameter + group to modify. + + :type parameter_name_values: list + :param parameter_name_values: An array of parameter names and values + for the parameter update. You must supply at least one parameter + name and value; subsequent arguments are optional. A maximum of 20 + parameters may be modified per request. + + """ + params = { + 'CacheParameterGroupName': cache_parameter_group_name, + } + self.build_complex_list_params( + params, parameter_name_values, + 'ParameterNameValues.member', + ('ParameterName', 'ParameterValue')) + return self._make_request( + action='ModifyCacheParameterGroup', + verb='POST', + path='/', params=params) + + def modify_cache_subnet_group(self, cache_subnet_group_name, + cache_subnet_group_description=None, + subnet_ids=None): + """ + The ModifyCacheSubnetGroup operation modifies an existing + cache subnet group. + + :type cache_subnet_group_name: string + :param cache_subnet_group_name: The name for the cache subnet group. + This value is stored as a lowercase string. + Constraints: Must contain no more than 255 alphanumeric characters or + hyphens. + + Example: `mysubnetgroup` + + :type cache_subnet_group_description: string + :param cache_subnet_group_description: A description for the cache + subnet group. + + :type subnet_ids: list + :param subnet_ids: The EC2 subnet IDs for the cache subnet group. 
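+
+ Example (an illustrative sketch, not part of the original AWS API
+ documentation; ``conn`` stands for an already-constructed connection
+ to this service, and the group name and subnet IDs below are made up)::
+
+     # Update the description and subnet list of an existing group.
+     conn.modify_cache_subnet_group(
+         'mysubnetgroup',
+         cache_subnet_group_description='Updated description',
+         subnet_ids=['subnet-12345678', 'subnet-87654321'])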
+ + """ + params = {'CacheSubnetGroupName': cache_subnet_group_name, } + if cache_subnet_group_description is not None: + params['CacheSubnetGroupDescription'] = cache_subnet_group_description + if subnet_ids is not None: + self.build_list_params(params, + subnet_ids, + 'SubnetIds.member') + return self._make_request( + action='ModifyCacheSubnetGroup', + verb='POST', + path='/', params=params) + + def modify_replication_group(self, replication_group_id, + replication_group_description=None, + cache_security_group_names=None, + security_group_ids=None, + preferred_maintenance_window=None, + notification_topic_arn=None, + cache_parameter_group_name=None, + notification_topic_status=None, + apply_immediately=None, engine_version=None, + auto_minor_version_upgrade=None, + primary_cluster_id=None): + """ + The ModifyReplicationGroup operation modifies the settings for + a replication group. + + :type replication_group_id: string + :param replication_group_id: The identifier of the replication group to + modify. + + :type replication_group_description: string + :param replication_group_description: A description for the replication + group. Maximum length is 255 characters. + + :type cache_security_group_names: list + :param cache_security_group_names: A list of cache security group names + to authorize for the clusters in this replication group. This + change is asynchronously applied as soon as possible. + This parameter can be used only with replication groups containing + cache clusters running outside of an Amazon Virtual Private Cloud + (VPC). + + Constraints: Must contain no more than 255 alphanumeric characters. + Must not be "Default". + + :type security_group_ids: list + :param security_group_ids: Specifies the VPC Security Groups associated + with the cache clusters in the replication group. + This parameter can be used only with replication groups containing + cache clusters running in an Amazon Virtual Private Cloud (VPC). + + :type preferred_maintenance_window: string + :param preferred_maintenance_window: The weekly time range (in UTC) + during which replication group system maintenance can occur. Note + that system maintenance may result in an outage. This change is + made immediately. If you are moving this window to the current + time, there must be at least 120 minutes between the current time + and end of the window to ensure that pending changes are applied. + + :type notification_topic_arn: string + :param notification_topic_arn: + The Amazon Resource Name (ARN) of the SNS topic to which notifications + will be sent. + + The SNS topic owner must be same as the replication group owner. + + :type cache_parameter_group_name: string + :param cache_parameter_group_name: The name of the cache parameter + group to apply to all of the cache nodes in this replication group. + This change is asynchronously applied as soon as possible for + parameters when the ApplyImmediately parameter is specified as true + for this request. + + :type notification_topic_status: string + :param notification_topic_status: The status of the Amazon SNS + notification topic for the replication group. Notifications are + sent only if the status is active . + Valid values: `active` | `inactive` + + :type apply_immediately: boolean + :param apply_immediately: If `True`, this parameter causes the + modifications in this request and any pending modifications to be + applied, asynchronously and as soon as possible, regardless of the + PreferredMaintenanceWindow setting for the replication group. 
+ If `False`, then changes to the nodes in the replication group are
+ applied on the next maintenance reboot, or the next failure reboot,
+ whichever occurs first.
+
+ Valid values: `True` | `False`
+
+ Default: `False`
+
+ :type engine_version: string
+ :param engine_version: The upgraded version of the cache engine to be
+ run on the nodes in the replication group.
+
+ :type auto_minor_version_upgrade: boolean
+ :param auto_minor_version_upgrade: Determines whether minor engine
+ upgrades will be applied automatically to all of the cache nodes in
+ the replication group during the maintenance window. A value of
+ `True` allows these upgrades to occur; `False` disables automatic
+ upgrades.
+
+ :type primary_cluster_id: string
+ :param primary_cluster_id: If this parameter is specified, ElastiCache
+ will promote each of the nodes in the specified cache cluster to
+ the primary role. The nodes of all other clusters in the
+ replication group will be read replicas.
+
+ """
+ params = {'ReplicationGroupId': replication_group_id, }
+ if replication_group_description is not None:
+ params['ReplicationGroupDescription'] = replication_group_description
+ if cache_security_group_names is not None:
+ self.build_list_params(params,
+ cache_security_group_names,
+ 'CacheSecurityGroupNames.member')
+ if security_group_ids is not None:
+ self.build_list_params(params,
+ security_group_ids,
+ 'SecurityGroupIds.member')
+ if preferred_maintenance_window is not None:
+ params['PreferredMaintenanceWindow'] = preferred_maintenance_window
+ if notification_topic_arn is not None:
+ params['NotificationTopicArn'] = notification_topic_arn
+ if cache_parameter_group_name is not None:
+ params['CacheParameterGroupName'] = cache_parameter_group_name
+ if notification_topic_status is not None:
+ params['NotificationTopicStatus'] = notification_topic_status
+ if apply_immediately is not None:
+ params['ApplyImmediately'] = str(
+ apply_immediately).lower()
+ if engine_version is not None:
+ params['EngineVersion'] = engine_version
+ if auto_minor_version_upgrade is not None:
+ params['AutoMinorVersionUpgrade'] = str(
+ auto_minor_version_upgrade).lower()
+ if primary_cluster_id is not None:
+ params['PrimaryClusterId'] = primary_cluster_id
+ return self._make_request(
+ action='ModifyReplicationGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def purchase_reserved_cache_nodes_offering(self,
+ reserved_cache_nodes_offering_id,
+ reserved_cache_node_id=None,
+ cache_node_count=None):
+ """
+ The PurchaseReservedCacheNodesOffering operation allows you to
+ purchase a reserved cache node offering.
+
+ :type reserved_cache_nodes_offering_id: string
+ :param reserved_cache_nodes_offering_id: The ID of the reserved cache
+ node offering to purchase.
+ Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706
+
+ :type reserved_cache_node_id: string
+ :param reserved_cache_node_id: A customer-specified identifier to track
+ this reservation.
+ Example: myreservationID
+
+ :type cache_node_count: integer
+ :param cache_node_count: The number of cache node instances to reserve.
+ Default: `1` + + """ + params = { + 'ReservedCacheNodesOfferingId': reserved_cache_nodes_offering_id, + } + if reserved_cache_node_id is not None: + params['ReservedCacheNodeId'] = reserved_cache_node_id + if cache_node_count is not None: + params['CacheNodeCount'] = cache_node_count + return self._make_request( + action='PurchaseReservedCacheNodesOffering', + verb='POST', + path='/', params=params) + + def reboot_cache_cluster(self, cache_cluster_id, + cache_node_ids_to_reboot): + """ + The RebootCacheCluster operation reboots some, or all, of the + cache cluster nodes within a provisioned cache cluster. This + API will apply any modified cache parameter groups to the + cache cluster. The reboot action takes place as soon as + possible, and results in a momentary outage to the cache + cluster. During the reboot, the cache cluster status is set to + REBOOTING. + + The reboot causes the contents of the cache (for each cache + cluster node being rebooted) to be lost. + + When the reboot is complete, a cache cluster event is created. + + :type cache_cluster_id: string + :param cache_cluster_id: The cache cluster identifier. This parameter + is stored as a lowercase string. + + :type cache_node_ids_to_reboot: list + :param cache_node_ids_to_reboot: A list of cache cluster node IDs to + reboot. A node ID is a numeric identifier (0001, 0002, etc.). To + reboot an entire cache cluster, specify all of the cache cluster + node IDs. + + """ + params = {'CacheClusterId': cache_cluster_id, } + self.build_list_params(params, + cache_node_ids_to_reboot, + 'CacheNodeIdsToReboot.member') + return self._make_request( + action='RebootCacheCluster', + verb='POST', + path='/', params=params) + + def reset_cache_parameter_group(self, cache_parameter_group_name, + parameter_name_values, + reset_all_parameters=None): + """ + The ResetCacheParameterGroup operation modifies the parameters + of a cache parameter group to the engine or system default + value. You can reset specific parameters by submitting a list + of parameter names. To reset the entire cache parameter group, + specify the ResetAllParameters and CacheParameterGroupName + parameters. + + :type cache_parameter_group_name: string + :param cache_parameter_group_name: The name of the cache parameter + group to reset. + + :type reset_all_parameters: boolean + :param reset_all_parameters: If true , all parameters in the cache + parameter group will be reset to default values. If false , no such + action occurs. + Valid values: `True` | `False` + + :type parameter_name_values: list + :param parameter_name_values: An array of parameter names to be reset. + If you are not resetting the entire cache parameter group, you must + specify at least one parameter name. + + """ + params = { + 'CacheParameterGroupName': cache_parameter_group_name, + } + self.build_complex_list_params( + params, parameter_name_values, + 'ParameterNameValues.member', + ('ParameterName', 'ParameterValue')) + if reset_all_parameters is not None: + params['ResetAllParameters'] = str( + reset_all_parameters).lower() + return self._make_request( + action='ResetCacheParameterGroup', + verb='POST', + path='/', params=params) + + def revoke_cache_security_group_ingress(self, cache_security_group_name, + ec2_security_group_name, + ec2_security_group_owner_id): + """ + The RevokeCacheSecurityGroupIngress operation revokes ingress + from a cache security group. Use this operation to disallow + access from an Amazon EC2 security group that had been + previously authorized. 
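+
+ A minimal usage sketch (illustrative only; ``conn`` stands for an
+ already-constructed connection to this service, and every value below
+ is hypothetical)::
+
+     # Disallow EC2 ingress that was granted earlier through
+     # AuthorizeCacheSecurityGroupIngress.
+     conn.revoke_cache_security_group_ingress(
+         'my-cache-sec-group', 'my-ec2-sec-group', '123456789012')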
+ + :type cache_security_group_name: string + :param cache_security_group_name: The name of the cache security group + to revoke ingress from. + + :type ec2_security_group_name: string + :param ec2_security_group_name: The name of the Amazon EC2 security + group to revoke access from. + + :type ec2_security_group_owner_id: string + :param ec2_security_group_owner_id: The AWS account number of the + Amazon EC2 security group owner. Note that this is not the same + thing as an AWS access key ID - you must provide a valid AWS + account number for this parameter. + + """ + params = { + 'CacheSecurityGroupName': cache_security_group_name, + 'EC2SecurityGroupName': ec2_security_group_name, + 'EC2SecurityGroupOwnerId': ec2_security_group_owner_id, + } + return self._make_request( + action='RevokeCacheSecurityGroupIngress', + verb='POST', + path='/', params=params) + + def _make_request(self, action, verb, path, params): + params['ContentType'] = 'JSON' + response = self.make_request(action=action, verb='POST', + path='/', params=params) + body = response.read().decode('utf-8') + boto.log.debug(body) + if response.status == 200: + return json.loads(body) + else: + raise self.ResponseError(response.status, response.reason, body) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/elastictranscoder/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/elastictranscoder/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..afb23e56c88fb4ab32d84c6e21806992b4f86e04 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/elastictranscoder/__init__.py @@ -0,0 +1,45 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the AWS Elastic Transcoder service. 
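+
+ A short usage sketch (illustrative only; the region names printed
+ depend on the endpoints known to this boto release)::
+
+     from boto.elastictranscoder import regions
+     # Print the name of every region with an Elastic Transcoder endpoint.
+     print([r.name for r in regions()])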
+ + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.elastictranscoder.layer1 import ElasticTranscoderConnection + return get_regions( + 'elastictranscoder', + connection_cls=ElasticTranscoderConnection + ) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/elastictranscoder/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/elastictranscoder/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..94b399f537dae1d06ace6a6c8a54fbe852cab9c8 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/elastictranscoder/exceptions.py @@ -0,0 +1,50 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import JSONResponseError + + +class LimitExceededException(JSONResponseError): + pass + + +class ResourceInUseException(JSONResponseError): + pass + + +class AccessDeniedException(JSONResponseError): + pass + + +class ResourceNotFoundException(JSONResponseError): + pass + + +class InternalServiceException(JSONResponseError): + pass + + +class ValidationException(JSONResponseError): + pass + + +class IncompatibleVersionException(JSONResponseError): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/elastictranscoder/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/elastictranscoder/layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..0f4dc9c74cc727a2717bb4b12788780d39fba7dd --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/elastictranscoder/layer1.py @@ -0,0 +1,932 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.compat import json +from boto.exception import JSONResponseError +from boto.connection import AWSAuthConnection +from boto.regioninfo import RegionInfo +from boto.elastictranscoder import exceptions + + +class ElasticTranscoderConnection(AWSAuthConnection): + """ + AWS Elastic Transcoder Service + The AWS Elastic Transcoder Service. + """ + APIVersion = "2012-09-25" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "elastictranscoder.us-east-1.amazonaws.com" + ResponseError = JSONResponseError + + _faults = { + "IncompatibleVersionException": exceptions.IncompatibleVersionException, + "LimitExceededException": exceptions.LimitExceededException, + "ResourceInUseException": exceptions.ResourceInUseException, + "AccessDeniedException": exceptions.AccessDeniedException, + "ResourceNotFoundException": exceptions.ResourceNotFoundException, + "InternalServiceException": exceptions.InternalServiceException, + "ValidationException": exceptions.ValidationException, + } + + + def __init__(self, **kwargs): + region = kwargs.get('region') + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + else: + del kwargs['region'] + kwargs['host'] = region.endpoint + super(ElasticTranscoderConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def cancel_job(self, id=None): + """ + The CancelJob operation cancels an unfinished job. + You can only cancel a job that has a status of `Submitted`. To + prevent a pipeline from starting to process a job while you're + getting the job identifier, use UpdatePipelineStatus to + temporarily pause the pipeline. + + :type id: string + :param id: The identifier of the job that you want to cancel. + To get a list of the jobs (including their `jobId`) that have a status + of `Submitted`, use the ListJobsByStatus API action. + + """ + uri = '/2012-09-25/jobs/{0}'.format(id) + return self.make_request('DELETE', uri, expected_status=202) + + def create_job(self, pipeline_id=None, input_name=None, output=None, + outputs=None, output_key_prefix=None, playlists=None): + """ + When you create a job, Elastic Transcoder returns JSON data + that includes the values that you specified plus information + about the job that is created. + + If you have specified more than one output for your jobs (for + example, one output for the Kindle Fire and another output for + the Apple iPhone 4s), you currently must use the Elastic + Transcoder API to list the jobs (as opposed to the AWS + Console). + + :type pipeline_id: string + :param pipeline_id: The `Id` of the pipeline that you want Elastic + Transcoder to use for transcoding. The pipeline determines several + settings, including the Amazon S3 bucket from which Elastic + Transcoder gets the files to transcode and the bucket into which + Elastic Transcoder puts the transcoded files. + + :type input_name: dict + :param input_name: A section of the request body that provides + information about the file that is being transcoded. 
+ + :type output: dict + :param output: The `CreateJobOutput` structure. + + :type outputs: list + :param outputs: A section of the request body that provides information + about the transcoded (target) files. We recommend that you use the + `Outputs` syntax instead of the `Output` syntax. + + :type output_key_prefix: string + :param output_key_prefix: The value, if any, that you want Elastic + Transcoder to prepend to the names of all files that this job + creates, including output files, thumbnails, and playlists. + + :type playlists: list + :param playlists: If you specify a preset in `PresetId` for which the + value of `Container` is ts (MPEG-TS), Playlists contains + information about the master playlists that you want Elastic + Transcoder to create. + We recommend that you create only one master playlist. The maximum + number of master playlists in a job is 30. + + """ + uri = '/2012-09-25/jobs' + params = {} + if pipeline_id is not None: + params['PipelineId'] = pipeline_id + if input_name is not None: + params['Input'] = input_name + if output is not None: + params['Output'] = output + if outputs is not None: + params['Outputs'] = outputs + if output_key_prefix is not None: + params['OutputKeyPrefix'] = output_key_prefix + if playlists is not None: + params['Playlists'] = playlists + return self.make_request('POST', uri, expected_status=201, + data=json.dumps(params)) + + def create_pipeline(self, name=None, input_bucket=None, + output_bucket=None, role=None, notifications=None, + content_config=None, thumbnail_config=None): + """ + The CreatePipeline operation creates a pipeline with settings + that you specify. + + :type name: string + :param name: The name of the pipeline. We recommend that the name be + unique within the AWS account, but uniqueness is not enforced. + Constraints: Maximum 40 characters. + + :type input_bucket: string + :param input_bucket: The Amazon S3 bucket in which you saved the media + files that you want to transcode. + + :type output_bucket: string + :param output_bucket: The Amazon S3 bucket in which you want Elastic + Transcoder to save the transcoded files. (Use this, or use + ContentConfig:Bucket plus ThumbnailConfig:Bucket.) + Specify this value when all of the following are true: + + + You want to save transcoded files, thumbnails (if any), and playlists + (if any) together in one bucket. + + You do not want to specify the users or groups who have access to the + transcoded files, thumbnails, and playlists. + + You do not want to specify the permissions that Elastic Transcoder + grants to the files. When Elastic Transcoder saves files in + `OutputBucket`, it grants full control over the files only to the + AWS account that owns the role that is specified by `Role`. + + You want to associate the transcoded files and thumbnails with the + Amazon S3 Standard storage class. + + + + If you want to save transcoded files and playlists in one bucket and + thumbnails in another bucket, specify which users can access the + transcoded files or the permissions the users have, or change the + Amazon S3 storage class, omit `OutputBucket` and specify values for + `ContentConfig` and `ThumbnailConfig` instead. + + :type role: string + :param role: The IAM Amazon Resource Name (ARN) for the role that you + want Elastic Transcoder to use to create the pipeline. + + :type notifications: dict + :param notifications: + The Amazon Simple Notification Service (Amazon SNS) topic that you want + to notify to report job status. 
+ To receive notifications, you must also subscribe to the new topic in + the Amazon SNS console. + + + **Progressing**: The topic ARN for the Amazon Simple Notification + Service (Amazon SNS) topic that you want to notify when Elastic + Transcoder has started to process a job in this pipeline. This is + the ARN that Amazon SNS returned when you created the topic. For + more information, see Create a Topic in the Amazon Simple + Notification Service Developer Guide. + + **Completed**: The topic ARN for the Amazon SNS topic that you want + to notify when Elastic Transcoder has finished processing a job in + this pipeline. This is the ARN that Amazon SNS returned when you + created the topic. + + **Warning**: The topic ARN for the Amazon SNS topic that you want to + notify when Elastic Transcoder encounters a warning condition while + processing a job in this pipeline. This is the ARN that Amazon SNS + returned when you created the topic. + + **Error**: The topic ARN for the Amazon SNS topic that you want to + notify when Elastic Transcoder encounters an error condition while + processing a job in this pipeline. This is the ARN that Amazon SNS + returned when you created the topic. + + :type content_config: dict + :param content_config: + The optional `ContentConfig` object specifies information about the + Amazon S3 bucket in which you want Elastic Transcoder to save + transcoded files and playlists: which bucket to use, which users + you want to have access to the files, the type of access you want + users to have, and the storage class that you want to assign to the + files. + + If you specify values for `ContentConfig`, you must also specify values + for `ThumbnailConfig`. + + If you specify values for `ContentConfig` and `ThumbnailConfig`, omit + the `OutputBucket` object. + + + + **Bucket**: The Amazon S3 bucket in which you want Elastic Transcoder + to save transcoded files and playlists. + + **Permissions** (Optional): The Permissions object specifies which + users you want to have access to transcoded files and the type of + access you want them to have. You can grant permissions to a + maximum of 30 users and/or predefined Amazon S3 groups. + + **Grantee Type**: Specify the type of value that appears in the + `Grantee` object: + + + **Canonical**: The value in the `Grantee` object is either the + canonical user ID for an AWS account or an origin access identity + for an Amazon CloudFront distribution. For more information about + canonical user IDs, see Access Control List (ACL) Overview in the + Amazon Simple Storage Service Developer Guide. For more information + about using CloudFront origin access identities to require that + users use CloudFront URLs instead of Amazon S3 URLs, see Using an + Origin Access Identity to Restrict Access to Your Amazon S3 + Content. A canonical user ID is not the same as an AWS account + number. + + **Email**: The value in the `Grantee` object is the registered email + address of an AWS account. + + **Group**: The value in the `Grantee` object is one of the following + predefined Amazon S3 groups: `AllUsers`, `AuthenticatedUsers`, or + `LogDelivery`. + + + **Grantee**: The AWS user or group that you want to have access to + transcoded files and playlists. 
To identify the user or group, you + can specify the canonical user ID for an AWS account, an origin + access identity for a CloudFront distribution, the registered email + address of an AWS account, or a predefined Amazon S3 group + + **Access**: The permission that you want to give to the AWS user that + you specified in `Grantee`. Permissions are granted on the files + that Elastic Transcoder adds to the bucket, including playlists and + video files. Valid values include: + + + `READ`: The grantee can read the objects and metadata for objects + that Elastic Transcoder adds to the Amazon S3 bucket. + + `READ_ACP`: The grantee can read the object ACL for objects that + Elastic Transcoder adds to the Amazon S3 bucket. + + `WRITE_ACP`: The grantee can write the ACL for the objects that + Elastic Transcoder adds to the Amazon S3 bucket. + + `FULL_CONTROL`: The grantee has `READ`, `READ_ACP`, and `WRITE_ACP` + permissions for the objects that Elastic Transcoder adds to the + Amazon S3 bucket. + + + **StorageClass**: The Amazon S3 storage class, `Standard` or + `ReducedRedundancy`, that you want Elastic Transcoder to assign to + the video files and playlists that it stores in your Amazon S3 + bucket. + + :type thumbnail_config: dict + :param thumbnail_config: + The `ThumbnailConfig` object specifies several values, including the + Amazon S3 bucket in which you want Elastic Transcoder to save + thumbnail files, which users you want to have access to the files, + the type of access you want users to have, and the storage class + that you want to assign to the files. + + If you specify values for `ContentConfig`, you must also specify values + for `ThumbnailConfig` even if you don't want to create thumbnails. + + If you specify values for `ContentConfig` and `ThumbnailConfig`, omit + the `OutputBucket` object. + + + + **Bucket**: The Amazon S3 bucket in which you want Elastic Transcoder + to save thumbnail files. + + **Permissions** (Optional): The `Permissions` object specifies which + users and/or predefined Amazon S3 groups you want to have access to + thumbnail files, and the type of access you want them to have. You + can grant permissions to a maximum of 30 users and/or predefined + Amazon S3 groups. + + **GranteeType**: Specify the type of value that appears in the + Grantee object: + + + **Canonical**: The value in the `Grantee` object is either the + canonical user ID for an AWS account or an origin access identity + for an Amazon CloudFront distribution. A canonical user ID is not + the same as an AWS account number. + + **Email**: The value in the `Grantee` object is the registered email + address of an AWS account. + + **Group**: The value in the `Grantee` object is one of the following + predefined Amazon S3 groups: `AllUsers`, `AuthenticatedUsers`, or + `LogDelivery`. + + + **Grantee**: The AWS user or group that you want to have access to + thumbnail files. To identify the user or group, you can specify the + canonical user ID for an AWS account, an origin access identity for + a CloudFront distribution, the registered email address of an AWS + account, or a predefined Amazon S3 group. + + **Access**: The permission that you want to give to the AWS user that + you specified in `Grantee`. Permissions are granted on the + thumbnail files that Elastic Transcoder adds to the bucket. Valid + values include: + + + `READ`: The grantee can read the thumbnails and metadata for objects + that Elastic Transcoder adds to the Amazon S3 bucket. 
+ + `READ_ACP`: The grantee can read the object ACL for thumbnails that + Elastic Transcoder adds to the Amazon S3 bucket. + + `WRITE_ACP`: The grantee can write the ACL for the thumbnails that + Elastic Transcoder adds to the Amazon S3 bucket. + + `FULL_CONTROL`: The grantee has `READ`, `READ_ACP`, and `WRITE_ACP` + permissions for the thumbnails that Elastic Transcoder adds to the + Amazon S3 bucket. + + + **StorageClass**: The Amazon S3 storage class, `Standard` or + `ReducedRedundancy`, that you want Elastic Transcoder to assign to + the thumbnails that it stores in your Amazon S3 bucket. + + """ + uri = '/2012-09-25/pipelines' + params = {} + if name is not None: + params['Name'] = name + if input_bucket is not None: + params['InputBucket'] = input_bucket + if output_bucket is not None: + params['OutputBucket'] = output_bucket + if role is not None: + params['Role'] = role + if notifications is not None: + params['Notifications'] = notifications + if content_config is not None: + params['ContentConfig'] = content_config + if thumbnail_config is not None: + params['ThumbnailConfig'] = thumbnail_config + return self.make_request('POST', uri, expected_status=201, + data=json.dumps(params)) + + def create_preset(self, name=None, description=None, container=None, + video=None, audio=None, thumbnails=None): + """ + The CreatePreset operation creates a preset with settings that + you specify. + Elastic Transcoder checks the CreatePreset settings to ensure + that they meet Elastic Transcoder requirements and to + determine whether they comply with H.264 standards. If your + settings are not valid for Elastic Transcoder, Elastic + Transcoder returns an HTTP 400 response ( + `ValidationException`) and does not create the preset. If the + settings are valid for Elastic Transcoder but aren't strictly + compliant with the H.264 standard, Elastic Transcoder creates + the preset and returns a warning message in the response. This + helps you determine whether your settings comply with the + H.264 standard while giving you greater flexibility with + respect to the video that Elastic Transcoder produces. + Elastic Transcoder uses the H.264 video-compression format. + For more information, see the International Telecommunication + Union publication Recommendation ITU-T H.264: Advanced video + coding for generic audiovisual services . + + :type name: string + :param name: The name of the preset. We recommend that the name be + unique within the AWS account, but uniqueness is not enforced. + + :type description: string + :param description: A description of the preset. + + :type container: string + :param container: The container type for the output file. Valid values + include `mp3`, `mp4`, `ogg`, `ts`, and `webm`. + + :type video: dict + :param video: A section of the request body that specifies the video + parameters. + + :type audio: dict + :param audio: A section of the request body that specifies the audio + parameters. + + :type thumbnails: dict + :param thumbnails: A section of the request body that specifies the + thumbnail parameters, if any. 
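+
+ Example (an illustrative sketch, not part of the original AWS API
+ documentation; ``conn`` is an already-constructed connection and the
+ dictionaries are abbreviated, so a real request needs the full
+ request-body sections described above)::
+
+     preset = conn.create_preset(
+         name='my-mp4-preset',
+         description='An example preset',
+         container='mp4',
+         video={'Codec': 'H.264'},      # abbreviated for illustration
+         audio={'Codec': 'AAC'},
+         thumbnails={'Format': 'png'})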
+ + """ + uri = '/2012-09-25/presets' + params = {} + if name is not None: + params['Name'] = name + if description is not None: + params['Description'] = description + if container is not None: + params['Container'] = container + if video is not None: + params['Video'] = video + if audio is not None: + params['Audio'] = audio + if thumbnails is not None: + params['Thumbnails'] = thumbnails + return self.make_request('POST', uri, expected_status=201, + data=json.dumps(params)) + + def delete_pipeline(self, id=None): + """ + The DeletePipeline operation removes a pipeline. + + You can only delete a pipeline that has never been used or + that is not currently in use (doesn't contain any active + jobs). If the pipeline is currently in use, `DeletePipeline` + returns an error. + + :type id: string + :param id: The identifier of the pipeline that you want to delete. + + """ + uri = '/2012-09-25/pipelines/{0}'.format(id) + return self.make_request('DELETE', uri, expected_status=202) + + def delete_preset(self, id=None): + """ + The DeletePreset operation removes a preset that you've added + in an AWS region. + + You can't delete the default presets that are included with + Elastic Transcoder. + + :type id: string + :param id: The identifier of the preset for which you want to get + detailed information. + + """ + uri = '/2012-09-25/presets/{0}'.format(id) + return self.make_request('DELETE', uri, expected_status=202) + + def list_jobs_by_pipeline(self, pipeline_id=None, ascending=None, + page_token=None): + """ + The ListJobsByPipeline operation gets a list of the jobs + currently in a pipeline. + + Elastic Transcoder returns all of the jobs currently in the + specified pipeline. The response body contains one element for + each job that satisfies the search criteria. + + :type pipeline_id: string + :param pipeline_id: The ID of the pipeline for which you want to get + job information. + + :type ascending: string + :param ascending: To list jobs in chronological order by the date and + time that they were submitted, enter `True`. To list jobs in + reverse chronological order, enter `False`. + + :type page_token: string + :param page_token: When Elastic Transcoder returns more than one page + of results, use `pageToken` in subsequent `GET` requests to get + each successive page of results. + + """ + uri = '/2012-09-25/jobsByPipeline/{0}'.format(pipeline_id) + params = {} + if pipeline_id is not None: + params['PipelineId'] = pipeline_id + if ascending is not None: + params['Ascending'] = ascending + if page_token is not None: + params['PageToken'] = page_token + return self.make_request('GET', uri, expected_status=200, + params=params) + + def list_jobs_by_status(self, status=None, ascending=None, + page_token=None): + """ + The ListJobsByStatus operation gets a list of jobs that have a + specified status. The response body contains one element for + each job that satisfies the search criteria. + + :type status: string + :param status: To get information about all of the jobs associated with + the current AWS account that have a given status, specify the + following status: `Submitted`, `Progressing`, `Complete`, + `Canceled`, or `Error`. + + :type ascending: string + :param ascending: To list jobs in chronological order by the date and + time that they were submitted, enter `True`. To list jobs in + reverse chronological order, enter `False`. 
+
+ :type page_token: string
+ :param page_token: When Elastic Transcoder returns more than one page
+ of results, use `pageToken` in subsequent `GET` requests to get
+ each successive page of results.
+
+ """
+ uri = '/2012-09-25/jobsByStatus/{0}'.format(status)
+ params = {}
+ if status is not None:
+ params['Status'] = status
+ if ascending is not None:
+ params['Ascending'] = ascending
+ if page_token is not None:
+ params['PageToken'] = page_token
+ return self.make_request('GET', uri, expected_status=200,
+ params=params)
+
+ def list_pipelines(self, ascending=None, page_token=None):
+ """
+ The ListPipelines operation gets a list of the pipelines
+ associated with the current AWS account.
+
+ :type ascending: string
+ :param ascending: To list pipelines in chronological order by the date
+ and time that they were created, enter `True`. To list pipelines in
+ reverse chronological order, enter `False`.
+
+ :type page_token: string
+ :param page_token: When Elastic Transcoder returns more than one page
+ of results, use `pageToken` in subsequent `GET` requests to get
+ each successive page of results.
+
+ """
+ uri = '/2012-09-25/pipelines'
+ params = {}
+ if ascending is not None:
+ params['Ascending'] = ascending
+ if page_token is not None:
+ params['PageToken'] = page_token
+ return self.make_request('GET', uri, expected_status=200,
+ params=params)
+
+ def list_presets(self, ascending=None, page_token=None):
+ """
+ The ListPresets operation gets a list of the default presets
+ included with Elastic Transcoder and the presets that you've
+ added in an AWS region.
+
+ :type ascending: string
+ :param ascending: To list presets in chronological order by the date
+ and time that they were created, enter `True`. To list presets in
+ reverse chronological order, enter `False`.
+
+ :type page_token: string
+ :param page_token: When Elastic Transcoder returns more than one page
+ of results, use `pageToken` in subsequent `GET` requests to get
+ each successive page of results.
+
+ """
+ uri = '/2012-09-25/presets'
+ params = {}
+ if ascending is not None:
+ params['Ascending'] = ascending
+ if page_token is not None:
+ params['PageToken'] = page_token
+ return self.make_request('GET', uri, expected_status=200,
+ params=params)
+
+ def read_job(self, id=None):
+ """
+ The ReadJob operation returns detailed information about a
+ job.
+
+ :type id: string
+ :param id: The identifier of the job for which you want to get detailed
+ information.
+
+ """
+ uri = '/2012-09-25/jobs/{0}'.format(id)
+ return self.make_request('GET', uri, expected_status=200)
+
+ def read_pipeline(self, id=None):
+ """
+ The ReadPipeline operation gets detailed information about a
+ pipeline.
+
+ :type id: string
+ :param id: The identifier of the pipeline to read.
+
+ """
+ uri = '/2012-09-25/pipelines/{0}'.format(id)
+ return self.make_request('GET', uri, expected_status=200)
+
+ def read_preset(self, id=None):
+ """
+ The ReadPreset operation gets detailed information about a
+ preset.
+
+ :type id: string
+ :param id: The identifier of the preset for which you want to get
+ detailed information.
+
+ """
+ uri = '/2012-09-25/presets/{0}'.format(id)
+ return self.make_request('GET', uri, expected_status=200)
+
+ def test_role(self, role=None, input_bucket=None, output_bucket=None,
+ topics=None):
+ """
+ The TestRole operation tests the IAM role used to create the
+ pipeline.
+ + The `TestRole` action lets you determine whether the IAM role + you are using has sufficient permissions to let Elastic + Transcoder perform tasks associated with the transcoding + process. The action attempts to assume the specified IAM role, + checks read access to the input and output buckets, and tries + to send a test notification to Amazon SNS topics that you + specify. + + :type role: string + :param role: The IAM Amazon Resource Name (ARN) for the role that you + want Elastic Transcoder to test. + + :type input_bucket: string + :param input_bucket: The Amazon S3 bucket that contains media files to + be transcoded. The action attempts to read from this bucket. + + :type output_bucket: string + :param output_bucket: The Amazon S3 bucket that Elastic Transcoder will + write transcoded media files to. The action attempts to read from + this bucket. + + :type topics: list + :param topics: The ARNs of one or more Amazon Simple Notification + Service (Amazon SNS) topics that you want the action to send a test + notification to. + + """ + uri = '/2012-09-25/roleTests' + params = {} + if role is not None: + params['Role'] = role + if input_bucket is not None: + params['InputBucket'] = input_bucket + if output_bucket is not None: + params['OutputBucket'] = output_bucket + if topics is not None: + params['Topics'] = topics + return self.make_request('POST', uri, expected_status=200, + data=json.dumps(params)) + + def update_pipeline(self, id, name=None, input_bucket=None, role=None, + notifications=None, content_config=None, + thumbnail_config=None): + """ + Use the `UpdatePipeline` operation to update settings for a + pipeline. When you change pipeline settings, your changes take + effect immediately. Jobs that you have already submitted and + that Elastic Transcoder has not started to process are + affected in addition to jobs that you submit after you change + settings. + + :type id: string + :param id: The ID of the pipeline that you want to update. + + :type name: string + :param name: The name of the pipeline. We recommend that the name be + unique within the AWS account, but uniqueness is not enforced. + Constraints: Maximum 40 characters + + :type input_bucket: string + :param input_bucket: The Amazon S3 bucket in which you saved the media + files that you want to transcode and the graphics that you want to + use as watermarks. + + :type role: string + :param role: The IAM Amazon Resource Name (ARN) for the role that you + want Elastic Transcoder to use to transcode jobs for this pipeline. + + :type notifications: dict + :param notifications: + The Amazon Simple Notification Service (Amazon SNS) topic or topics to + notify in order to report job status. + To receive notifications, you must also subscribe to the new topic in + the Amazon SNS console. + + :type content_config: dict + :param content_config: + The optional `ContentConfig` object specifies information about the + Amazon S3 bucket in which you want Elastic Transcoder to save + transcoded files and playlists: which bucket to use, which users + you want to have access to the files, the type of access you want + users to have, and the storage class that you want to assign to the + files. + + If you specify values for `ContentConfig`, you must also specify values + for `ThumbnailConfig`. + + If you specify values for `ContentConfig` and `ThumbnailConfig`, omit + the `OutputBucket` object. + + + + **Bucket**: The Amazon S3 bucket in which you want Elastic Transcoder + to save transcoded files and playlists. 
+ + **Permissions** (Optional): The Permissions object specifies which + users you want to have access to transcoded files and the type of + access you want them to have. You can grant permissions to a + maximum of 30 users and/or predefined Amazon S3 groups. + + **Grantee Type**: Specify the type of value that appears in the + `Grantee` object: + + + **Canonical**: The value in the `Grantee` object is either the + canonical user ID for an AWS account or an origin access identity + for an Amazon CloudFront distribution. For more information about + canonical user IDs, see Access Control List (ACL) Overview in the + Amazon Simple Storage Service Developer Guide. For more information + about using CloudFront origin access identities to require that + users use CloudFront URLs instead of Amazon S3 URLs, see Using an + Origin Access Identity to Restrict Access to Your Amazon S3 + Content. A canonical user ID is not the same as an AWS account + number. + + **Email**: The value in the `Grantee` object is the registered email + address of an AWS account. + + **Group**: The value in the `Grantee` object is one of the following + predefined Amazon S3 groups: `AllUsers`, `AuthenticatedUsers`, or + `LogDelivery`. + + + **Grantee**: The AWS user or group that you want to have access to + transcoded files and playlists. To identify the user or group, you + can specify the canonical user ID for an AWS account, an origin + access identity for a CloudFront distribution, the registered email + address of an AWS account, or a predefined Amazon S3 group + + **Access**: The permission that you want to give to the AWS user that + you specified in `Grantee`. Permissions are granted on the files + that Elastic Transcoder adds to the bucket, including playlists and + video files. Valid values include: + + + `READ`: The grantee can read the objects and metadata for objects + that Elastic Transcoder adds to the Amazon S3 bucket. + + `READ_ACP`: The grantee can read the object ACL for objects that + Elastic Transcoder adds to the Amazon S3 bucket. + + `WRITE_ACP`: The grantee can write the ACL for the objects that + Elastic Transcoder adds to the Amazon S3 bucket. + + `FULL_CONTROL`: The grantee has `READ`, `READ_ACP`, and `WRITE_ACP` + permissions for the objects that Elastic Transcoder adds to the + Amazon S3 bucket. + + + **StorageClass**: The Amazon S3 storage class, `Standard` or + `ReducedRedundancy`, that you want Elastic Transcoder to assign to + the video files and playlists that it stores in your Amazon S3 + bucket. + + :type thumbnail_config: dict + :param thumbnail_config: + The `ThumbnailConfig` object specifies several values, including the + Amazon S3 bucket in which you want Elastic Transcoder to save + thumbnail files, which users you want to have access to the files, + the type of access you want users to have, and the storage class + that you want to assign to the files. + + If you specify values for `ContentConfig`, you must also specify values + for `ThumbnailConfig` even if you don't want to create thumbnails. + + If you specify values for `ContentConfig` and `ThumbnailConfig`, omit + the `OutputBucket` object. + + + + **Bucket**: The Amazon S3 bucket in which you want Elastic Transcoder + to save thumbnail files. + + **Permissions** (Optional): The `Permissions` object specifies which + users and/or predefined Amazon S3 groups you want to have access to + thumbnail files, and the type of access you want them to have. 
You + can grant permissions to a maximum of 30 users and/or predefined + Amazon S3 groups. + + **GranteeType**: Specify the type of value that appears in the + Grantee object: + + + **Canonical**: The value in the `Grantee` object is either the + canonical user ID for an AWS account or an origin access identity + for an Amazon CloudFront distribution. A canonical user ID is not + the same as an AWS account number. + + **Email**: The value in the `Grantee` object is the registered email + address of an AWS account. + + **Group**: The value in the `Grantee` object is one of the following + predefined Amazon S3 groups: `AllUsers`, `AuthenticatedUsers`, or + `LogDelivery`. + + + **Grantee**: The AWS user or group that you want to have access to + thumbnail files. To identify the user or group, you can specify the + canonical user ID for an AWS account, an origin access identity for + a CloudFront distribution, the registered email address of an AWS + account, or a predefined Amazon S3 group. + + **Access**: The permission that you want to give to the AWS user that + you specified in `Grantee`. Permissions are granted on the + thumbnail files that Elastic Transcoder adds to the bucket. Valid + values include: + + + `READ`: The grantee can read the thumbnails and metadata for objects + that Elastic Transcoder adds to the Amazon S3 bucket. + + `READ_ACP`: The grantee can read the object ACL for thumbnails that + Elastic Transcoder adds to the Amazon S3 bucket. + + `WRITE_ACP`: The grantee can write the ACL for the thumbnails that + Elastic Transcoder adds to the Amazon S3 bucket. + + `FULL_CONTROL`: The grantee has `READ`, `READ_ACP`, and `WRITE_ACP` + permissions for the thumbnails that Elastic Transcoder adds to the + Amazon S3 bucket. + + + **StorageClass**: The Amazon S3 storage class, `Standard` or + `ReducedRedundancy`, that you want Elastic Transcoder to assign to + the thumbnails that it stores in your Amazon S3 bucket. + + """ + uri = '/2012-09-25/pipelines/{0}'.format(id) + params = {} + if name is not None: + params['Name'] = name + if input_bucket is not None: + params['InputBucket'] = input_bucket + if role is not None: + params['Role'] = role + if notifications is not None: + params['Notifications'] = notifications + if content_config is not None: + params['ContentConfig'] = content_config + if thumbnail_config is not None: + params['ThumbnailConfig'] = thumbnail_config + return self.make_request('PUT', uri, expected_status=200, + data=json.dumps(params)) + + def update_pipeline_notifications(self, id=None, notifications=None): + """ + With the UpdatePipelineNotifications operation, you can update + Amazon Simple Notification Service (Amazon SNS) notifications + for a pipeline. + + When you update notifications for a pipeline, Elastic + Transcoder returns the values that you specified in the + request. + + :type id: string + :param id: The identifier of the pipeline for which you want to change + notification settings. + + :type notifications: dict + :param notifications: + The topic ARN for the Amazon Simple Notification Service (Amazon SNS) + topic that you want to notify to report job status. + To receive notifications, you must also subscribe to the new topic in + the Amazon SNS console. + + + **Progressing**: The topic ARN for the Amazon Simple Notification + Service (Amazon SNS) topic that you want to notify when Elastic + Transcoder has started to process jobs that are added to this + pipeline. This is the ARN that Amazon SNS returned when you created + the topic. 
+ + **Completed**: The topic ARN for the Amazon SNS topic that you want + to notify when Elastic Transcoder has finished processing a job. + This is the ARN that Amazon SNS returned when you created the + topic. + + **Warning**: The topic ARN for the Amazon SNS topic that you want to + notify when Elastic Transcoder encounters a warning condition. This + is the ARN that Amazon SNS returned when you created the topic. + + **Error**: The topic ARN for the Amazon SNS topic that you want to + notify when Elastic Transcoder encounters an error condition. This + is the ARN that Amazon SNS returned when you created the topic. + + """ + uri = '/2012-09-25/pipelines/{0}/notifications'.format(id) + params = {} + if id is not None: + params['Id'] = id + if notifications is not None: + params['Notifications'] = notifications + return self.make_request('POST', uri, expected_status=200, + data=json.dumps(params)) + + def update_pipeline_status(self, id=None, status=None): + """ + The UpdatePipelineStatus operation pauses or reactivates a + pipeline, so that the pipeline stops or restarts the + processing of jobs. + + Changing the pipeline status is useful if you want to cancel + one or more jobs. You can't cancel jobs after Elastic + Transcoder has started processing them; if you pause the + pipeline to which you submitted the jobs, you have more time + to get the job IDs for the jobs that you want to cancel, and + to send a CancelJob request. + + :type id: string + :param id: The identifier of the pipeline to update. + + :type status: string + :param status: + The desired status of the pipeline: + + + + `Active`: The pipeline is processing jobs. + + `Paused`: The pipeline is not currently processing jobs. + + """ + uri = '/2012-09-25/pipelines/{0}/status'.format(id) + params = {} + if id is not None: + params['Id'] = id + if status is not None: + params['Status'] = status + return self.make_request('POST', uri, expected_status=200, + data=json.dumps(params)) + + def make_request(self, verb, resource, headers=None, data='', + expected_status=None, params=None): + if headers is None: + headers = {} + response = super(ElasticTranscoderConnection, self).make_request( + verb, resource, headers=headers, data=data, params=params) + body = json.loads(response.read().decode('utf-8')) + if response.status == expected_status: + return body + else: + error_type = response.getheader('x-amzn-ErrorType').split(':')[0] + error_class = self._faults.get(error_type, self.ResponseError) + raise error_class(response.status, response.reason, body) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/emr/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/emr/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5f3181d289f419b0255474f5685acc33a0111e57 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/emr/__init__.py @@ -0,0 +1,49 @@ +# Copyright (c) 2010 Spotify AB +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. 
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+This module provides an interface to the Elastic MapReduce (EMR)
+service from AWS.
+"""
+from boto.emr.connection import EmrConnection
+from boto.emr.step import Step, StreamingStep, JarStep
+from boto.emr.bootstrap_action import BootstrapAction
+from boto.regioninfo import RegionInfo, get_regions
+
+
+def regions():
+    """
+    Get all available regions for the Amazon Elastic MapReduce service.
+
+    :rtype: list
+    :return: A list of :class:`boto.regioninfo.RegionInfo`
+    """
+    return get_regions('elasticmapreduce', connection_cls=EmrConnection)
+
+
+def connect_to_region(region_name, **kw_params):
+    for region in regions():
+        if region.name == region_name:
+            return region.connect(**kw_params)
+    return None
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/emr/bootstrap_action.py b/desktop/core/ext-py/boto-2.38.0/boto/emr/bootstrap_action.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a01fd21cc8ed24711580489de3274b973751018
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/emr/bootstrap_action.py
@@ -0,0 +1,46 @@
+# Copyright (c) 2010 Spotify AB
+# Copyright (c) 2010 Yelp
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
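[Editor's note] The BootstrapAction class defined just below is a small container for a script's display name, its location, and its arguments; EmrConnection.run_jobflow later serializes it into ScriptBootstrapAction.* request parameters. A minimal usage sketch (the bucket and script path are hypothetical):

    from boto.emr.bootstrap_action import BootstrapAction

    # A bare string argument is normalized to a one-item list by __init__.
    action = BootstrapAction(
        name='Install packages',                          # shown in the EMR console
        path='s3://example-bucket/bootstrap/install.sh',  # hypothetical script location
        bootstrap_action_args=['--with-hive', '--with-pig'])
    print(action.args())  # ['--with-hive', '--with-pig']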
+ +from boto.compat import six + +class BootstrapAction(object): + def __init__(self, name, path, bootstrap_action_args): + self.name = name + self.path = path + + if isinstance(bootstrap_action_args, six.string_types): + bootstrap_action_args = [bootstrap_action_args] + + self.bootstrap_action_args = bootstrap_action_args + + def args(self): + args = [] + + if self.bootstrap_action_args: + args.extend(self.bootstrap_action_args) + + return args + + def __repr__(self): + return '%s.%s(name=%r, path=%r, bootstrap_action_args=%r)' % ( + self.__class__.__module__, self.__class__.__name__, + self.name, self.path, self.bootstrap_action_args) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/emr/connection.py b/desktop/core/ext-py/boto-2.38.0/boto/emr/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..d15852ea253779a865edde69120b6c4bde26989a --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/emr/connection.py @@ -0,0 +1,754 @@ +# Copyright (c) 2010 Spotify AB +# Copyright (c) 2010-2011 Yelp +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
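[Editor's note] The EmrConnection class in the file below extends AWSQueryConnection and signs its requests with SigV4; the regions() and connect_to_region() helpers from boto.emr shown earlier are the usual way to obtain one. A minimal sketch, assuming credentials come from the standard boto config file or environment variables:

    import boto.emr

    # connect_to_region returns None for an unrecognized region name.
    conn = boto.emr.connect_to_region('us-west-2')
    if conn is not None:
        summaries = conn.list_clusters()
        for cluster in summaries.clusters or []:
            print(cluster.id, cluster.name)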
+
+"""
+Represents a connection to the EMR service
+"""
+import types
+
+import boto
+import boto.utils
+from boto.ec2.regioninfo import RegionInfo
+from boto.emr.emrobject import AddInstanceGroupsResponse, BootstrapActionList, \
+    Cluster, ClusterSummaryList, HadoopStep, \
+    InstanceGroupList, InstanceList, JobFlow, \
+    JobFlowStepList, \
+    ModifyInstanceGroupsResponse, \
+    RunJobFlowResponse, StepSummaryList
+from boto.emr.step import JarStep
+from boto.connection import AWSQueryConnection
+from boto.exception import EmrResponseError
+from boto.compat import six
+
+
+class EmrConnection(AWSQueryConnection):
+
+    APIVersion = boto.config.get('Boto', 'emr_version', '2009-03-31')
+    DefaultRegionName = boto.config.get('Boto', 'emr_region_name', 'us-east-1')
+    DefaultRegionEndpoint = boto.config.get('Boto', 'emr_region_endpoint',
+                                            'elasticmapreduce.us-east-1.amazonaws.com')
+    ResponseError = EmrResponseError
+
+    # Constants for AWS Console debugging
+    DebuggingJar = 's3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar'
+    DebuggingArgs = 's3n://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch'
+
+    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+                 is_secure=True, port=None, proxy=None, proxy_port=None,
+                 proxy_user=None, proxy_pass=None, debug=0,
+                 https_connection_factory=None, region=None, path='/',
+                 security_token=None, validate_certs=True, profile_name=None):
+        if not region:
+            region = RegionInfo(self, self.DefaultRegionName,
+                                self.DefaultRegionEndpoint)
+        self.region = region
+        super(EmrConnection, self).__init__(aws_access_key_id,
+                                            aws_secret_access_key,
+                                            is_secure, port, proxy, proxy_port,
+                                            proxy_user, proxy_pass,
+                                            self.region.endpoint, debug,
+                                            https_connection_factory, path,
+                                            security_token,
+                                            validate_certs=validate_certs,
+                                            profile_name=profile_name)
+        # Many of the EMR hostnames are of the form:
+        #     <region>.<service>.amazonaws.com
+        # rather than the more common:
+        #     <service>.<region>.amazonaws.com
+        # so we need to explicitly set the region_name and service_name
+        # for the SigV4 signing.
+ self.auth_region_name = self.region.name + self.auth_service_name = 'elasticmapreduce' + + def _required_auth_capability(self): + return ['hmac-v4'] + + def describe_cluster(self, cluster_id): + """ + Describes an Elastic MapReduce cluster + + :type cluster_id: str + :param cluster_id: The cluster id of interest + """ + params = { + 'ClusterId': cluster_id + } + return self.get_object('DescribeCluster', params, Cluster) + + def describe_jobflow(self, jobflow_id): + """ + Describes a single Elastic MapReduce job flow + + :type jobflow_id: str + :param jobflow_id: The job flow id of interest + """ + jobflows = self.describe_jobflows(jobflow_ids=[jobflow_id]) + if jobflows: + return jobflows[0] + + def describe_jobflows(self, states=None, jobflow_ids=None, + created_after=None, created_before=None): + """ + Retrieve all the Elastic MapReduce job flows on your account + + :type states: list + :param states: A list of strings with job flow states wanted + + :type jobflow_ids: list + :param jobflow_ids: A list of job flow IDs + :type created_after: datetime + :param created_after: Bound on job flow creation time + + :type created_before: datetime + :param created_before: Bound on job flow creation time + """ + params = {} + + if states: + self.build_list_params(params, states, 'JobFlowStates.member') + if jobflow_ids: + self.build_list_params(params, jobflow_ids, 'JobFlowIds.member') + if created_after: + params['CreatedAfter'] = created_after.strftime( + boto.utils.ISO8601) + if created_before: + params['CreatedBefore'] = created_before.strftime( + boto.utils.ISO8601) + + return self.get_list('DescribeJobFlows', params, [('member', JobFlow)]) + + def describe_step(self, cluster_id, step_id): + """ + Describe an Elastic MapReduce step + + :type cluster_id: str + :param cluster_id: The cluster id of interest + :type step_id: str + :param step_id: The step id of interest + """ + params = { + 'ClusterId': cluster_id, + 'StepId': step_id + } + + return self.get_object('DescribeStep', params, HadoopStep) + + def list_bootstrap_actions(self, cluster_id, marker=None): + """ + Get a list of bootstrap actions for an Elastic MapReduce cluster + + :type cluster_id: str + :param cluster_id: The cluster id of interest + :type marker: str + :param marker: Pagination marker + """ + params = { + 'ClusterId': cluster_id + } + + if marker: + params['Marker'] = marker + + return self.get_object('ListBootstrapActions', params, BootstrapActionList) + + def list_clusters(self, created_after=None, created_before=None, + cluster_states=None, marker=None): + """ + List Elastic MapReduce clusters with optional filtering + + :type created_after: datetime + :param created_after: Bound on cluster creation time + :type created_before: datetime + :param created_before: Bound on cluster creation time + :type cluster_states: list + :param cluster_states: Bound on cluster states + :type marker: str + :param marker: Pagination marker + """ + params = {} + if created_after: + params['CreatedAfter'] = created_after.strftime( + boto.utils.ISO8601) + if created_before: + params['CreatedBefore'] = created_before.strftime( + boto.utils.ISO8601) + if marker: + params['Marker'] = marker + + if cluster_states: + self.build_list_params(params, cluster_states, 'ClusterStates.member') + + return self.get_object('ListClusters', params, ClusterSummaryList) + + def list_instance_groups(self, cluster_id, marker=None): + """ + List EC2 instance groups in a cluster + + :type cluster_id: str + :param cluster_id: The cluster id of interest + :type 
marker: str + :param marker: Pagination marker + """ + params = { + 'ClusterId': cluster_id + } + + if marker: + params['Marker'] = marker + + return self.get_object('ListInstanceGroups', params, InstanceGroupList) + + def list_instances(self, cluster_id, instance_group_id=None, + instance_group_types=None, marker=None): + """ + List EC2 instances in a cluster + + :type cluster_id: str + :param cluster_id: The cluster id of interest + :type instance_group_id: str + :param instance_group_id: The EC2 instance group id of interest + :type instance_group_types: list + :param instance_group_types: Filter by EC2 instance group type + :type marker: str + :param marker: Pagination marker + """ + params = { + 'ClusterId': cluster_id + } + + if instance_group_id: + params['InstanceGroupId'] = instance_group_id + if marker: + params['Marker'] = marker + + if instance_group_types: + self.build_list_params(params, instance_group_types, + 'InstanceGroupTypeList.member') + + return self.get_object('ListInstances', params, InstanceList) + + def list_steps(self, cluster_id, step_states=None, marker=None): + """ + List cluster steps + + :type cluster_id: str + :param cluster_id: The cluster id of interest + :type step_states: list + :param step_states: Filter by step states + :type marker: str + :param marker: Pagination marker + """ + params = { + 'ClusterId': cluster_id + } + + if marker: + params['Marker'] = marker + + if step_states: + self.build_list_params(params, step_states, 'StepStateList.member') + + return self.get_object('ListSteps', params, StepSummaryList) + + def add_tags(self, resource_id, tags): + """ + Create new metadata tags for the specified resource id. + + :type resource_id: str + :param resource_id: The cluster id + + :type tags: dict + :param tags: A dictionary containing the name/value pairs. + If you want to create only a tag name, the + value for that tag should be the empty string + (e.g. '') or None. + """ + assert isinstance(resource_id, six.string_types) + params = { + 'ResourceId': resource_id, + } + params.update(self._build_tag_list(tags)) + return self.get_status('AddTags', params, verb='POST') + + def remove_tags(self, resource_id, tags): + """ + Remove metadata tags for the specified resource id. + + :type resource_id: str + :param resource_id: The cluster id + + :type tags: list + :param tags: A list of tag names to remove. 
+ """ + params = { + 'ResourceId': resource_id, + } + params.update(self._build_string_list('TagKeys', tags)) + return self.get_status('RemoveTags', params, verb='POST') + + def terminate_jobflow(self, jobflow_id): + """ + Terminate an Elastic MapReduce job flow + + :type jobflow_id: str + :param jobflow_id: A jobflow id + """ + self.terminate_jobflows([jobflow_id]) + + def terminate_jobflows(self, jobflow_ids): + """ + Terminate an Elastic MapReduce job flow + + :type jobflow_ids: list + :param jobflow_ids: A list of job flow IDs + """ + params = {} + self.build_list_params(params, jobflow_ids, 'JobFlowIds.member') + return self.get_status('TerminateJobFlows', params, verb='POST') + + def add_jobflow_steps(self, jobflow_id, steps): + """ + Adds steps to a jobflow + + :type jobflow_id: str + :param jobflow_id: The job flow id + :type steps: list(boto.emr.Step) + :param steps: A list of steps to add to the job + """ + if not isinstance(steps, list): + steps = [steps] + params = {} + params['JobFlowId'] = jobflow_id + + # Step args + step_args = [self._build_step_args(step) for step in steps] + params.update(self._build_step_list(step_args)) + + return self.get_object( + 'AddJobFlowSteps', params, JobFlowStepList, verb='POST') + + def add_instance_groups(self, jobflow_id, instance_groups): + """ + Adds instance groups to a running cluster. + + :type jobflow_id: str + :param jobflow_id: The id of the jobflow which will take the + new instance groups + + :type instance_groups: list(boto.emr.InstanceGroup) + :param instance_groups: A list of instance groups to add to the job + """ + if not isinstance(instance_groups, list): + instance_groups = [instance_groups] + params = {} + params['JobFlowId'] = jobflow_id + params.update(self._build_instance_group_list_args(instance_groups)) + + return self.get_object('AddInstanceGroups', params, + AddInstanceGroupsResponse, verb='POST') + + def modify_instance_groups(self, instance_group_ids, new_sizes): + """ + Modify the number of nodes and configuration settings in an + instance group. 
+
+        :type instance_group_ids: list(str)
+        :param instance_group_ids: A list of the IDs of the instance
+            groups to be modified
+
+        :type new_sizes: list(int)
+        :param new_sizes: A list of the new sizes for each instance group
+        """
+        if not isinstance(instance_group_ids, list):
+            instance_group_ids = [instance_group_ids]
+        if not isinstance(new_sizes, list):
+            new_sizes = [new_sizes]
+
+        instance_groups = zip(instance_group_ids, new_sizes)
+
+        params = {}
+        for k, ig in enumerate(instance_groups):
+            # could be wrong - the example amazon gives uses
+            # InstanceRequestCount, while the api documentation
+            # says InstanceCount
+            params['InstanceGroups.member.%d.InstanceGroupId' % (k + 1)] = ig[0]
+            params['InstanceGroups.member.%d.InstanceCount' % (k + 1)] = ig[1]
+
+        return self.get_object('ModifyInstanceGroups', params,
+                               ModifyInstanceGroupsResponse, verb='POST')
+
+    def run_jobflow(self, name, log_uri=None, ec2_keyname=None,
+                    availability_zone=None,
+                    master_instance_type='m1.small',
+                    slave_instance_type='m1.small', num_instances=1,
+                    action_on_failure='TERMINATE_JOB_FLOW', keep_alive=False,
+                    enable_debugging=False,
+                    hadoop_version=None,
+                    steps=[],
+                    bootstrap_actions=[],
+                    instance_groups=None,
+                    additional_info=None,
+                    ami_version=None,
+                    api_params=None,
+                    visible_to_all_users=None,
+                    job_flow_role=None,
+                    service_role=None):
+        """
+        Runs a job flow
+        :type name: str
+        :param name: Name of the job flow
+
+        :type log_uri: str
+        :param log_uri: URI of the S3 bucket to place logs
+
+        :type ec2_keyname: str
+        :param ec2_keyname: EC2 key used for the instances
+
+        :type availability_zone: str
+        :param availability_zone: EC2 availability zone of the cluster
+
+        :type master_instance_type: str
+        :param master_instance_type: EC2 instance type of the master
+
+        :type slave_instance_type: str
+        :param slave_instance_type: EC2 instance type of the slave nodes
+
+        :type num_instances: int
+        :param num_instances: Number of instances in the Hadoop cluster
+
+        :type action_on_failure: str
+        :param action_on_failure: Action to take if a step terminates
+
+        :type keep_alive: bool
+        :param keep_alive: Denotes whether the cluster should stay
+            alive upon completion
+
+        :type enable_debugging: bool
+        :param enable_debugging: Denotes whether AWS console debugging
+            should be enabled.
+
+        :type hadoop_version: str
+        :param hadoop_version: Version of Hadoop to use. This no longer
+            defaults to '0.20' and now uses the AMI default.
+
+        :type steps: list(boto.emr.Step)
+        :param steps: List of steps to add with the job
+
+        :type bootstrap_actions: list(boto.emr.BootstrapAction)
+        :param bootstrap_actions: List of bootstrap actions that run
+            before Hadoop starts.
+
+        :type instance_groups: list(boto.emr.InstanceGroup)
+        :param instance_groups: Optional list of instance groups to
+            use when creating this job.
+            NB: When provided, this argument supersedes num_instances
+            and master/slave_instance_type.
+
+        :type ami_version: str
+        :param ami_version: Amazon Machine Image (AMI) version to use
+            for instances. Values accepted by EMR are '1.0', '2.0', and
+            'latest'; EMR currently defaults to '1.0' if you don't set
+            'ami_version'.
+
+        :type additional_info: JSON str
+        :param additional_info: A JSON string for selecting additional features
+
+        :type api_params: dict
+        :param api_params: a dictionary of additional parameters to pass
+            directly to the EMR API (so you don't have to upgrade boto to
+            use new EMR features). You can also delete an API parameter
+            by setting it to None.
+
+        :type visible_to_all_users: bool
+        :param visible_to_all_users: Whether the job flow is visible to all IAM
+            users of the AWS account associated with the job flow. If this
+            value is set to ``True``, all IAM users of that AWS
+            account can view and (if they have the proper policy permissions
+            set) manage the job flow. If it is set to ``False``, only
+            the IAM user that created the job flow can view and manage
+            it.
+
+        :type job_flow_role: str
+        :param job_flow_role: An IAM role for the job flow. The EC2
+            instances of the job flow assume this role. The default role is
+            ``EMRJobflowDefault``. In order to use the default role,
+            you must have already created it using the CLI.
+
+        :type service_role: str
+        :param service_role: The IAM role that will be assumed by the Amazon
+            EMR service to access AWS resources on your behalf.
+
+        :rtype: str
+        :return: The jobflow id
+        """
+        params = {}
+        if action_on_failure:
+            params['ActionOnFailure'] = action_on_failure
+        if log_uri:
+            params['LogUri'] = log_uri
+        params['Name'] = name
+
+        # Common instance args
+        common_params = self._build_instance_common_args(ec2_keyname,
+                                                         availability_zone,
+                                                         keep_alive,
+                                                         hadoop_version)
+        params.update(common_params)
+
+        # NB: according to the AWS API's error message, we must
+        # "configure instances either using instance count, master and
+        # slave instance type or instance groups but not both."
+        #
+        # Thus we switch here on the truthiness of instance_groups.
+        if not instance_groups:
+            # Instance args (the common case)
+            instance_params = self._build_instance_count_and_type_args(
+                master_instance_type,
+                slave_instance_type,
+                num_instances)
+            params.update(instance_params)
+        else:
+            # Instance group args (for spot instances or a heterogeneous cluster)
+            list_args = self._build_instance_group_list_args(instance_groups)
+            instance_params = dict(
+                ('Instances.%s' % k, v) for k, v in six.iteritems(list_args)
+            )
+            params.update(instance_params)
+
+        # Debugging step from EMR API docs
+        if enable_debugging:
+            debugging_step = JarStep(name='Setup Hadoop Debugging',
+                                     action_on_failure='TERMINATE_JOB_FLOW',
+                                     main_class=None,
+                                     jar=self.DebuggingJar,
+                                     step_args=self.DebuggingArgs)
+            steps = [debugging_step] + list(steps)
+
+        # Step args
+        if steps:
+            step_args = [self._build_step_args(step) for step in steps]
+            params.update(self._build_step_list(step_args))
+
+        if bootstrap_actions:
+            bootstrap_action_args = [self._build_bootstrap_action_args(bootstrap_action) for bootstrap_action in bootstrap_actions]
+            params.update(self._build_bootstrap_action_list(bootstrap_action_args))
+
+        if ami_version:
+            params['AmiVersion'] = ami_version
+
+        if additional_info is not None:
+            params['AdditionalInfo'] = additional_info
+
+        if api_params:
+            for key, value in six.iteritems(api_params):
+                if value is None:
+                    params.pop(key, None)
+                else:
+                    params[key] = value
+
+        if visible_to_all_users is not None:
+            if visible_to_all_users:
+                params['VisibleToAllUsers'] = 'true'
+            else:
+                params['VisibleToAllUsers'] = 'false'
+
+        if job_flow_role is not None:
+            params['JobFlowRole'] = job_flow_role
+
+        if service_role is not None:
+            params['ServiceRole'] = service_role
+
+        response = self.get_object(
+            'RunJobFlow', params, RunJobFlowResponse, verb='POST')
+        return response.jobflowid
+
+    def set_termination_protection(self, jobflow_id,
+                                   termination_protection_status):
+        """
+        Set termination protection on specified Elastic MapReduce job flows
+
+        :type jobflow_id: str
+        :param jobflow_id: The ID of the job flow
+
+        :type termination_protection_status: bool
+        :param termination_protection_status: Termination protection status
+        """
+        assert termination_protection_status in (True, False)
+
+        params = {}
+        params['TerminationProtected'] = (termination_protection_status and "true") or "false"
+        self.build_list_params(params, [jobflow_id], 'JobFlowIds.member')
+
+        return self.get_status('SetTerminationProtection', params, verb='POST')
+
+    def set_visible_to_all_users(self, jobflow_id, visibility):
+        """
+        Set whether specified Elastic MapReduce job flows are visible to all IAM users
+
+        :type jobflow_id: str
+        :param jobflow_id: The ID of the job flow
+
+        :type visibility: bool
+        :param visibility: Visibility
+        """
+        assert visibility in (True, False)
+
+        params = {}
+        params['VisibleToAllUsers'] = (visibility and "true") or "false"
+        self.build_list_params(params, [jobflow_id], 'JobFlowIds.member')
+
+        return self.get_status('SetVisibleToAllUsers', params, verb='POST')
+
+    def _build_bootstrap_action_args(self, bootstrap_action):
+        bootstrap_action_params = {}
+        bootstrap_action_params['ScriptBootstrapAction.Path'] = bootstrap_action.path
+
+        try:
+            bootstrap_action_params['Name'] = bootstrap_action.name
+        except AttributeError:
+            pass
+
+        args = bootstrap_action.args()
+        if args:
+            self.build_list_params(bootstrap_action_params, args, 'ScriptBootstrapAction.Args.member')
+
+        return bootstrap_action_params
+
+    def _build_step_args(self, step):
+        step_params = {}
+        step_params['ActionOnFailure'] = step.action_on_failure
+        step_params['HadoopJarStep.Jar'] = step.jar()
+
+        main_class = step.main_class()
+        if main_class:
+            step_params['HadoopJarStep.MainClass'] = main_class
+
+        args = step.args()
+        if args:
+            self.build_list_params(step_params, args, 'HadoopJarStep.Args.member')
+
+        step_params['Name'] = step.name
+        return step_params
+
+    def _build_bootstrap_action_list(self, bootstrap_actions):
+        if not isinstance(bootstrap_actions, list):
+            bootstrap_actions = [bootstrap_actions]
+
+        params = {}
+        for i, bootstrap_action in enumerate(bootstrap_actions):
+            for key, value in six.iteritems(bootstrap_action):
+                params['BootstrapActions.member.%s.%s' % (i + 1, key)] = value
+        return params
+
+    def _build_step_list(self, steps):
+        if not isinstance(steps, list):
+            steps = [steps]
+
+        params = {}
+        for i, step in enumerate(steps):
+            for key, value in six.iteritems(step):
+                params['Steps.member.%s.%s' % (i+1, key)] = value
+        return params
+
+    def _build_string_list(self, field, items):
+        if not isinstance(items, list):
+            items = [items]
+
+        params = {}
+        for i, item in enumerate(items):
+            params['%s.member.%s' % (field, i + 1)] = item
+        return params
+
+    def _build_tag_list(self, tags):
+        assert isinstance(tags, dict)
+
+        params = {}
+        for i, key_value in enumerate(sorted(six.iteritems(tags)), start=1):
+            key, value = key_value
+            current_prefix = 'Tags.member.%s' % i
+            params['%s.Key' % current_prefix] = key
+            if value:
+                params['%s.Value' % current_prefix] = value
+        return params
+
+    def _build_instance_common_args(self, ec2_keyname, availability_zone,
+                                    keep_alive, hadoop_version):
+        """
+        Takes a number of parameters used when starting a jobflow (as
+        specified in run_jobflow() above). Returns a comparable dict for
+        use in making a RunJobFlow request.
+ """ + params = { + 'Instances.KeepJobFlowAliveWhenNoSteps': str(keep_alive).lower(), + } + + if hadoop_version: + params['Instances.HadoopVersion'] = hadoop_version + if ec2_keyname: + params['Instances.Ec2KeyName'] = ec2_keyname + if availability_zone: + params['Instances.Placement.AvailabilityZone'] = availability_zone + + return params + + def _build_instance_count_and_type_args(self, master_instance_type, + slave_instance_type, num_instances): + """ + Takes a master instance type (string), a slave instance type + (string), and a number of instances. Returns a comparable dict + for use in making a RunJobFlow request. + """ + params = {'Instances.MasterInstanceType': master_instance_type, + 'Instances.SlaveInstanceType': slave_instance_type, + 'Instances.InstanceCount': num_instances} + return params + + def _build_instance_group_args(self, instance_group): + """ + Takes an InstanceGroup; returns a dict that, when its keys are + properly prefixed, can be used for describing InstanceGroups in + RunJobFlow or AddInstanceGroups requests. + """ + params = {'InstanceCount': instance_group.num_instances, + 'InstanceRole': instance_group.role, + 'InstanceType': instance_group.type, + 'Name': instance_group.name, + 'Market': instance_group.market} + if instance_group.market == 'SPOT': + params['BidPrice'] = instance_group.bidprice + return params + + def _build_instance_group_list_args(self, instance_groups): + """ + Takes a list of InstanceGroups, or a single InstanceGroup. Returns + a comparable dict for use in making a RunJobFlow or AddInstanceGroups + request. + """ + if not isinstance(instance_groups, list): + instance_groups = [instance_groups] + + params = {} + for i, instance_group in enumerate(instance_groups): + ig_dict = self._build_instance_group_args(instance_group) + for key, value in six.iteritems(ig_dict): + params['InstanceGroups.member.%d.%s' % (i+1, key)] = value + return params diff --git a/desktop/core/ext-py/boto-2.38.0/boto/emr/emrobject.py b/desktop/core/ext-py/boto-2.38.0/boto/emr/emrobject.py new file mode 100644 index 0000000000000000000000000000000000000000..73f7060ba17aaa0999aebc9bf7760e8cd084e27d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/emr/emrobject.py @@ -0,0 +1,511 @@ +# Copyright (c) 2010 Spotify AB +# Copyright (c) 2010 Jeremy Thurgood +# Copyright (c) 2010-2011 Yelp +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+
+"""
+This module contains EMR response objects
+"""
+
+from boto.resultset import ResultSet
+
+
+class EmrObject(object):
+    Fields = set()
+
+    def __init__(self, connection=None):
+        self.connection = connection
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name in self.Fields:
+            setattr(self, name.lower(), value)
+
+
+class RunJobFlowResponse(EmrObject):
+    Fields = set(['JobFlowId'])
+
+class AddInstanceGroupsResponse(EmrObject):
+    Fields = set(['InstanceGroupIds', 'JobFlowId'])
+
+class ModifyInstanceGroupsResponse(EmrObject):
+    Fields = set(['RequestId'])
+
+
+class Arg(EmrObject):
+    def __init__(self, connection=None):
+        self.value = None
+
+    def endElement(self, name, value, connection):
+        self.value = value
+
+
+class StepId(Arg):
+    pass
+
+
+class SupportedProduct(Arg):
+    pass
+
+
+class JobFlowStepList(EmrObject):
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.stepids = None
+
+    def startElement(self, name, attrs, connection):
+        if name == 'StepIds':
+            self.stepids = ResultSet([('member', StepId)])
+            return self.stepids
+        else:
+            return None
+
+
+class BootstrapAction(EmrObject):
+    Fields = set([
+        'Args',
+        'Name',
+        'Path',
+        'ScriptPath',
+    ])
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Args':
+            self.args = ResultSet([('member', Arg)])
+            return self.args
+
+
+class KeyValue(EmrObject):
+    Fields = set([
+        'Key',
+        'Value',
+    ])
+
+
+class Step(EmrObject):
+    Fields = set([
+        'ActionOnFailure',
+        'CreationDateTime',
+        'EndDateTime',
+        'Jar',
+        'LastStateChangeReason',
+        'MainClass',
+        'Name',
+        'StartDateTime',
+        'State',
+    ])
+
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.args = None
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Args':
+            self.args = ResultSet([('member', Arg)])
+            return self.args
+        if name == 'Properties':
+            self.properties = ResultSet([('member', KeyValue)])
+            return self.properties
+
+
+class InstanceGroup(EmrObject):
+    Fields = set([
+        'BidPrice',
+        'CreationDateTime',
+        'EndDateTime',
+        'InstanceGroupId',
+        'InstanceRequestCount',
+        'InstanceRole',
+        'InstanceRunningCount',
+        'InstanceType',
+        'LastStateChangeReason',
+        'LaunchGroup',
+        'Market',
+        'Name',
+        'ReadyDateTime',
+        'StartDateTime',
+        'State',
+    ])
+
+
+class JobFlow(EmrObject):
+    Fields = set([
+        'AmiVersion',
+        'AvailabilityZone',
+        'CreationDateTime',
+        'Ec2KeyName',
+        'EndDateTime',
+        'HadoopVersion',
+        'Id',
+        'InstanceCount',
+        'JobFlowId',
+        'KeepJobFlowAliveWhenNoSteps',
+        'LastStateChangeReason',
+        'LogUri',
+        'MasterInstanceId',
+        'MasterInstanceType',
+        'MasterPublicDnsName',
+        'Name',
+        'NormalizedInstanceHours',
+        'ReadyDateTime',
+        'RequestId',
+        'SlaveInstanceType',
+        'StartDateTime',
+        'State',
+        'TerminationProtected',
+        'Type',
+        'Value',
+        'VisibleToAllUsers',
+    ])
+
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.steps = None
+        self.instancegroups = None
+        self.bootstrapactions = None
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Steps':
+            self.steps = ResultSet([('member', Step)])
+            return self.steps
+        elif name == 'InstanceGroups':
+            self.instancegroups = ResultSet([('member', InstanceGroup)])
+            return self.instancegroups
+        elif name == 'BootstrapActions':
+            self.bootstrapactions = ResultSet([('member', BootstrapAction)])
+            return self.bootstrapactions
+        elif name == 'SupportedProducts':
+            self.supported_products = ResultSet([('member',
SupportedProduct)]) + return self.supported_products + else: + return None + + +class ClusterTimeline(EmrObject): + Fields = set([ + 'CreationDateTime', + 'ReadyDateTime', + 'EndDateTime' + ]) + +class ClusterStateChangeReason(EmrObject): + Fields = set([ + 'Code', + 'Message' + ]) + +class ClusterStatus(EmrObject): + Fields = set([ + 'State', + 'StateChangeReason', + 'Timeline' + ]) + + def __init__(self, connection=None): + self.connection = connection + self.timeline = None + + def startElement(self, name, attrs, connection): + if name == 'Timeline': + self.timeline = ClusterTimeline() + return self.timeline + elif name == 'StateChangeReason': + self.statechangereason = ClusterStateChangeReason() + return self.statechangereason + else: + return None + + +class Ec2InstanceAttributes(EmrObject): + Fields = set([ + 'Ec2KeyName', + 'Ec2SubnetId', + 'Ec2AvailabilityZone', + 'IamInstanceProfile' + ]) + + +class Application(EmrObject): + Fields = set([ + 'Name', + 'Version', + 'Args', + 'AdditionalInfo' + ]) + + +class Cluster(EmrObject): + Fields = set([ + 'Id', + 'Name', + 'LogUri', + 'RequestedAmiVersion', + 'RunningAmiVersion', + 'AutoTerminate', + 'TerminationProtected', + 'VisibleToAllUsers', + 'MasterPublicDnsName', + 'NormalizedInstanceHours', + 'ServiceRole' + ]) + + def __init__(self, connection=None): + self.connection = connection + self.status = None + self.ec2instanceattributes = None + self.applications = None + self.tags = None + + def startElement(self, name, attrs, connection): + if name == 'Status': + self.status = ClusterStatus() + return self.status + elif name == 'Ec2InstanceAttributes': + self.ec2instanceattributes = Ec2InstanceAttributes() + return self.ec2instanceattributes + elif name == 'Applications': + self.applications = ResultSet([('member', Application)]) + return self.applications + elif name == 'Tags': + self.tags = ResultSet([('member', KeyValue)]) + return self.tags + else: + return None + + +class ClusterSummary(EmrObject): + Fields = set([ + 'Id', + 'Name', + 'NormalizedInstanceHours' + ]) + + def __init__(self, connection): + self.connection = connection + self.status = None + + def startElement(self, name, attrs, connection): + if name == 'Status': + self.status = ClusterStatus() + return self.status + else: + return None + + +class ClusterSummaryList(EmrObject): + Fields = set([ + 'Marker' + ]) + + def __init__(self, connection): + self.connection = connection + self.clusters = None + + def startElement(self, name, attrs, connection): + if name == 'Clusters': + self.clusters = ResultSet([('member', ClusterSummary)]) + return self.clusters + else: + return None + + +class StepConfig(EmrObject): + Fields = set([ + 'Jar', + 'MainClass' + ]) + + def __init__(self, connection=None): + self.connection = connection + self.properties = None + self.args = None + + def startElement(self, name, attrs, connection): + if name == 'Properties': + self.properties = ResultSet([('member', KeyValue)]) + return self.properties + elif name == 'Args': + self.args = ResultSet([('member', Arg)]) + return self.args + else: + return None + + +class HadoopStep(EmrObject): + Fields = set([ + 'Id', + 'Name', + 'ActionOnFailure' + ]) + + def __init__(self, connection=None): + self.connection = connection + self.config = None + self.status = None + + def startElement(self, name, attrs, connection): + if name == 'Config': + self.config = StepConfig() + return self.config + elif name == 'Status': + self.status = ClusterStatus() + return self.status + else: + return None + + + +class 
InstanceGroupInfo(EmrObject): + Fields = set([ + 'Id', + 'Name', + 'Market', + 'InstanceGroupType', + 'BidPrice', + 'InstanceType', + 'RequestedInstanceCount', + 'RunningInstanceCount' + ]) + + def __init__(self, connection=None): + self.connection = connection + self.status = None + + def startElement(self, name, attrs, connection): + if name == 'Status': + self.status = ClusterStatus() + return self.status + else: + return None + + +class InstanceGroupList(EmrObject): + Fields = set([ + 'Marker' + ]) + + def __init__(self, connection=None): + self.connection = connection + self.instancegroups = None + + def startElement(self, name, attrs, connection): + if name == 'InstanceGroups': + self.instancegroups = ResultSet([('member', InstanceGroupInfo)]) + return self.instancegroups + else: + return None + + +class InstanceInfo(EmrObject): + Fields = set([ + 'Id', + 'Ec2InstanceId', + 'PublicDnsName', + 'PublicIpAddress', + 'PrivateDnsName', + 'PrivateIpAddress' + ]) + + def __init__(self, connection=None): + self.connection = connection + self.status = None + + def startElement(self, name, attrs, connection): + if name == 'Status': + self.status = ClusterStatus() + return self.status + else: + return None + + +class InstanceList(EmrObject): + Fields = set([ + 'Marker' + ]) + + def __init__(self, connection=None): + self.connection = connection + self.instances = None + + def startElement(self, name, attrs, connection): + if name == 'Instances': + self.instances = ResultSet([('member', InstanceInfo)]) + return self.instances + else: + return None + + +class StepSummary(EmrObject): + Fields = set([ + 'Id', + 'Name' + ]) + + def __init__(self, connection=None): + self.connection = connection + self.status = None + self.config = None + + def startElement(self, name, attrs, connection): + if name == 'Status': + self.status = ClusterStatus() + return self.status + elif name == 'Config': + self.config = StepConfig() + return self.config + else: + return None + + +class StepSummaryList(EmrObject): + Fields = set([ + 'Marker' + ]) + + def __init__(self, connection=None): + self.connection = connection + self.steps = None + + def startElement(self, name, attrs, connection): + if name == 'Steps': + self.steps = ResultSet([('member', StepSummary)]) + return self.steps + else: + return None + + +class BootstrapActionList(EmrObject): + Fields = set([ + 'Marker' + ]) + + def __init__(self, connection=None): + self.connection = connection + self.actions = None + + def startElement(self, name, attrs, connection): + if name == 'BootstrapActions': + self.actions = ResultSet([('member', BootstrapAction)]) + return self.actions + else: + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/emr/instance_group.py b/desktop/core/ext-py/boto-2.38.0/boto/emr/instance_group.py new file mode 100644 index 0000000000000000000000000000000000000000..6ab63c5d5a42c9c632ae4a35f25c656d0ea06e29 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/emr/instance_group.py @@ -0,0 +1,43 @@ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# 
in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class InstanceGroup(object): + def __init__(self, num_instances, role, type, market, name, bidprice=None): + self.num_instances = num_instances + self.role = role + self.type = type + self.market = market + self.name = name + if market == 'SPOT': + if not bidprice: + raise ValueError('bidprice must be specified if market == SPOT') + self.bidprice = str(bidprice) + + def __repr__(self): + if self.market == 'SPOT': + return '%s.%s(name=%r, num_instances=%r, role=%r, type=%r, market = %r, bidprice = %r)' % ( + self.__class__.__module__, self.__class__.__name__, + self.name, self.num_instances, self.role, self.type, self.market, + self.bidprice) + else: + return '%s.%s(name=%r, num_instances=%r, role=%r, type=%r, market = %r)' % ( + self.__class__.__module__, self.__class__.__name__, + self.name, self.num_instances, self.role, self.type, self.market) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/emr/step.py b/desktop/core/ext-py/boto-2.38.0/boto/emr/step.py new file mode 100644 index 0000000000000000000000000000000000000000..de6835fb4ebdacaa0328e7a1d562b4820eb77c9d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/emr/step.py @@ -0,0 +1,283 @@ +# Copyright (c) 2010 Spotify AB +# Copyright (c) 2010-2011 Yelp +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
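[Editor's note] The InstanceGroup class just above enforces a bid price only when market == 'SPOT'. A sketch of a mixed on-demand/spot layout; such a list can be passed as the instance_groups argument of EmrConnection.run_jobflow, where it supersedes num_instances and the master/slave instance types:

    from boto.emr.instance_group import InstanceGroup

    groups = [
        InstanceGroup(1, 'MASTER', 'm1.small', 'ON_DEMAND', 'master'),
        InstanceGroup(2, 'CORE', 'm1.small', 'ON_DEMAND', 'core'),
        # market='SPOT' without a bidprice raises ValueError in __init__.
        InstanceGroup(4, 'TASK', 'm1.small', 'SPOT', 'task', bidprice='0.08'),
    ]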
+
+from boto.compat import six
+
+
+class Step(object):
+    """
+    Jobflow Step base class
+    """
+    def jar(self):
+        """
+        :rtype: str
+        :return: URI to the jar
+        """
+        raise NotImplementedError()
+
+    def args(self):
+        """
+        :rtype: list(str)
+        :return: List of arguments for the step
+        """
+        raise NotImplementedError()
+
+    def main_class(self):
+        """
+        :rtype: str
+        :return: The main class name
+        """
+        raise NotImplementedError()
+
+
+class JarStep(Step):
+    """
+    Custom jar step
+    """
+    def __init__(self, name, jar, main_class=None,
+                 action_on_failure='TERMINATE_JOB_FLOW', step_args=None):
+        """
+        An elastic mapreduce step that executes a jar
+
+        :type name: str
+        :param name: The name of the step
+        :type jar: str
+        :param jar: S3 URI to the Jar file
+        :type main_class: str
+        :param main_class: The class to execute in the jar
+        :type action_on_failure: str
+        :param action_on_failure: An action, defined in the EMR docs to
+            take on failure.
+        :type step_args: list(str)
+        :param step_args: A list of arguments to pass to the step
+        """
+        self.name = name
+        self._jar = jar
+        self._main_class = main_class
+        self.action_on_failure = action_on_failure
+
+        if isinstance(step_args, six.string_types):
+            step_args = [step_args]
+
+        self.step_args = step_args
+
+    def jar(self):
+        return self._jar
+
+    def args(self):
+        args = []
+
+        if self.step_args:
+            args.extend(self.step_args)
+
+        return args
+
+    def main_class(self):
+        return self._main_class
+
+
+class StreamingStep(Step):
+    """
+    Hadoop streaming step
+    """
+    def __init__(self, name, mapper, reducer=None, combiner=None,
+                 action_on_failure='TERMINATE_JOB_FLOW',
+                 cache_files=None, cache_archives=None,
+                 step_args=None, input=None, output=None,
+                 jar='/home/hadoop/contrib/streaming/hadoop-streaming.jar'):
+        """
+        A hadoop streaming elastic mapreduce step
+
+        :type name: str
+        :param name: The name of the step
+        :type mapper: str
+        :param mapper: The mapper URI
+        :type reducer: str
+        :param reducer: The reducer URI
+        :type combiner: str
+        :param combiner: The combiner URI. Only works for Hadoop 0.20
+            and later!
+        :type action_on_failure: str
+        :param action_on_failure: An action, defined in the EMR docs to
+            take on failure.
+        :type cache_files: list(str)
+        :param cache_files: A list of cache files to be bundled with the job
+        :type cache_archives: list(str)
+        :param cache_archives: A list of jar archives to be bundled with
+            the job
+        :type step_args: list(str)
+        :param step_args: A list of arguments to pass to the step
+        :type input: str or a list of str
+        :param input: The input uri
+        :type output: str
+        :param output: The output uri
+        :type jar: str
+        :param jar: The hadoop streaming jar. This can be either a local
+            path on the master node, or an s3:// URI.
+        """
+        self.name = name
+        self.mapper = mapper
+        self.reducer = reducer
+        self.combiner = combiner
+        self.action_on_failure = action_on_failure
+        self.cache_files = cache_files
+        self.cache_archives = cache_archives
+        self.input = input
+        self.output = output
+        self._jar = jar
+
+        if isinstance(step_args, six.string_types):
+            step_args = [step_args]
+
+        self.step_args = step_args
+
+    def jar(self):
+        return self._jar
+
+    def main_class(self):
+        return None
+
+    def args(self):
+        args = []
+
+        # put extra args BEFORE -mapper and -reducer so that e.g.
-libjar + # will work + if self.step_args: + args.extend(self.step_args) + + args.extend(['-mapper', self.mapper]) + + if self.combiner: + args.extend(['-combiner', self.combiner]) + + if self.reducer: + args.extend(['-reducer', self.reducer]) + else: + args.extend(['-jobconf', 'mapred.reduce.tasks=0']) + + if self.input: + if isinstance(self.input, list): + for input in self.input: + args.extend(('-input', input)) + else: + args.extend(('-input', self.input)) + if self.output: + args.extend(('-output', self.output)) + + if self.cache_files: + for cache_file in self.cache_files: + args.extend(('-cacheFile', cache_file)) + + if self.cache_archives: + for cache_archive in self.cache_archives: + args.extend(('-cacheArchive', cache_archive)) + + return args + + def __repr__(self): + return '%s.%s(name=%r, mapper=%r, reducer=%r, action_on_failure=%r, cache_files=%r, cache_archives=%r, step_args=%r, input=%r, output=%r, jar=%r)' % ( + self.__class__.__module__, self.__class__.__name__, + self.name, self.mapper, self.reducer, self.action_on_failure, + self.cache_files, self.cache_archives, self.step_args, + self.input, self.output, self._jar) + + +class ScriptRunnerStep(JarStep): + + ScriptRunnerJar = 's3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar' + + def __init__(self, name, **kw): + super(ScriptRunnerStep, self).__init__(name, self.ScriptRunnerJar, **kw) + + +class PigBase(ScriptRunnerStep): + + BaseArgs = ['s3n://us-east-1.elasticmapreduce/libs/pig/pig-script', + '--base-path', 's3n://us-east-1.elasticmapreduce/libs/pig/'] + + +class InstallPigStep(PigBase): + """ + Install pig on emr step + """ + + InstallPigName = 'Install Pig' + + def __init__(self, pig_versions='latest'): + step_args = [] + step_args.extend(self.BaseArgs) + step_args.extend(['--install-pig']) + step_args.extend(['--pig-versions', pig_versions]) + super(InstallPigStep, self).__init__(self.InstallPigName, step_args=step_args) + + +class PigStep(PigBase): + """ + Pig script step + """ + + def __init__(self, name, pig_file, pig_versions='latest', pig_args=[]): + step_args = [] + step_args.extend(self.BaseArgs) + step_args.extend(['--pig-versions', pig_versions]) + step_args.extend(['--run-pig-script', '--args', '-f', pig_file]) + step_args.extend(pig_args) + super(PigStep, self).__init__(name, step_args=step_args) + + +class HiveBase(ScriptRunnerStep): + + BaseArgs = ['s3n://us-east-1.elasticmapreduce/libs/hive/hive-script', + '--base-path', 's3n://us-east-1.elasticmapreduce/libs/hive/'] + + +class InstallHiveStep(HiveBase): + """ + Install Hive on EMR step + """ + InstallHiveName = 'Install Hive' + + def __init__(self, hive_versions='latest', hive_site=None): + step_args = [] + step_args.extend(self.BaseArgs) + step_args.extend(['--install-hive']) + step_args.extend(['--hive-versions', hive_versions]) + if hive_site is not None: + step_args.extend(['--hive-site=%s' % hive_site]) + super(InstallHiveStep, self).__init__(self.InstallHiveName, + step_args=step_args) + + +class HiveStep(HiveBase): + """ + Hive script step + """ + + def __init__(self, name, hive_file, hive_versions='latest', + hive_args=None): + step_args = [] + step_args.extend(self.BaseArgs) + step_args.extend(['--hive-versions', hive_versions]) + step_args.extend(['--run-hive-script', '--args', '-f', hive_file]) + if hive_args is not None: + step_args.extend(hive_args) + super(HiveStep, self).__init__(name, step_args=step_args) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/endpoints.json 
b/desktop/core/ext-py/boto-2.38.0/boto/endpoints.json new file mode 100644 index 0000000000000000000000000000000000000000..4e4afe84e109ab454a6204088c0da55f3b860667 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/endpoints.json @@ -0,0 +1,419 @@ +{ + "autoscaling": { + "ap-northeast-1": "autoscaling.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "autoscaling.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "autoscaling.ap-southeast-2.amazonaws.com", + "cn-north-1": "autoscaling.cn-north-1.amazonaws.com.cn", + "eu-west-1": "autoscaling.eu-west-1.amazonaws.com", + "sa-east-1": "autoscaling.sa-east-1.amazonaws.com", + "us-east-1": "autoscaling.us-east-1.amazonaws.com", + "us-gov-west-1": "autoscaling.us-gov-west-1.amazonaws.com", + "us-west-1": "autoscaling.us-west-1.amazonaws.com", + "us-west-2": "autoscaling.us-west-2.amazonaws.com", + "eu-central-1": "autoscaling.eu-central-1.amazonaws.com" + }, + "awslambda": { + "us-east-1": "lambda.us-east-1.amazonaws.com", + "us-west-2": "lambda.us-west-2.amazonaws.com", + "eu-west-1": "lambda.eu-west-1.amazonaws.com" + }, + "cloudformation": { + "ap-northeast-1": "cloudformation.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "cloudformation.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "cloudformation.ap-southeast-2.amazonaws.com", + "cn-north-1": "cloudformation.cn-north-1.amazonaws.com.cn", + "eu-west-1": "cloudformation.eu-west-1.amazonaws.com", + "sa-east-1": "cloudformation.sa-east-1.amazonaws.com", + "us-east-1": "cloudformation.us-east-1.amazonaws.com", + "us-gov-west-1": "cloudformation.us-gov-west-1.amazonaws.com", + "us-west-1": "cloudformation.us-west-1.amazonaws.com", + "us-west-2": "cloudformation.us-west-2.amazonaws.com", + "eu-central-1": "cloudformation.eu-central-1.amazonaws.com" + }, + "cloudfront": { + "ap-northeast-1": "cloudfront.amazonaws.com", + "ap-southeast-1": "cloudfront.amazonaws.com", + "ap-southeast-2": "cloudfront.amazonaws.com", + "eu-west-1": "cloudfront.amazonaws.com", + "sa-east-1": "cloudfront.amazonaws.com", + "us-east-1": "cloudfront.amazonaws.com", + "us-west-1": "cloudfront.amazonaws.com", + "us-west-2": "cloudfront.amazonaws.com", + "eu-central-1": "cloudfront.amazonaws.com" + }, + "cloudhsm": { + "us-east-1": "cloudhsm.us-east-1.amazonaws.com", + "us-west-2": "cloudhsm.us-west-2.amazonaws.com", + "eu-west-1": "cloudhsm.eu-west-1.amazonaws.com", + "eu-central-1": "cloudhsm.eu-central-1.amazonaws.com", + "ap-southeast-2": "cloudhsm.ap-southeast-2.amazonaws.com" + }, + "cloudsearch": { + "ap-southeast-1": "cloudsearch.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "cloudsearch.ap-southeast-2.amazonaws.com", + "ap-northeast-1": "cloudsearch.ap-northeast-1.amazonaws.com", + "sa-east-1": "cloudsearch.sa-east-1.amazonaws.com", + "eu-west-1": "cloudsearch.eu-west-1.amazonaws.com", + "us-east-1": "cloudsearch.us-east-1.amazonaws.com", + "us-west-1": "cloudsearch.us-west-1.amazonaws.com", + "us-west-2": "cloudsearch.us-west-2.amazonaws.com", + "eu-central-1": "cloudsearch.eu-central-1.amazonaws.com" + }, + "cloudsearchdomain": { + "ap-southeast-1": "cloudsearch.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "cloudsearch.ap-southeast-2.amazonaws.com", + "ap-northeast-1": "cloudsearch.ap-northeast-1.amazonaws.com", + "sa-east-1": "cloudsearch.sa-east-1.amazonaws.com", + "eu-west-1": "cloudsearch.eu-west-1.amazonaws.com", + "us-east-1": "cloudsearch.us-east-1.amazonaws.com", + "us-west-1": "cloudsearch.us-west-1.amazonaws.com", + "us-west-2": "cloudsearch.us-west-2.amazonaws.com", + 
"eu-central-1": "cloudsearch.eu-central-1.amazonaws.com" + }, + "cloudtrail": { + "ap-northeast-1": "cloudtrail.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "cloudtrail.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "cloudtrail.ap-southeast-2.amazonaws.com", + "eu-west-1": "cloudtrail.eu-west-1.amazonaws.com", + "sa-east-1": "cloudtrail.sa-east-1.amazonaws.com", + "us-east-1": "cloudtrail.us-east-1.amazonaws.com", + "us-west-1": "cloudtrail.us-west-1.amazonaws.com", + "us-west-2": "cloudtrail.us-west-2.amazonaws.com", + "eu-central-1": "cloudtrail.eu-central-1.amazonaws.com" + }, + "cloudwatch": { + "ap-northeast-1": "monitoring.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "monitoring.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "monitoring.ap-southeast-2.amazonaws.com", + "cn-north-1": "monitoring.cn-north-1.amazonaws.com.cn", + "eu-west-1": "monitoring.eu-west-1.amazonaws.com", + "sa-east-1": "monitoring.sa-east-1.amazonaws.com", + "us-east-1": "monitoring.us-east-1.amazonaws.com", + "us-gov-west-1": "monitoring.us-gov-west-1.amazonaws.com", + "us-west-1": "monitoring.us-west-1.amazonaws.com", + "us-west-2": "monitoring.us-west-2.amazonaws.com", + "eu-central-1": "monitoring.eu-central-1.amazonaws.com" + }, + "codedeploy": { + "us-east-1": "codedeploy.us-east-1.amazonaws.com", + "us-west-2": "codedeploy.us-west-2.amazonaws.com" + }, + "cognito-identity": { + "us-east-1": "cognito-identity.us-east-1.amazonaws.com" + }, + "cognito-sync": { + "us-east-1": "cognito-sync.us-east-1.amazonaws.com" + }, + "configservice": { + "us-east-1": "config.us-east-1.amazonaws.com", + "us-west-2": "config.us-west-2.amazonaws.com", + "eu-west-1": "config.eu-west-1.amazonaws.com", + "ap-southeast-2": "config.ap-southeast-2.amazonaws.com" + }, + "datapipeline": { + "us-east-1": "datapipeline.us-east-1.amazonaws.com", + "us-west-2": "datapipeline.us-west-2.amazonaws.com", + "eu-west-1": "datapipeline.eu-west-1.amazonaws.com", + "ap-southeast-2": "datapipeline.ap-southeast-2.amazonaws.com", + "ap-northeast-1": "datapipeline.ap-northeast-1.amazonaws.com" + }, + "directconnect": { + "ap-northeast-1": "directconnect.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "directconnect.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "directconnect.ap-southeast-2.amazonaws.com", + "eu-west-1": "directconnect.eu-west-1.amazonaws.com", + "sa-east-1": "directconnect.sa-east-1.amazonaws.com", + "us-east-1": "directconnect.us-east-1.amazonaws.com", + "us-west-1": "directconnect.us-west-1.amazonaws.com", + "us-west-2": "directconnect.us-west-2.amazonaws.com", + "eu-central-1": "directconnect.eu-central-1.amazonaws.com" + }, + "dynamodb": { + "ap-northeast-1": "dynamodb.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "dynamodb.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "dynamodb.ap-southeast-2.amazonaws.com", + "cn-north-1": "dynamodb.cn-north-1.amazonaws.com.cn", + "eu-west-1": "dynamodb.eu-west-1.amazonaws.com", + "sa-east-1": "dynamodb.sa-east-1.amazonaws.com", + "us-east-1": "dynamodb.us-east-1.amazonaws.com", + "us-gov-west-1": "dynamodb.us-gov-west-1.amazonaws.com", + "us-west-1": "dynamodb.us-west-1.amazonaws.com", + "us-west-2": "dynamodb.us-west-2.amazonaws.com", + "eu-central-1": "dynamodb.eu-central-1.amazonaws.com" + }, + "ec2": { + "ap-northeast-1": "ec2.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "ec2.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "ec2.ap-southeast-2.amazonaws.com", + "cn-north-1": "ec2.cn-north-1.amazonaws.com.cn", + "eu-west-1": 
"ec2.eu-west-1.amazonaws.com", + "sa-east-1": "ec2.sa-east-1.amazonaws.com", + "us-east-1": "ec2.us-east-1.amazonaws.com", + "us-gov-west-1": "ec2.us-gov-west-1.amazonaws.com", + "us-west-1": "ec2.us-west-1.amazonaws.com", + "us-west-2": "ec2.us-west-2.amazonaws.com", + "eu-central-1": "ec2.eu-central-1.amazonaws.com" + }, + "ec2containerservice": { + "us-east-1": "ecs.us-east-1.amazonaws.com" + }, + "elasticache": { + "ap-northeast-1": "elasticache.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "elasticache.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "elasticache.ap-southeast-2.amazonaws.com", + "cn-north-1": "elasticache.cn-north-1.amazonaws.com.cn", + "eu-west-1": "elasticache.eu-west-1.amazonaws.com", + "sa-east-1": "elasticache.sa-east-1.amazonaws.com", + "us-east-1": "elasticache.us-east-1.amazonaws.com", + "us-west-1": "elasticache.us-west-1.amazonaws.com", + "us-west-2": "elasticache.us-west-2.amazonaws.com", + "eu-central-1": "elasticache.eu-central-1.amazonaws.com" + }, + "elasticbeanstalk": { + "ap-northeast-1": "elasticbeanstalk.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "elasticbeanstalk.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "elasticbeanstalk.ap-southeast-2.amazonaws.com", + "eu-west-1": "elasticbeanstalk.eu-west-1.amazonaws.com", + "sa-east-1": "elasticbeanstalk.sa-east-1.amazonaws.com", + "us-east-1": "elasticbeanstalk.us-east-1.amazonaws.com", + "us-west-1": "elasticbeanstalk.us-west-1.amazonaws.com", + "us-west-2": "elasticbeanstalk.us-west-2.amazonaws.com", + "eu-central-1": "elasticbeanstalk.eu-central-1.amazonaws.com" + }, + "elasticloadbalancing": { + "ap-northeast-1": "elasticloadbalancing.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "elasticloadbalancing.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "elasticloadbalancing.ap-southeast-2.amazonaws.com", + "cn-north-1": "elasticloadbalancing.cn-north-1.amazonaws.com.cn", + "eu-west-1": "elasticloadbalancing.eu-west-1.amazonaws.com", + "sa-east-1": "elasticloadbalancing.sa-east-1.amazonaws.com", + "us-east-1": "elasticloadbalancing.us-east-1.amazonaws.com", + "us-gov-west-1": "elasticloadbalancing.us-gov-west-1.amazonaws.com", + "us-west-1": "elasticloadbalancing.us-west-1.amazonaws.com", + "us-west-2": "elasticloadbalancing.us-west-2.amazonaws.com", + "eu-central-1": "elasticloadbalancing.eu-central-1.amazonaws.com" + }, + "elasticmapreduce": { + "ap-northeast-1": "ap-northeast-1.elasticmapreduce.amazonaws.com", + "ap-southeast-1": "ap-southeast-1.elasticmapreduce.amazonaws.com", + "ap-southeast-2": "ap-southeast-2.elasticmapreduce.amazonaws.com", + "cn-north-1": "elasticmapreduce.cn-north-1.amazonaws.com.cn", + "eu-west-1": "elasticmapreduce.eu-west-1.amazonaws.com", + "sa-east-1": "sa-east-1.elasticmapreduce.amazonaws.com", + "us-east-1": "elasticmapreduce.us-east-1.amazonaws.com", + "us-gov-west-1": "us-gov-west-1.elasticmapreduce.amazonaws.com", + "us-west-1": "us-west-1.elasticmapreduce.amazonaws.com", + "us-west-2": "us-west-2.elasticmapreduce.amazonaws.com", + "eu-central-1": "elasticmapreduce.eu-central-1.amazonaws.com" + }, + "elastictranscoder": { + "ap-northeast-1": "elastictranscoder.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "elastictranscoder.ap-southeast-1.amazonaws.com", + "eu-west-1": "elastictranscoder.eu-west-1.amazonaws.com", + "us-east-1": "elastictranscoder.us-east-1.amazonaws.com", + "us-west-1": "elastictranscoder.us-west-1.amazonaws.com", + "us-west-2": "elastictranscoder.us-west-2.amazonaws.com", + "eu-central-1": 
"elastictranscoder.eu-central-1.amazonaws.com" + }, + "glacier": { + "ap-northeast-1": "glacier.ap-northeast-1.amazonaws.com", + "ap-southeast-2": "glacier.ap-southeast-2.amazonaws.com", + "cn-north-1": "glacier.cn-north-1.amazonaws.com.cn", + "eu-west-1": "glacier.eu-west-1.amazonaws.com", + "us-east-1": "glacier.us-east-1.amazonaws.com", + "us-west-1": "glacier.us-west-1.amazonaws.com", + "us-west-2": "glacier.us-west-2.amazonaws.com", + "eu-central-1": "glacier.eu-central-1.amazonaws.com", + "us-gov-west-1": "glacier.us-gov-west-1.amazonaws.com" + }, + "iam": { + "ap-northeast-1": "iam.amazonaws.com", + "ap-southeast-1": "iam.amazonaws.com", + "ap-southeast-2": "iam.amazonaws.com", + "cn-north-1": "iam.cn-north-1.amazonaws.com.cn", + "eu-central-1": "iam.amazonaws.com", + "eu-west-1": "iam.amazonaws.com", + "sa-east-1": "iam.amazonaws.com", + "us-east-1": "iam.amazonaws.com", + "us-gov-west-1": "iam.us-gov.amazonaws.com", + "us-west-1": "iam.amazonaws.com", + "us-west-2": "iam.amazonaws.com" + }, + "importexport": { + "ap-northeast-1": "importexport.amazonaws.com", + "ap-southeast-1": "importexport.amazonaws.com", + "ap-southeast-2": "importexport.amazonaws.com", + "eu-west-1": "importexport.amazonaws.com", + "sa-east-1": "importexport.amazonaws.com", + "us-east-1": "importexport.amazonaws.com", + "us-west-1": "importexport.amazonaws.com", + "us-west-2": "importexport.amazonaws.com" + }, + "kinesis": { + "us-east-1": "kinesis.us-east-1.amazonaws.com", + "us-west-2": "kinesis.us-west-2.amazonaws.com", + "eu-west-1": "kinesis.eu-west-1.amazonaws.com", + "ap-southeast-1": "kinesis.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "kinesis.ap-southeast-2.amazonaws.com", + "ap-northeast-1": "kinesis.ap-northeast-1.amazonaws.com", + "eu-central-1": "kinesis.eu-central-1.amazonaws.com" + }, + "kms": { + "us-east-1": "kms.us-east-1.amazonaws.com", + "us-west-1": "kms.us-west-1.amazonaws.com", + "us-west-2": "kms.us-west-2.amazonaws.com", + "eu-west-1": "kms.eu-west-1.amazonaws.com", + "eu-central-1": "kms.eu-central-1.amazonaws.com", + "ap-southeast-2": "kms.ap-southeast-2.amazonaws.com", + "ap-southeast-1": "kms.ap-southeast-1.amazonaws.com", + "ap-northeast-1": "kms.ap-northeast-1.amazonaws.com", + "sa-east-1": "kms.sa-east-1.amazonaws.com" + }, + "logs": { + "us-east-1": "logs.us-east-1.amazonaws.com", + "us-west-2": "logs.us-west-2.amazonaws.com", + "eu-west-1": "logs.eu-west-1.amazonaws.com", + "eu-central-1": "logs.eu-central-1.amazonaws.com" + }, + "opsworks": { + "us-east-1": "opsworks.us-east-1.amazonaws.com", + "eu-central-1": "opsworks.eu-central-1.amazonaws.com" + }, + "machinelearning": { + "us-east-1": "machinelearning.us-east-1.amazonaws.com", + "us-west-2": "machinelearning.us-west-2.amazonaws.com" + }, + "rds": { + "ap-northeast-1": "rds.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "rds.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "rds.ap-southeast-2.amazonaws.com", + "cn-north-1": "rds.cn-north-1.amazonaws.com.cn", + "eu-west-1": "rds.eu-west-1.amazonaws.com", + "sa-east-1": "rds.sa-east-1.amazonaws.com", + "us-east-1": "rds.amazonaws.com", + "us-gov-west-1": "rds.us-gov-west-1.amazonaws.com", + "us-west-1": "rds.us-west-1.amazonaws.com", + "us-west-2": "rds.us-west-2.amazonaws.com", + "eu-central-1": "rds.eu-central-1.amazonaws.com" + }, + "redshift": { + "ap-northeast-1": "redshift.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "redshift.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "redshift.ap-southeast-2.amazonaws.com", + "eu-west-1": 
"redshift.eu-west-1.amazonaws.com", + "us-east-1": "redshift.us-east-1.amazonaws.com", + "us-west-2": "redshift.us-west-2.amazonaws.com", + "eu-central-1": "redshift.eu-central-1.amazonaws.com" + }, + "route53": { + "ap-northeast-1": "route53.amazonaws.com", + "ap-southeast-1": "route53.amazonaws.com", + "ap-southeast-2": "route53.amazonaws.com", + "eu-central-1": "route53.amazonaws.com", + "eu-west-1": "route53.amazonaws.com", + "sa-east-1": "route53.amazonaws.com", + "us-east-1": "route53.amazonaws.com", + "us-west-1": "route53.amazonaws.com", + "us-west-2": "route53.amazonaws.com" + }, + "route53domains": { + "us-east-1": "route53domains.us-east-1.amazonaws.com" + }, + "s3": { + "ap-northeast-1": "s3-ap-northeast-1.amazonaws.com", + "ap-southeast-1": "s3-ap-southeast-1.amazonaws.com", + "ap-southeast-2": "s3-ap-southeast-2.amazonaws.com", + "cn-north-1": "s3.cn-north-1.amazonaws.com.cn", + "eu-west-1": "s3-eu-west-1.amazonaws.com", + "sa-east-1": "s3-sa-east-1.amazonaws.com", + "us-east-1": "s3.amazonaws.com", + "us-gov-west-1": "s3-us-gov-west-1.amazonaws.com", + "us-west-1": "s3-us-west-1.amazonaws.com", + "us-west-2": "s3-us-west-2.amazonaws.com", + "eu-central-1": "s3.eu-central-1.amazonaws.com" + }, + "sdb": { + "ap-northeast-1": "sdb.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "sdb.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "sdb.ap-southeast-2.amazonaws.com", + "eu-west-1": "sdb.eu-west-1.amazonaws.com", + "sa-east-1": "sdb.sa-east-1.amazonaws.com", + "us-east-1": "sdb.amazonaws.com", + "us-west-1": "sdb.us-west-1.amazonaws.com", + "us-west-2": "sdb.us-west-2.amazonaws.com", + "eu-central-1": "sdb.eu-central-1.amazonaws.com" + }, + "ses": { + "eu-west-1": "email.eu-west-1.amazonaws.com", + "us-east-1": "email.us-east-1.amazonaws.com", + "us-west-2": "email.us-west-2.amazonaws.com", + "eu-central-1": "email.eu-central-1.amazonaws.com" + }, + "sns": { + "ap-northeast-1": "sns.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "sns.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "sns.ap-southeast-2.amazonaws.com", + "cn-north-1": "sns.cn-north-1.amazonaws.com.cn", + "eu-west-1": "sns.eu-west-1.amazonaws.com", + "sa-east-1": "sns.sa-east-1.amazonaws.com", + "us-east-1": "sns.us-east-1.amazonaws.com", + "us-gov-west-1": "sns.us-gov-west-1.amazonaws.com", + "us-west-1": "sns.us-west-1.amazonaws.com", + "us-west-2": "sns.us-west-2.amazonaws.com", + "eu-central-1": "sns.eu-central-1.amazonaws.com" + }, + "sqs": { + "ap-northeast-1": "ap-northeast-1.queue.amazonaws.com", + "ap-southeast-1": "ap-southeast-1.queue.amazonaws.com", + "ap-southeast-2": "ap-southeast-2.queue.amazonaws.com", + "cn-north-1": "cn-north-1.queue.amazonaws.com.cn", + "eu-west-1": "eu-west-1.queue.amazonaws.com", + "sa-east-1": "sa-east-1.queue.amazonaws.com", + "us-east-1": "queue.amazonaws.com", + "us-gov-west-1": "us-gov-west-1.queue.amazonaws.com", + "us-west-1": "us-west-1.queue.amazonaws.com", + "us-west-2": "us-west-2.queue.amazonaws.com", + "eu-central-1": "eu-central-1.queue.amazonaws.com" + }, + "storagegateway": { + "ap-northeast-1": "storagegateway.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "storagegateway.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "storagegateway.ap-southeast-2.amazonaws.com", + "eu-west-1": "storagegateway.eu-west-1.amazonaws.com", + "sa-east-1": "storagegateway.sa-east-1.amazonaws.com", + "us-east-1": "storagegateway.us-east-1.amazonaws.com", + "us-west-1": "storagegateway.us-west-1.amazonaws.com", + "us-west-2": "storagegateway.us-west-2.amazonaws.com", 
+ "eu-central-1": "storagegateway.eu-central-1.amazonaws.com" + }, + "sts": { + "ap-northeast-1": "sts.amazonaws.com", + "ap-southeast-1": "sts.amazonaws.com", + "ap-southeast-2": "sts.amazonaws.com", + "cn-north-1": "sts.cn-north-1.amazonaws.com.cn", + "eu-west-1": "sts.amazonaws.com", + "sa-east-1": "sts.amazonaws.com", + "us-east-1": "sts.amazonaws.com", + "us-gov-west-1": "sts.us-gov-west-1.amazonaws.com", + "us-west-1": "sts.amazonaws.com", + "us-west-2": "sts.amazonaws.com", + "eu-central-1": "sts.amazonaws.com" + }, + "support": { + "us-east-1": "support.us-east-1.amazonaws.com", + "eu-central-1": "support.eu-central-1.amazonaws.com" + }, + "swf": { + "ap-northeast-1": "swf.ap-northeast-1.amazonaws.com", + "ap-southeast-1": "swf.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "swf.ap-southeast-2.amazonaws.com", + "cn-north-1": "swf.cn-north-1.amazonaws.com.cn", + "eu-west-1": "swf.eu-west-1.amazonaws.com", + "sa-east-1": "swf.sa-east-1.amazonaws.com", + "us-east-1": "swf.us-east-1.amazonaws.com", + "us-gov-west-1": "swf.us-gov-west-1.amazonaws.com", + "us-west-1": "swf.us-west-1.amazonaws.com", + "us-west-2": "swf.us-west-2.amazonaws.com", + "eu-central-1": "swf.eu-central-1.amazonaws.com" + } +} diff --git a/desktop/core/ext-py/boto-2.38.0/boto/exception.py b/desktop/core/ext-py/boto-2.38.0/boto/exception.py new file mode 100644 index 0000000000000000000000000000000000000000..36c226fa84e17a5c5ff1bd3b491c85748c878086 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/exception.py @@ -0,0 +1,573 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Exception classes - Subclassing allows you to check for specific errors +""" +import base64 +import xml.sax + +import boto + +from boto import handler +from boto.compat import json, StandardError +from boto.resultset import ResultSet + + +class BotoClientError(StandardError): + """ + General Boto Client error (error accessing AWS) + """ + def __init__(self, reason, *args): + super(BotoClientError, self).__init__(reason, *args) + self.reason = reason + + def __repr__(self): + return 'BotoClientError: %s' % self.reason + + def __str__(self): + return 'BotoClientError: %s' % self.reason + + +class SDBPersistenceError(StandardError): + pass + + +class StoragePermissionsError(BotoClientError): + """ + Permissions error when accessing a bucket or key on a storage service. 
+ """ + pass + + +class S3PermissionsError(StoragePermissionsError): + """ + Permissions error when accessing a bucket or key on S3. + """ + pass + + +class GSPermissionsError(StoragePermissionsError): + """ + Permissions error when accessing a bucket or key on GS. + """ + pass + + +class BotoServerError(StandardError): + def __init__(self, status, reason, body=None, *args): + super(BotoServerError, self).__init__(status, reason, body, *args) + self.status = status + self.reason = reason + self.body = body or '' + self.request_id = None + self.error_code = None + self._error_message = None + self.message = '' + self.box_usage = None + + if isinstance(self.body, bytes): + try: + self.body = self.body.decode('utf-8') + except UnicodeDecodeError: + boto.log.debug('Unable to decode body from bytes!') + + # Attempt to parse the error response. If body isn't present, + # then just ignore the error response. + if self.body: + # Check if it looks like a ``dict``. + if hasattr(self.body, 'items'): + # It's not a string, so trying to parse it will fail. + # But since it's data, we can work with that. + self.request_id = self.body.get('RequestId', None) + + if 'Error' in self.body: + # XML-style + error = self.body.get('Error', {}) + self.error_code = error.get('Code', None) + self.message = error.get('Message', None) + else: + # JSON-style. + self.message = self.body.get('message', None) + else: + try: + h = handler.XmlHandlerWrapper(self, self) + h.parseString(self.body) + except (TypeError, xml.sax.SAXParseException): + # What if it's JSON? Let's try that. + try: + parsed = json.loads(self.body) + + if 'RequestId' in parsed: + self.request_id = parsed['RequestId'] + if 'Error' in parsed: + if 'Code' in parsed['Error']: + self.error_code = parsed['Error']['Code'] + if 'Message' in parsed['Error']: + self.message = parsed['Error']['Message'] + + except (TypeError, ValueError): + # Remove unparsable message body so we don't include garbage + # in exception. But first, save self.body in self.error_message + # because occasionally we get error messages from Eucalyptus + # that are just text strings that we want to preserve. 
+ self.message = self.body + self.body = None + + def __getattr__(self, name): + if name == 'error_message': + return self.message + if name == 'code': + return self.error_code + raise AttributeError + + def __setattr__(self, name, value): + if name == 'error_message': + self.message = value + else: + super(BotoServerError, self).__setattr__(name, value) + + def __repr__(self): + return '%s: %s %s\n%s' % (self.__class__.__name__, + self.status, self.reason, self.body) + + def __str__(self): + return '%s: %s %s\n%s' % (self.__class__.__name__, + self.status, self.reason, self.body) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name in ('RequestId', 'RequestID'): + self.request_id = value + elif name == 'Code': + self.error_code = value + elif name == 'Message': + self.message = value + elif name == 'BoxUsage': + self.box_usage = value + return None + + def _cleanupParsedProperties(self): + self.request_id = None + self.error_code = None + self.message = None + self.box_usage = None + + +class ConsoleOutput(object): + def __init__(self, parent=None): + self.parent = parent + self.instance_id = None + self.timestamp = None + self.comment = None + self.output = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'instanceId': + self.instance_id = value + elif name == 'output': + self.output = base64.b64decode(value) + else: + setattr(self, name, value) + + +class StorageCreateError(BotoServerError): + """ + Error creating a bucket or key on a storage service. + """ + def __init__(self, status, reason, body=None): + self.bucket = None + super(StorageCreateError, self).__init__(status, reason, body) + + def endElement(self, name, value, connection): + if name == 'BucketName': + self.bucket = value + else: + return super(StorageCreateError, self).endElement(name, value, connection) + + +class S3CreateError(StorageCreateError): + """ + Error creating a bucket or key on S3. + """ + pass + + +class GSCreateError(StorageCreateError): + """ + Error creating a bucket or key on GS. + """ + pass + + +class StorageCopyError(BotoServerError): + """ + Error copying a key on a storage service. + """ + pass + + +class S3CopyError(StorageCopyError): + """ + Error copying a key on S3. + """ + pass + + +class GSCopyError(StorageCopyError): + """ + Error copying a key on GS. + """ + pass + + +class SQSError(BotoServerError): + """ + General Error on Simple Queue Service. + """ + def __init__(self, status, reason, body=None): + self.detail = None + self.type = None + super(SQSError, self).__init__(status, reason, body) + + def startElement(self, name, attrs, connection): + return super(SQSError, self).startElement(name, attrs, connection) + + def endElement(self, name, value, connection): + if name == 'Detail': + self.detail = value + elif name == 'Type': + self.type = value + else: + return super(SQSError, self).endElement(name, value, connection) + + def _cleanupParsedProperties(self): + super(SQSError, self)._cleanupParsedProperties() + for p in ('detail', 'type'): + setattr(self, p, None) + + +class SQSDecodeError(BotoClientError): + """ + Error when decoding an SQS message. 
+ """ + def __init__(self, reason, message): + super(SQSDecodeError, self).__init__(reason, message) + self.message = message + + def __repr__(self): + return 'SQSDecodeError: %s' % self.reason + + def __str__(self): + return 'SQSDecodeError: %s' % self.reason + + +class StorageResponseError(BotoServerError): + """ + Error in response from a storage service. + """ + def __init__(self, status, reason, body=None): + self.resource = None + super(StorageResponseError, self).__init__(status, reason, body) + + def startElement(self, name, attrs, connection): + return super(StorageResponseError, self).startElement( + name, attrs, connection) + + def endElement(self, name, value, connection): + if name == 'Resource': + self.resource = value + else: + return super(StorageResponseError, self).endElement( + name, value, connection) + + def _cleanupParsedProperties(self): + super(StorageResponseError, self)._cleanupParsedProperties() + for p in ('resource'): + setattr(self, p, None) + + +class S3ResponseError(StorageResponseError): + """ + Error in response from S3. + """ + pass + + +class GSResponseError(StorageResponseError): + """ + Error in response from GS. + """ + pass + + +class EC2ResponseError(BotoServerError): + """ + Error in response from EC2. + """ + def __init__(self, status, reason, body=None): + self.errors = None + self._errorResultSet = [] + super(EC2ResponseError, self).__init__(status, reason, body) + self.errors = [ + (e.error_code, e.error_message) for e in self._errorResultSet] + if len(self.errors): + self.error_code, self.error_message = self.errors[0] + + def startElement(self, name, attrs, connection): + if name == 'Errors': + self._errorResultSet = ResultSet([('Error', _EC2Error)]) + return self._errorResultSet + else: + return None + + def endElement(self, name, value, connection): + if name == 'RequestID': + self.request_id = value + else: + return None # don't call subclass here + + def _cleanupParsedProperties(self): + super(EC2ResponseError, self)._cleanupParsedProperties() + self._errorResultSet = [] + for p in ('errors'): + setattr(self, p, None) + + +class JSONResponseError(BotoServerError): + """ + This exception expects the fully parsed and decoded JSON response + body to be passed as the body parameter. + + :ivar status: The HTTP status code. + :ivar reason: The HTTP reason message. + :ivar body: The Python dict that represents the decoded JSON + response body. + :ivar error_message: The full description of the AWS error encountered. + :ivar error_code: A short string that identifies the AWS error + (e.g. 
ConditionalCheckFailedException) + """ + def __init__(self, status, reason, body=None, *args): + self.status = status + self.reason = reason + self.body = body + if self.body: + self.error_message = self.body.get('message', None) + self.error_code = self.body.get('__type', None) + if self.error_code: + self.error_code = self.error_code.split('#')[-1] + + +class DynamoDBResponseError(JSONResponseError): + pass + + +class SWFResponseError(JSONResponseError): + pass + + +class EmrResponseError(BotoServerError): + """ + Error in response from EMR + """ + pass + + +class _EC2Error(object): + def __init__(self, connection=None): + self.connection = connection + self.error_code = None + self.error_message = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Code': + self.error_code = value + elif name == 'Message': + self.error_message = value + else: + return None + + +class SDBResponseError(BotoServerError): + """ + Error in responses from SDB. + """ + pass + + +class AWSConnectionError(BotoClientError): + """ + General error connecting to Amazon Web Services. + """ + pass + + +class StorageDataError(BotoClientError): + """ + Error receiving data from a storage service. + """ + pass + + +class S3DataError(StorageDataError): + """ + Error receiving data from S3. + """ + pass + + +class GSDataError(StorageDataError): + """ + Error receiving data from GS. + """ + pass + + +class InvalidUriError(Exception): + """Exception raised when URI is invalid.""" + + def __init__(self, message): + super(InvalidUriError, self).__init__(message) + self.message = message + + +class InvalidAclError(Exception): + """Exception raised when ACL XML is invalid.""" + + def __init__(self, message): + super(InvalidAclError, self).__init__(message) + self.message = message + + +class InvalidCorsError(Exception): + """Exception raised when CORS XML is invalid.""" + + def __init__(self, message): + super(InvalidCorsError, self).__init__(message) + self.message = message + + +class NoAuthHandlerFound(Exception): + """Is raised when no auth handlers were found ready to authenticate.""" + pass + + +class InvalidLifecycleConfigError(Exception): + """Exception raised when GCS lifecycle configuration XML is invalid.""" + + def __init__(self, message): + super(InvalidLifecycleConfigError, self).__init__(message) + self.message = message + + +# Enum class for resumable upload failure disposition. +class ResumableTransferDisposition(object): + # START_OVER means an attempt to resume an existing transfer failed, + # and a new resumable upload should be attempted (without delay). + START_OVER = 'START_OVER' + + # WAIT_BEFORE_RETRY means the resumable transfer failed but that it can + # be retried after a time delay within the current process. + WAIT_BEFORE_RETRY = 'WAIT_BEFORE_RETRY' + + # ABORT_CUR_PROCESS means the resumable transfer failed and that + # delaying/retrying within the current process will not help. If + # resumable transfer included a state tracker file the upload can be + # retried again later, in another process (e.g., a later run of gsutil). + ABORT_CUR_PROCESS = 'ABORT_CUR_PROCESS' + + # ABORT means the resumable transfer failed in a way that it does not + # make sense to continue in the current process, and further that the + # current tracker ID should not be preserved (in a tracker file if one + # was specified at resumable upload start time). 
If the user tries again + # later (e.g., a separate run of gsutil) it will get a new resumable + # upload ID. + ABORT = 'ABORT' + + +class ResumableUploadException(Exception): + """ + Exception raised for various resumable upload problems. + + self.disposition is of type ResumableTransferDisposition. + """ + + def __init__(self, message, disposition): + super(ResumableUploadException, self).__init__(message, disposition) + self.message = message + self.disposition = disposition + + def __repr__(self): + return 'ResumableUploadException("%s", %s)' % ( + self.message, self.disposition) + + +class ResumableDownloadException(Exception): + """ + Exception raised for various resumable download problems. + + self.disposition is of type ResumableTransferDisposition. + """ + + def __init__(self, message, disposition): + super(ResumableDownloadException, self).__init__(message, disposition) + self.message = message + self.disposition = disposition + + def __repr__(self): + return 'ResumableDownloadException("%s", %s)' % ( + self.message, self.disposition) + + +class TooManyRecordsException(Exception): + """ + Exception raised when a search of Route53 records returns more + records than requested. + """ + + def __init__(self, message): + super(TooManyRecordsException, self).__init__(message) + self.message = message + + +class PleaseRetryException(Exception): + """ + Indicates a request should be retried. + """ + def __init__(self, message, response=None): + self.message = message + self.response = response + + def __repr__(self): + return 'PleaseRetryException("%s", %s)' % ( + self.message, + self.response + ) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/file/README b/desktop/core/ext-py/boto-2.38.0/boto/file/README new file mode 100644 index 0000000000000000000000000000000000000000..af824554e162c99fb97846c01d64cfd31427f794 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/file/README @@ -0,0 +1,49 @@ +Handling of file:// URIs: + +This directory contains code to map basic boto connection, bucket, and key +operations onto files in the local filesystem, in support of file:// +URI operations. + +Bucket storage operations cannot be mapped completely onto a file system +because of the different naming semantics in these types of systems: the +former have a flat name space of objects within each named bucket; the +latter have a hierarchical name space of files, and nothing corresponding to +the notion of a bucket. The mapping we selected was guided by the desire +to achieve meaningful semantics for a useful subset of operations that can +be implemented polymorphically across both types of systems. We considered +several possibilities for mapping path names to bucket + object name: + +1) bucket = the file system root or local directory (for absolute vs +relative file:// URIs, respectively) and object = remainder of path. +We discarded this choice because the get_all_keys() method doesn't make +sense under this approach: Enumerating all files under the root or current +directory could include more than the caller intended. For example, +StorageUri("file:///usr/bin/X11/vim").get_all_keys() would enumerate all +files in the file system. + +2) bucket is treated mostly as an anonymous placeholder, with the object +name holding the URI path (minus the "file://" part). Two sub-options, +for object enumeration (the get_all_keys() call): + a) disallow get_all_keys(). This isn't great, as then the caller must + know the URI type before deciding whether to make this call. 
+ b) return the single key for which this "bucket" was defined. + Note that this option means the app cannot use this API for listing + contents of the file system. While that makes the API less generally + useful, it avoids the potentially dangerous/unintended consequences + noted in option (1) above. + +We selected 2b, resulting in a class hierarchy where StorageUri is an abstract +class, with FileStorageUri and BucketStorageUri subclasses. + +Some additional notes: + +BucketStorageUri and FileStorageUri each implement these methods: + - clone_replace_name() creates a same-type URI with a + different object name - which is useful for various enumeration cases + (e.g., implementing wildcarding in a command line utility). + - names_container() determines if the given URI names a container for + multiple objects/files - i.e., a bucket or directory. + - names_singleton() determines if the given URI names an individual object + or file. + - is_file_uri() and is_cloud_uri() determine if the given URI is a + FileStorageUri or BucketStorageUri, respectively diff --git a/desktop/core/ext-py/boto-2.38.0/boto/file/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/file/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..837a164f4733c7a873a7ea0f5c863e533f2b06c2 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/file/__init__.py @@ -0,0 +1,28 @@ +# Copyright 2010 Google Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import boto + +from boto.file.connection import FileConnection as Connection +from boto.file.key import Key +from boto.file.bucket import Bucket + +__all__ = ['Connection', 'Key', 'Bucket'] diff --git a/desktop/core/ext-py/boto-2.38.0/boto/file/bucket.py b/desktop/core/ext-py/boto-2.38.0/boto/file/bucket.py new file mode 100644 index 0000000000000000000000000000000000000000..d49755eaae87c08f94f8cea489d35b76c800355d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/file/bucket.py @@ -0,0 +1,112 @@ +# Copyright 2010 Google Inc. +# Copyright (c) 2011, Nexenta Systems Inc. 
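
To make the 2b mapping above concrete, here is a minimal usage sketch
(assuming the boto.storage_uri() helper from boto's top-level module, which
builds a FileStorageUri for file:// URIs; /tmp/example.txt is a hypothetical
path that must already exist before get_key() is called):

    import boto

    uri = boto.storage_uri('file:///tmp/example.txt')
    print uri.is_file_uri()      # True: file:// URIs map to FileStorageUri
    print uri.names_singleton()  # True: a file path names a single object
    key = uri.get_key()          # the boto.file.key.Key for that one file
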
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+# File representation of bucket, for use with "file://" URIs.
+
+import os
+from boto.file.key import Key
+from boto.file.simpleresultset import SimpleResultSet
+from boto.s3.bucketlistresultset import BucketListResultSet
+
+class Bucket(object):
+    def __init__(self, name, contained_key):
+        """Instantiate an anonymous file-based Bucket around a single key.
+        """
+        self.name = name
+        self.contained_key = contained_key
+
+    def __iter__(self):
+        return iter(BucketListResultSet(self))
+
+    def __str__(self):
+        return 'anonymous bucket for file://' + self.contained_key
+
+    def delete_key(self, key_name, headers=None,
+                   version_id=None, mfa_token=None):
+        """
+        Deletes a key from the bucket.
+
+        :type key_name: string
+        :param key_name: The key name to delete
+
+        :type version_id: string
+        :param version_id: Unused in this subclass.
+
+        :type mfa_token: tuple or list of strings
+        :param mfa_token: Unused in this subclass.
+        """
+        os.remove(key_name)
+
+    def get_all_keys(self, headers=None, **params):
+        """
+        This method returns the single key around which this anonymous Bucket
+        was instantiated.
+
+        :rtype: SimpleResultSet
+        :return: A result set containing the single contained key
+
+        """
+        key = Key(self.name, self.contained_key)
+        return SimpleResultSet([key])
+
+    def get_key(self, key_name, headers=None, version_id=None,
+                key_type=Key.KEY_REGULAR_FILE):
+        """
+        Check to see if a particular key exists within the bucket.
+        Returns: An instance of a Key object or None
+
+        :type key_name: string
+        :param key_name: The name of the key to retrieve
+
+        :type version_id: string
+        :param version_id: Unused in this subclass.
+
+        :type key_type: integer
+        :param key_type: Type of the Key - Regular File or input/output Stream
+
+        :rtype: :class:`boto.file.key.Key`
+        :returns: A Key object from this bucket.
+ """ + if key_name == '-': + return Key(self.name, '-', key_type=Key.KEY_STREAM_READABLE) + else: + fp = open(key_name, 'rb') + return Key(self.name, key_name, fp) + + def new_key(self, key_name=None, key_type=Key.KEY_REGULAR_FILE): + """ + Creates a new key + + :type key_name: string + :param key_name: The name of the key to create + + :rtype: :class:`boto.file.key.Key` + :returns: An instance of the newly created key object + """ + if key_name == '-': + return Key(self.name, '-', key_type=Key.KEY_STREAM_WRITABLE) + else: + dir_name = os.path.dirname(key_name) + if dir_name and not os.path.exists(dir_name): + os.makedirs(dir_name) + fp = open(key_name, 'wb') + return Key(self.name, key_name, fp) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/file/connection.py b/desktop/core/ext-py/boto-2.38.0/boto/file/connection.py new file mode 100755 index 0000000000000000000000000000000000000000..2507e2db0b62d7b27aef8aa4fddd3cddc2bdd76a --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/file/connection.py @@ -0,0 +1,33 @@ +# Copyright 2010 Google Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +# File representation of connection, for use with "file://" URIs. + +from boto.file.bucket import Bucket + +class FileConnection(object): + + def __init__(self, file_storage_uri): + # FileConnections are per-file storage URI. + self.file_storage_uri = file_storage_uri + + def get_bucket(self, bucket_name, validate=True, headers=None): + return Bucket(bucket_name, self.file_storage_uri.object_name) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/file/key.py b/desktop/core/ext-py/boto-2.38.0/boto/file/key.py new file mode 100755 index 0000000000000000000000000000000000000000..3ec345d464235ac918ef3c22b0b8f40812f23fcf --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/file/key.py @@ -0,0 +1,201 @@ +# Copyright 2010 Google Inc. +# Copyright (c) 2011, Nexenta Systems Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+# File representation of key, for use with "file://" URIs.
+
+import os
+import shutil
+import sys
+
+from boto.compat import StringIO
+# Needed for the errors raised below when a stream Key is used in the
+# wrong direction.
+from boto.exception import BotoClientError
+
+class Key(object):
+
+    KEY_STREAM_READABLE = 0x01
+    KEY_STREAM_WRITABLE = 0x02
+    KEY_STREAM = (KEY_STREAM_READABLE | KEY_STREAM_WRITABLE)
+    KEY_REGULAR_FILE = 0x00
+
+    def __init__(self, bucket, name, fp=None, key_type=KEY_REGULAR_FILE):
+        self.bucket = bucket
+        self.full_path = name
+        if name == '-':
+            self.name = None
+            self.size = None
+        else:
+            self.name = name
+            self.size = os.stat(name).st_size
+        self.key_type = key_type
+        if key_type == self.KEY_STREAM_READABLE:
+            self.fp = sys.stdin
+            self.full_path = ''
+        elif key_type == self.KEY_STREAM_WRITABLE:
+            self.fp = sys.stdout
+            self.full_path = ''
+        else:
+            self.fp = fp
+
+    def __str__(self):
+        return 'file://' + self.full_path
+
+    def get_file(self, fp, headers=None, cb=None, num_cb=10, torrent=False):
+        """
+        Retrieves a file from a Key
+
+        :type fp: file
+        :param fp: File pointer to put the data into
+
+        :type headers: dict
+        :param headers: ignored in this subclass.
+
+        :type cb: function
+        :param cb: ignored in this subclass.
+
+        :type num_cb: int
+        :param num_cb: ignored in this subclass.
+        """
+        if self.key_type & self.KEY_STREAM_WRITABLE:
+            raise BotoClientError('Stream is not readable')
+        elif self.key_type & self.KEY_STREAM_READABLE:
+            key_file = self.fp
+        else:
+            key_file = open(self.full_path, 'rb')
+        try:
+            shutil.copyfileobj(key_file, fp)
+        finally:
+            key_file.close()
+
+    def set_contents_from_file(self, fp, headers=None, replace=True, cb=None,
+                               num_cb=10, policy=None, md5=None):
+        """
+        Store an object in a file using the name of the Key object as the
+        key in file URI and the contents of the file pointed to by 'fp' as the
+        contents.
+
+        :type fp: file
+        :param fp: the file whose contents to upload
+
+        :type headers: dict
+        :param headers: ignored in this subclass.
+
+        :type replace: bool
+        :param replace: If this parameter is False, the method
+            will first check to see if an object exists in the
+            bucket with the same key. If it does, it won't
+            overwrite it. The default value is True which will
+            overwrite the object.
+
+        :type cb: function
+        :param cb: ignored in this subclass.
+
+        :type num_cb: int
+        :param num_cb: ignored in this subclass.
+
+        :type policy: :class:`boto.s3.acl.CannedACLStrings`
+        :param policy: ignored in this subclass.
+
+        :type md5: A tuple containing the hexdigest version of the MD5 checksum
+            of the file as the first element and the Base64-encoded
+            version of the plain checksum as the second element.
+            This is the same format returned by the compute_md5 method.
+        :param md5: ignored in this subclass.
+ """ + if self.key_type & self.KEY_STREAM_READABLE: + raise BotoClientError('Stream is not writable') + elif self.key_type & self.KEY_STREAM_WRITABLE: + key_file = self.fp + else: + if not replace and os.path.exists(self.full_path): + return + key_file = open(self.full_path, 'wb') + try: + shutil.copyfileobj(fp, key_file) + finally: + key_file.close() + + def get_contents_to_file(self, fp, headers=None, cb=None, num_cb=None, + torrent=False, version_id=None, + res_download_handler=None, response_headers=None): + """ + Copy contents from the current file to the file pointed to by 'fp'. + + :type fp: File-like object + :param fp: + + :type headers: dict + :param headers: Unused in this subclass. + + :type cb: function + :param cb: Unused in this subclass. + + :type cb: int + :param num_cb: Unused in this subclass. + + :type torrent: bool + :param torrent: Unused in this subclass. + + :type res_upload_handler: ResumableDownloadHandler + :param res_download_handler: Unused in this subclass. + + :type response_headers: dict + :param response_headers: Unused in this subclass. + """ + shutil.copyfileobj(self.fp, fp) + + def get_contents_as_string(self, headers=None, cb=None, num_cb=10, + torrent=False): + """ + Retrieve file data from the Key, and return contents as a string. + + :type headers: dict + :param headers: ignored in this subclass. + + :type cb: function + :param cb: ignored in this subclass. + + :type cb: int + :param num_cb: ignored in this subclass. + + :type cb: int + :param num_cb: ignored in this subclass. + + :type torrent: bool + :param torrent: ignored in this subclass. + + :rtype: string + :returns: The contents of the file as a string + """ + + fp = StringIO() + self.get_contents_to_file(fp) + return fp.getvalue() + + def is_stream(self): + return (self.key_type & self.KEY_STREAM) + + def close(self): + """ + Closes fp associated with underlying file. + Caller should call this method when done with this class, to avoid + using up OS resources (e.g., when iterating over a large number + of files). + """ + self.fp.close() diff --git a/desktop/core/ext-py/boto-2.38.0/boto/file/simpleresultset.py b/desktop/core/ext-py/boto-2.38.0/boto/file/simpleresultset.py new file mode 100755 index 0000000000000000000000000000000000000000..5f94dc116582cb066c2d6c763452ab1148d0fc8f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/file/simpleresultset.py @@ -0,0 +1,30 @@ +# Copyright 2010 Google Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +class SimpleResultSet(list): + """ + ResultSet facade built from a simple list, rather than via XML parsing. + """ + + def __init__(self, input_list): + for x in input_list: + self.append(x) + self.is_truncated = False diff --git a/desktop/core/ext-py/boto-2.38.0/boto/fps/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/fps/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d69b7f08a42b9f2e8574337e9d1ff29e1b1b1e40 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/fps/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2008, Chris Moyer http://coredumped.org +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# diff --git a/desktop/core/ext-py/boto-2.38.0/boto/fps/connection.py b/desktop/core/ext-py/boto-2.38.0/boto/fps/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..6dc90a248eb14206c0d1be924a71fbe4aeb5deb9 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/fps/connection.py @@ -0,0 +1,395 @@ +# Copyright (c) 2012 Andy Davidoff http://www.disruptek.com/ +# Copyright (c) 2010 Jason R. Coombs http://www.jaraco.com/ +# Copyright (c) 2008 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
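
The connection module that follows composes each FPS call from the decorator
stack defined below: api_action() derives the Action name and response class
from the method name, requires() validates that exactly one argument group is
present, complex_amounts() flattens Amount objects into Value/CurrencyCode
pairs, and needs_caller_reference() injects a default UUID. A minimal sketch
of the resulting calling convention (credentials and the transaction id are
placeholders):

    from boto.fps.connection import FPSConnection

    # Host defaults to the FPS sandbox; CurrencyCode defaults to 'USD'.
    conn = FPSConnection(aws_access_key_id='...',
                         aws_secret_access_key='...')

    # get_transaction_status maps to the GetTransactionStatus API action;
    # @requires raises KeyError if TransactionId is missing.
    status = conn.get_transaction_status(TransactionId='...')
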
+ +import urllib +import uuid +from boto.connection import AWSQueryConnection +from boto.fps.exception import ResponseErrorFactory +from boto.fps.response import ResponseFactory +import boto.fps.response + +__all__ = ['FPSConnection'] + +decorated_attrs = ('action', 'response') + + +def add_attrs_from(func, to): + for attr in decorated_attrs: + setattr(to, attr, getattr(func, attr, None)) + return to + + +def complex_amounts(*fields): + def decorator(func): + def wrapper(self, *args, **kw): + for field in filter(kw.has_key, fields): + amount = kw.pop(field) + kw[field + '.Value'] = getattr(amount, 'Value', str(amount)) + kw[field + '.CurrencyCode'] = getattr(amount, 'CurrencyCode', + self.currencycode) + return func(self, *args, **kw) + wrapper.__doc__ = "{0}\nComplex Amounts: {1}".format(func.__doc__, + ', '.join(fields)) + return add_attrs_from(func, to=wrapper) + return decorator + + +def requires(*groups): + + def decorator(func): + + def wrapper(*args, **kw): + hasgroup = lambda x: len(x) == len(filter(kw.has_key, x)) + if 1 != len(filter(hasgroup, groups)): + message = ' OR '.join(['+'.join(g) for g in groups]) + message = "{0} requires {1} argument(s)" \ + "".format(getattr(func, 'action', 'Method'), message) + raise KeyError(message) + return func(*args, **kw) + message = ' OR '.join(['+'.join(g) for g in groups]) + wrapper.__doc__ = "{0}\nRequired: {1}".format(func.__doc__, + message) + return add_attrs_from(func, to=wrapper) + return decorator + + +def needs_caller_reference(func): + + def wrapper(*args, **kw): + kw.setdefault('CallerReference', uuid.uuid4()) + return func(*args, **kw) + wrapper.__doc__ = "{0}\nUses CallerReference, defaults " \ + "to uuid.uuid4()".format(func.__doc__) + return add_attrs_from(func, to=wrapper) + + +def api_action(*api): + + def decorator(func): + action = ''.join(api or map(str.capitalize, func.__name__.split('_'))) + response = ResponseFactory(action) + if hasattr(boto.fps.response, action + 'Response'): + response = getattr(boto.fps.response, action + 'Response') + + def wrapper(self, *args, **kw): + return func(self, action, response, *args, **kw) + wrapper.action, wrapper.response = action, response + wrapper.__doc__ = "FPS {0} API call\n{1}".format(action, + func.__doc__) + return wrapper + return decorator + + +class FPSConnection(AWSQueryConnection): + + APIVersion = '2010-08-28' + ResponseError = ResponseErrorFactory + currencycode = 'USD' + + def __init__(self, *args, **kw): + self.currencycode = kw.pop('CurrencyCode', self.currencycode) + kw.setdefault('host', 'fps.sandbox.amazonaws.com') + super(FPSConnection, self).__init__(*args, **kw) + + def _required_auth_capability(self): + return ['fps'] + + @needs_caller_reference + @complex_amounts('SettlementAmount') + @requires(['CreditInstrumentId', 'SettlementAmount.Value', + 'SenderTokenId', 'SettlementAmount.CurrencyCode']) + @api_action() + def settle_debt(self, action, response, **kw): + """ + Allows a caller to initiate a transaction that atomically transfers + money from a sender's payment instrument to the recipient, while + decreasing corresponding debt balance. + """ + return self.get_object(action, kw, response) + + @requires(['TransactionId']) + @api_action() + def get_transaction_status(self, action, response, **kw): + """ + Gets the latest status of a transaction. + """ + return self.get_object(action, kw, response) + + @requires(['StartDate']) + @api_action() + def get_account_activity(self, action, response, **kw): + """ + Returns transactions for a given date range. 
+ """ + return self.get_object(action, kw, response) + + @requires(['TransactionId']) + @api_action() + def get_transaction(self, action, response, **kw): + """ + Returns all details of a transaction. + """ + return self.get_object(action, kw, response) + + @api_action() + def get_outstanding_debt_balance(self, action, response): + """ + Returns the total outstanding balance for all the credit instruments + for the given creditor account. + """ + return self.get_object(action, {}, response) + + @requires(['PrepaidInstrumentId']) + @api_action() + def get_prepaid_balance(self, action, response, **kw): + """ + Returns the balance available on the given prepaid instrument. + """ + return self.get_object(action, kw, response) + + @api_action() + def get_total_prepaid_liability(self, action, response): + """ + Returns the total liability held by the given account corresponding to + all the prepaid instruments owned by the account. + """ + return self.get_object(action, {}, response) + + @api_action() + def get_account_balance(self, action, response): + """ + Returns the account balance for an account in real time. + """ + return self.get_object(action, {}, response) + + @needs_caller_reference + @requires(['PaymentInstruction', 'TokenType']) + @api_action() + def install_payment_instruction(self, action, response, **kw): + """ + Installs a payment instruction for caller. + """ + return self.get_object(action, kw, response) + + @needs_caller_reference + @requires(['returnURL', 'pipelineName']) + def cbui_url(self, **kw): + """ + Generate a signed URL for the Co-Branded service API given arguments as + payload. + """ + sandbox = 'sandbox' in self.host and 'payments-sandbox' or 'payments' + endpoint = 'authorize.{0}.amazon.com'.format(sandbox) + base = '/cobranded-ui/actions/start' + + validpipelines = ('SingleUse', 'MultiUse', 'Recurring', 'Recipient', + 'SetupPrepaid', 'SetupPostpaid', 'EditToken') + assert kw['pipelineName'] in validpipelines, "Invalid pipelineName" + kw.update({ + 'signatureMethod': 'HmacSHA256', + 'signatureVersion': '2', + }) + kw.setdefault('callerKey', self.aws_access_key_id) + + safestr = lambda x: x is not None and str(x) or '' + safequote = lambda x: urllib.quote(safestr(x), safe='~') + payload = sorted([(k, safequote(v)) for k, v in kw.items()]) + + encoded = lambda p: '&'.join([k + '=' + v for k, v in p]) + canonical = '\n'.join(['GET', endpoint, base, encoded(payload)]) + signature = self._auth_handler.sign_string(canonical) + payload += [('signature', safequote(signature))] + payload.sort() + + return 'https://{0}{1}?{2}'.format(endpoint, base, encoded(payload)) + + @needs_caller_reference + @complex_amounts('TransactionAmount') + @requires(['SenderTokenId', 'TransactionAmount.Value', + 'TransactionAmount.CurrencyCode']) + @api_action() + def reserve(self, action, response, **kw): + """ + Reserve API is part of the Reserve and Settle API conjunction that + serve the purpose of a pay where the authorization and settlement have + a timing difference. + """ + return self.get_object(action, kw, response) + + @needs_caller_reference + @complex_amounts('TransactionAmount') + @requires(['SenderTokenId', 'TransactionAmount.Value', + 'TransactionAmount.CurrencyCode']) + @api_action() + def pay(self, action, response, **kw): + """ + Allows calling applications to move money from a sender to a recipient. 
+ """ + return self.get_object(action, kw, response) + + @requires(['TransactionId']) + @api_action() + def cancel(self, action, response, **kw): + """ + Cancels an ongoing transaction and puts it in cancelled state. + """ + return self.get_object(action, kw, response) + + @complex_amounts('TransactionAmount') + @requires(['ReserveTransactionId', 'TransactionAmount.Value', + 'TransactionAmount.CurrencyCode']) + @api_action() + def settle(self, action, response, **kw): + """ + The Settle API is used in conjunction with the Reserve API and is used + to settle previously reserved transaction. + """ + return self.get_object(action, kw, response) + + @complex_amounts('RefundAmount') + @requires(['TransactionId', 'RefundAmount.Value', + 'CallerReference', 'RefundAmount.CurrencyCode']) + @api_action() + def refund(self, action, response, **kw): + """ + Refunds a previously completed transaction. + """ + return self.get_object(action, kw, response) + + @requires(['RecipientTokenId']) + @api_action() + def get_recipient_verification_status(self, action, response, **kw): + """ + Returns the recipient status. + """ + return self.get_object(action, kw, response) + + @requires(['CallerReference'], ['TokenId']) + @api_action() + def get_token_by_caller(self, action, response, **kw): + """ + Returns the details of a particular token installed by this calling + application using the subway co-branded UI. + """ + return self.get_object(action, kw, response) + + @requires(['UrlEndPoint', 'HttpParameters']) + @api_action() + def verify_signature(self, action, response, **kw): + """ + Verify the signature that FPS sent in IPN or callback urls. + """ + return self.get_object(action, kw, response) + + @api_action() + def get_tokens(self, action, response, **kw): + """ + Returns a list of tokens installed on the given account. + """ + return self.get_object(action, kw, response) + + @requires(['TokenId']) + @api_action() + def get_token_usage(self, action, response, **kw): + """ + Returns the usage of a token. + """ + return self.get_object(action, kw, response) + + @requires(['TokenId']) + @api_action() + def cancel_token(self, action, response, **kw): + """ + Cancels any token installed by the calling application on its own + account. + """ + return self.get_object(action, kw, response) + + @needs_caller_reference + @complex_amounts('FundingAmount') + @requires(['PrepaidInstrumentId', 'FundingAmount.Value', + 'SenderTokenId', 'FundingAmount.CurrencyCode']) + @api_action() + def fund_prepaid(self, action, response, **kw): + """ + Funds the prepaid balance on the given prepaid instrument. + """ + return self.get_object(action, kw, response) + + @requires(['CreditInstrumentId']) + @api_action() + def get_debt_balance(self, action, response, **kw): + """ + Returns the balance corresponding to the given credit instrument. + """ + return self.get_object(action, kw, response) + + @needs_caller_reference + @complex_amounts('AdjustmentAmount') + @requires(['CreditInstrumentId', 'AdjustmentAmount.Value', + 'AdjustmentAmount.CurrencyCode']) + @api_action() + def write_off_debt(self, action, response, **kw): + """ + Allows a creditor to write off the debt balance accumulated partially + or fully at any time. + """ + return self.get_object(action, kw, response) + + @requires(['SubscriptionId']) + @api_action() + def get_transactions_for_subscription(self, action, response, **kw): + """ + Returns the transactions for a given subscriptionID. 
+ """ + return self.get_object(action, kw, response) + + @requires(['SubscriptionId']) + @api_action() + def get_subscription_details(self, action, response, **kw): + """ + Returns the details of Subscription for a given subscriptionID. + """ + return self.get_object(action, kw, response) + + @needs_caller_reference + @complex_amounts('RefundAmount') + @requires(['SubscriptionId']) + @api_action() + def cancel_subscription_and_refund(self, action, response, **kw): + """ + Cancels a subscription. + """ + message = "If you specify a RefundAmount, " \ + "you must specify CallerReference." + assert not 'RefundAmount.Value' in kw \ + or 'CallerReference' in kw, message + return self.get_object(action, kw, response) + + @requires(['TokenId']) + @api_action() + def get_payment_instruction(self, action, response, **kw): + """ + Gets the payment instruction of a token. + """ + return self.get_object(action, kw, response) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/fps/exception.py b/desktop/core/ext-py/boto-2.38.0/boto/fps/exception.py new file mode 100644 index 0000000000000000000000000000000000000000..bebb86b787b78a5f38bc422eeab99accfc71aa8b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/fps/exception.py @@ -0,0 +1,344 @@ +from boto.exception import BotoServerError + + +class ResponseErrorFactory(BotoServerError): + + def __new__(cls, *args, **kw): + error = BotoServerError(*args, **kw) + newclass = globals().get(error.error_code, ResponseError) + obj = newclass.__new__(newclass, *args, **kw) + obj.__dict__.update(error.__dict__) + return obj + + +class ResponseError(BotoServerError): + """Undefined response error. + """ + retry = False + + def __repr__(self): + return '{0}({1}, {2},\n\t{3})'.format(self.__class__.__name__, + self.status, self.reason, + self.error_message) + + def __str__(self): + return 'FPS Response Error: {0.status} {0.__class__.__name__} {1}\n' \ + '{2}\n' \ + '{0.error_message}'.format(self, + self.retry and '(Retriable)' or '', + self.__doc__.strip()) + + +class RetriableResponseError(ResponseError): + retry = True + + +class AccessFailure(RetriableResponseError): + """Account cannot be accessed. + """ + + +class AccountClosed(RetriableResponseError): + """Account is not active. + """ + + +class AccountLimitsExceeded(RetriableResponseError): + """The spending or receiving limit on the account is exceeded. + """ + + +class AmountOutOfRange(ResponseError): + """The transaction amount is more than the allowed range. + """ + + +class AuthFailure(RetriableResponseError): + """AWS was not able to validate the provided access credentials. + """ + + +class ConcurrentModification(RetriableResponseError): + """A retriable error can happen when two processes try to modify the + same data at the same time. + """ + + +class DuplicateRequest(ResponseError): + """A different request associated with this caller reference already + exists. + """ + + +class InactiveInstrument(ResponseError): + """Payment instrument is inactive. + """ + + +class IncompatibleTokens(ResponseError): + """The transaction could not be completed because the tokens have + incompatible payment instructions. + """ + + +class InstrumentAccessDenied(ResponseError): + """The external calling application is not the recipient for this + postpaid or prepaid instrument. + """ + + +class InstrumentExpired(ResponseError): + """The prepaid or the postpaid instrument has expired. 
+ """ + + +class InsufficientBalance(RetriableResponseError): + """The sender, caller, or recipient's account balance has + insufficient funds to complete the transaction. + """ + + +class InternalError(RetriableResponseError): + """A retriable error that happens due to some transient problem in + the system. + """ + + +class InvalidAccountState(RetriableResponseError): + """The account is either suspended or closed. + """ + + +class InvalidAccountState_Caller(RetriableResponseError): + """The developer account cannot participate in the transaction. + """ + + +class InvalidAccountState_Recipient(RetriableResponseError): + """Recipient account cannot participate in the transaction. + """ + + +class InvalidAccountState_Sender(RetriableResponseError): + """Sender account cannot participate in the transaction. + """ + + +class InvalidCallerReference(ResponseError): + """The Caller Reference does not have a token associated with it. + """ + + +class InvalidClientTokenId(ResponseError): + """The AWS Access Key Id you provided does not exist in our records. + """ + + +class InvalidDateRange(ResponseError): + """The end date specified is before the start date or the start date + is in the future. + """ + + +class InvalidParams(ResponseError): + """One or more parameters in the request is invalid. + """ + + +class InvalidPaymentInstrument(ResponseError): + """The payment method used in the transaction is invalid. + """ + + +class InvalidPaymentMethod(ResponseError): + """Specify correct payment method. + """ + + +class InvalidRecipientForCCTransaction(ResponseError): + """This account cannot receive credit card payments. + """ + + +class InvalidSenderRoleForAccountType(ResponseError): + """This token cannot be used for this operation. + """ + + +class InvalidTokenId(ResponseError): + """You did not install the token that you are trying to cancel. + """ + + +class InvalidTokenId_Recipient(ResponseError): + """The recipient token specified is either invalid or canceled. + """ + + +class InvalidTokenId_Sender(ResponseError): + """The sender token specified is either invalid or canceled or the + token is not active. + """ + + +class InvalidTokenType(ResponseError): + """An invalid operation was performed on the token, for example, + getting the token usage information on a single use token. + """ + + +class InvalidTransactionId(ResponseError): + """The specified transaction could not be found or the caller did not + execute the transaction or this is not a Pay or Reserve call. + """ + + +class InvalidTransactionState(ResponseError): + """The transaction is not complete, or it has temporarily failed. + """ + + +class NotMarketplaceApp(RetriableResponseError): + """This is not an marketplace application or the caller does not + match either the sender or the recipient. + """ + + +class OriginalTransactionFailed(ResponseError): + """The original transaction has failed. + """ + + +class OriginalTransactionIncomplete(RetriableResponseError): + """The original transaction is still in progress. + """ + + +class PaymentInstrumentNotCC(ResponseError): + """The payment method specified in the transaction is not a credit + card. You can only use a credit card for this transaction. + """ + + +class PaymentMethodNotDefined(ResponseError): + """Payment method is not defined in the transaction. + """ + + +class PrepaidFundingLimitExceeded(RetriableResponseError): + """An attempt has been made to fund the prepaid instrument + at a level greater than its recharge limit. 
+ """ + + +class RefundAmountExceeded(ResponseError): + """The refund amount is more than the refundable amount. + """ + + +class SameSenderAndRecipient(ResponseError): + """The sender and receiver are identical, which is not allowed. + """ + + +class SameTokenIdUsedMultipleTimes(ResponseError): + """This token is already used in earlier transactions. + """ + + +class SenderNotOriginalRecipient(ResponseError): + """The sender in the refund transaction is not + the recipient of the original transaction. + """ + + +class SettleAmountGreaterThanDebt(ResponseError): + """The amount being settled or written off is + greater than the current debt. + """ + + +class SettleAmountGreaterThanReserveAmount(ResponseError): + """The amount being settled is greater than the reserved amount. + """ + + +class SignatureDoesNotMatch(ResponseError): + """The request signature calculated by Amazon does not match the + signature you provided. + """ + + +class TokenAccessDenied(ResponseError): + """Permission to cancel the token is denied. + """ + + +class TokenNotActive(ResponseError): + """The token is canceled. + """ + + +class TokenNotActive_Recipient(ResponseError): + """The recipient token is canceled. + """ + + +class TokenNotActive_Sender(ResponseError): + """The sender token is canceled. + """ + + +class TokenUsageError(ResponseError): + """The token usage limit is exceeded. + """ + + +class TransactionDenied(ResponseError): + """The transaction is not allowed. + """ + + +class TransactionFullyRefundedAlready(ResponseError): + """The transaction has already been completely refunded. + """ + + +class TransactionTypeNotRefundable(ResponseError): + """You cannot refund this transaction. + """ + + +class UnverifiedAccount_Recipient(ResponseError): + """The recipient's account must have a verified bank account or a + credit card before this transaction can be initiated. + """ + + +class UnverifiedAccount_Sender(ResponseError): + """The sender's account must have a verified U.S. credit card or + a verified U.S bank account before this transaction can be + initiated. + """ + + +class UnverifiedBankAccount(ResponseError): + """A verified bank account should be used for this transaction. + """ + + +class UnverifiedEmailAddress_Caller(ResponseError): + """The caller account must have a verified email address. + """ + + +class UnverifiedEmailAddress_Recipient(ResponseError): + """The recipient account must have a verified + email address for receiving payments. + """ + + +class UnverifiedEmailAddress_Sender(ResponseError): + """The sender account must have a verified + email address for this payment. + """ diff --git a/desktop/core/ext-py/boto-2.38.0/boto/fps/response.py b/desktop/core/ext-py/boto-2.38.0/boto/fps/response.py new file mode 100644 index 0000000000000000000000000000000000000000..c0a9e2837f8395fd5288c9fb8e3ce9d5b13ab52d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/fps/response.py @@ -0,0 +1,207 @@ +# Copyright (c) 2012 Andy Davidoff http://www.disruptek.com/ +# Copyright (c) 2010 Jason R. 
Coombs http://www.jaraco.com/ +# Copyright (c) 2008 Chris Moyer http://coredumped.org/ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +from decimal import Decimal +from boto.compat import filter, map + + +def ResponseFactory(action): + class FPSResponse(Response): + _action = action + _Result = globals().get(action + 'Result', ResponseElement) + + # due to nodes receiving their closing tags + def endElement(self, name, value, connection): + if name != action + 'Response': + super(FPSResponse, self).endElement(name, value, connection) + return FPSResponse + + +class ResponseElement(object): + def __init__(self, connection=None, name=None): + if connection is not None: + self._connection = connection + self._name = name or self.__class__.__name__ + + @property + def connection(self): + return self._connection + + def __repr__(self): + render = lambda pair: '{!s}: {!r}'.format(*pair) + do_show = lambda pair: not pair[0].startswith('_') + attrs = filter(do_show, self.__dict__.items()) + return '{0}({1})'.format(self.__class__.__name__, + ', '.join(map(render, attrs))) + + def startElement(self, name, attrs, connection): + return None + + # due to nodes receiving their closing tags + def endElement(self, name, value, connection): + if name != self._name: + setattr(self, name, value) + + +class Response(ResponseElement): + _action = 'Undefined' + + def startElement(self, name, attrs, connection): + if name == 'ResponseMetadata': + setattr(self, name, ResponseElement(name=name)) + elif name == self._action + 'Result': + setattr(self, name, self._Result(name=name)) + else: + return super(Response, self).startElement(name, attrs, connection) + return getattr(self, name) + + +class ComplexAmount(ResponseElement): + def __repr__(self): + return '{0} {1}'.format(self.CurrencyCode, self.Value) + + def __float__(self): + return float(self.Value) + + def __str__(self): + return str(self.Value) + + def startElement(self, name, attrs, connection): + if name not in ('CurrencyCode', 'Value'): + message = 'Unrecognized tag {0} in ComplexAmount'.format(name) + raise AssertionError(message) + return super(ComplexAmount, self).startElement(name, attrs, connection) + + def endElement(self, name, value, connection): + if name == 'Value': + value = Decimal(value) + super(ComplexAmount, self).endElement(name, value, connection) + + +class AmountCollection(ResponseElement): + def startElement(self, name, attrs, connection): + setattr(self, name, 
ComplexAmount(name=name)) + return getattr(self, name) + + +class AccountBalance(AmountCollection): + def startElement(self, name, attrs, connection): + if name == 'AvailableBalances': + setattr(self, name, AmountCollection(name=name)) + return getattr(self, name) + return super(AccountBalance, self).startElement(name, attrs, connection) + + +class GetAccountBalanceResult(ResponseElement): + def startElement(self, name, attrs, connection): + if name == 'AccountBalance': + setattr(self, name, AccountBalance(name=name)) + return getattr(self, name) + return super(GetAccountBalanceResult, self).startElement(name, attrs, + connection) + + +class GetTotalPrepaidLiabilityResult(ResponseElement): + def startElement(self, name, attrs, connection): + if name == 'OutstandingPrepaidLiability': + setattr(self, name, AmountCollection(name=name)) + return getattr(self, name) + return super(GetTotalPrepaidLiabilityResult, self).startElement(name, + attrs, connection) + + +class GetPrepaidBalanceResult(ResponseElement): + def startElement(self, name, attrs, connection): + if name == 'PrepaidBalance': + setattr(self, name, AmountCollection(name=name)) + return getattr(self, name) + return super(GetPrepaidBalanceResult, self).startElement(name, attrs, + connection) + + +class GetOutstandingDebtBalanceResult(ResponseElement): + def startElement(self, name, attrs, connection): + if name == 'OutstandingDebt': + setattr(self, name, AmountCollection(name=name)) + return getattr(self, name) + return super(GetOutstandingDebtBalanceResult, self).startElement(name, + attrs, connection) + + +class TransactionPart(ResponseElement): + def startElement(self, name, attrs, connection): + if name == 'FeesPaid': + setattr(self, name, ComplexAmount(name=name)) + return getattr(self, name) + return super(TransactionPart, self).startElement(name, attrs, + connection) + + +class Transaction(ResponseElement): + def __init__(self, *args, **kw): + self.TransactionPart = [] + super(Transaction, self).__init__(*args, **kw) + + def startElement(self, name, attrs, connection): + if name == 'TransactionPart': + getattr(self, name).append(TransactionPart(name=name)) + return getattr(self, name)[-1] + if name in ('TransactionAmount', 'FPSFees', 'Balance'): + setattr(self, name, ComplexAmount(name=name)) + return getattr(self, name) + return super(Transaction, self).startElement(name, attrs, connection) + + +class GetAccountActivityResult(ResponseElement): + def __init__(self, *args, **kw): + self.Transaction = [] + super(GetAccountActivityResult, self).__init__(*args, **kw) + + def startElement(self, name, attrs, connection): + if name == 'Transaction': + getattr(self, name).append(Transaction(name=name)) + return getattr(self, name)[-1] + return super(GetAccountActivityResult, self).startElement(name, attrs, + connection) + + +class GetTransactionResult(ResponseElement): + def startElement(self, name, attrs, connection): + if name == 'Transaction': + setattr(self, name, Transaction(name=name)) + return getattr(self, name) + return super(GetTransactionResult, self).startElement(name, attrs, + connection) + + +class GetTokensResult(ResponseElement): + def __init__(self, *args, **kw): + self.Token = [] + super(GetTokensResult, self).__init__(*args, **kw) + + def startElement(self, name, attrs, connection): + if name == 'Token': + getattr(self, name).append(ResponseElement(name=name)) + return getattr(self, name)[-1] + return super(GetTokensResult, self).startElement(name, attrs, + connection) diff --git 
a/desktop/core/ext-py/boto-2.38.0/boto/glacier/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/glacier/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..713740b429bd162f352cb5e5783da296fa0f0eee --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/glacier/__init__.py @@ -0,0 +1,42 @@ +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the Amazon Glacier service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.glacier.layer2 import Layer2 + return get_regions('glacier', connection_cls=Layer2) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/glacier/concurrent.py b/desktop/core/ext-py/boto-2.38.0/boto/glacier/concurrent.py new file mode 100644 index 0000000000000000000000000000000000000000..a4f3a224a0951fd44a7d345b92eb2128f66a45fb --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/glacier/concurrent.py @@ -0,0 +1,425 @@ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
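The regions()/connect_to_region() helpers in boto/glacier/__init__.py above
are the usual entry point into the Glacier API. A minimal sketch of how they
might be used, assuming AWS credentials are available from the environment or
boto config; the vault name is hypothetical:

    import boto.glacier

    # connect_to_region() returns a Layer2 connection (connection_cls above),
    # or None when the region name is not recognized.
    conn = boto.glacier.connect_to_region('us-east-1')
    if conn is not None:
        vault = conn.create_vault('my-vault')  # idempotent on the service side
        print(vault.arn)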
+#
+import os
+import math
+import threading
+import hashlib
+import time
+import logging
+from boto.compat import Queue
+import binascii
+# Empty is raised by Queue.get(timeout=...) in TransferThread.run below;
+# import it in a Python 2/3 compatible way.
+try:
+    from queue import Empty  # Python 3
+except ImportError:
+    from Queue import Empty  # Python 2
+
+from boto.glacier.utils import DEFAULT_PART_SIZE, minimum_part_size, \
+    chunk_hashes, tree_hash, bytes_to_hex
+from boto.glacier.exceptions import UploadArchiveError, \
+    DownloadArchiveError, \
+    TreeHashDoesNotMatchError
+
+
+_END_SENTINEL = object()
+log = logging.getLogger('boto.glacier.concurrent')
+
+
+class ConcurrentTransferer(object):
+    def __init__(self, part_size=DEFAULT_PART_SIZE, num_threads=10):
+        self._part_size = part_size
+        self._num_threads = num_threads
+        self._threads = []
+
+    def _calculate_required_part_size(self, total_size):
+        min_part_size_required = minimum_part_size(total_size)
+        if self._part_size >= min_part_size_required:
+            part_size = self._part_size
+        else:
+            part_size = min_part_size_required
+            log.debug("The part size specified (%s) is smaller than "
+                      "the minimum required part size. Using a part "
+                      "size of: %s", self._part_size, part_size)
+        total_parts = int(math.ceil(total_size / float(part_size)))
+        return total_parts, part_size
+
+    def _shutdown_threads(self):
+        log.debug("Shutting down threads.")
+        for thread in self._threads:
+            thread.should_continue = False
+        for thread in self._threads:
+            thread.join()
+        log.debug("Threads have exited.")
+
+    def _add_work_items_to_queue(self, total_parts, worker_queue, part_size):
+        log.debug("Adding work items to queue.")
+        for i in range(total_parts):
+            worker_queue.put((i, part_size))
+        for i in range(self._num_threads):
+            worker_queue.put(_END_SENTINEL)
+
+
+class ConcurrentUploader(ConcurrentTransferer):
+    """Concurrently upload an archive to glacier.
+
+    This class uses a thread pool to concurrently upload an archive
+    to glacier using the multipart upload API.
+
+    The threadpool is completely managed by this class and is
+    transparent to the users of this class.
+
+    """
+    def __init__(self, api, vault_name, part_size=DEFAULT_PART_SIZE,
+                 num_threads=10):
+        """
+        :type api: :class:`boto.glacier.layer1.Layer1`
+        :param api: A layer1 glacier object.
+
+        :type vault_name: str
+        :param vault_name: The name of the vault.
+
+        :type part_size: int
+        :param part_size: The size, in bytes, of the chunks to use when uploading
+            the archive parts. The part size must be a megabyte multiplied by
+            a power of two.
+
+        :type num_threads: int
+        :param num_threads: The number of threads to spawn for the thread pool.
+            The number of threads controls how many parts are
+            concurrently uploaded.
+
+        """
+        super(ConcurrentUploader, self).__init__(part_size, num_threads)
+        self._api = api
+        self._vault_name = vault_name
+
+    def upload(self, filename, description=None):
+        """Concurrently create an archive.
+
+        The part_size value specified when the class was constructed
+        will be used *unless* it is smaller than the minimum required
+        part size needed for the size of the given file. In that case,
+        the part size used will be the minimum part size required
+        to properly upload the given file.
+
+        :type filename: str
+        :param filename: The filename to upload
+
+        :type description: str
+        :param description: The description of the archive.
+
+        :rtype: str
+        :return: The archive id of the newly created archive.
+
+        """
+        total_size = os.stat(filename).st_size
+        total_parts, part_size = self._calculate_required_part_size(total_size)
+        hash_chunks = [None] * total_parts
+        worker_queue = Queue()
+        result_queue = Queue()
+        response = self._api.initiate_multipart_upload(self._vault_name,
+                                                       part_size,
+                                                       description)
+        upload_id = response['UploadId']
+        # The basic idea is to add the chunks (the offsets not the actual
+        # contents) to a work queue, start up a thread pool, let the threads
+        # crank through the items in the work queue, and then place their
+        # results in a result queue which we use to complete the multipart
+        # upload.
+        self._add_work_items_to_queue(total_parts, worker_queue, part_size)
+        self._start_upload_threads(result_queue, upload_id,
+                                   worker_queue, filename)
+        try:
+            self._wait_for_upload_threads(hash_chunks, result_queue,
+                                          total_parts)
+        except UploadArchiveError as e:
+            log.debug("An error occurred while uploading an archive, "
+                      "aborting multipart upload.")
+            self._api.abort_multipart_upload(self._vault_name, upload_id)
+            raise e
+        log.debug("Completing upload.")
+        response = self._api.complete_multipart_upload(
+            self._vault_name, upload_id, bytes_to_hex(tree_hash(hash_chunks)),
+            total_size)
+        log.debug("Upload finished.")
+        return response['ArchiveId']
+
+    def _wait_for_upload_threads(self, hash_chunks, result_queue, total_parts):
+        for _ in range(total_parts):
+            result = result_queue.get()
+            if isinstance(result, Exception):
+                log.debug("An error was found in the result queue, terminating "
+                          "threads: %s", result)
+                self._shutdown_threads()
+                raise UploadArchiveError("An error occurred while uploading "
+                                         "an archive: %s" % result)
+            # Each unit of work returns the tree hash for the given part
+            # number, which we use at the end to compute the tree hash of
+            # the entire archive.
+            part_number, tree_sha256 = result
+            hash_chunks[part_number] = tree_sha256
+        self._shutdown_threads()
+
+    def _start_upload_threads(self, result_queue, upload_id, worker_queue,
+                              filename):
+        log.debug("Starting threads.")
+        for _ in range(self._num_threads):
+            thread = UploadWorkerThread(self._api, self._vault_name, filename,
+                                        upload_id, worker_queue, result_queue)
+            time.sleep(0.2)  # brief stagger between thread start-ups
+            thread.start()
+            self._threads.append(thread)
+
+
+class TransferThread(threading.Thread):
+    def __init__(self, worker_queue, result_queue):
+        super(TransferThread, self).__init__()
+        self._worker_queue = worker_queue
+        self._result_queue = result_queue
+        # This value can be set externally by other objects
+        # to indicate that the thread should be shut down.
+ self.should_continue = True + + def run(self): + while self.should_continue: + try: + work = self._worker_queue.get(timeout=1) + except Empty: + continue + if work is _END_SENTINEL: + self._cleanup() + return + result = self._process_chunk(work) + self._result_queue.put(result) + self._cleanup() + + def _process_chunk(self, work): + pass + + def _cleanup(self): + pass + + +class UploadWorkerThread(TransferThread): + def __init__(self, api, vault_name, filename, upload_id, + worker_queue, result_queue, num_retries=5, + time_between_retries=5, + retry_exceptions=Exception): + super(UploadWorkerThread, self).__init__(worker_queue, result_queue) + self._api = api + self._vault_name = vault_name + self._filename = filename + self._fileobj = open(filename, 'rb') + self._upload_id = upload_id + self._num_retries = num_retries + self._time_between_retries = time_between_retries + self._retry_exceptions = retry_exceptions + + def _process_chunk(self, work): + result = None + for i in range(self._num_retries + 1): + try: + result = self._upload_chunk(work) + break + except self._retry_exceptions as e: + log.error("Exception caught uploading part number %s for " + "vault %s, attempt: (%s / %s), filename: %s, " + "exception: %s, msg: %s", + work[0], self._vault_name, i + 1, self._num_retries + 1, + self._filename, e.__class__, e) + time.sleep(self._time_between_retries) + result = e + return result + + def _upload_chunk(self, work): + part_number, part_size = work + start_byte = part_number * part_size + self._fileobj.seek(start_byte) + contents = self._fileobj.read(part_size) + linear_hash = hashlib.sha256(contents).hexdigest() + tree_hash_bytes = tree_hash(chunk_hashes(contents)) + byte_range = (start_byte, start_byte + len(contents) - 1) + log.debug("Uploading chunk %s of size %s", part_number, part_size) + response = self._api.upload_part(self._vault_name, self._upload_id, + linear_hash, + bytes_to_hex(tree_hash_bytes), + byte_range, contents) + # Reading the response allows the connection to be reused. + response.read() + return (part_number, tree_hash_bytes) + + def _cleanup(self): + self._fileobj.close() + + +class ConcurrentDownloader(ConcurrentTransferer): + """ + Concurrently download an archive from glacier. + + This class uses a thread pool to concurrently download an archive + from glacier. + + The threadpool is completely managed by this class and is + transparent to the users of this class. + + """ + def __init__(self, job, part_size=DEFAULT_PART_SIZE, + num_threads=10): + """ + :param job: A layer2 job object for archive retrieval object. + + :param part_size: The size, in bytes, of the chunks to use when uploading + the archive parts. The part size must be a megabyte multiplied by + a power of two. + + """ + super(ConcurrentDownloader, self).__init__(part_size, num_threads) + self._job = job + + def download(self, filename): + """ + Concurrently download an archive. 
+
+        :param filename: The filename to download the archive to
+        :type filename: str
+
+        """
+        total_size = self._job.archive_size
+        total_parts, part_size = self._calculate_required_part_size(total_size)
+        worker_queue = Queue()
+        result_queue = Queue()
+        self._add_work_items_to_queue(total_parts, worker_queue, part_size)
+        self._start_download_threads(result_queue, worker_queue)
+        try:
+            self._wait_for_download_threads(filename, result_queue, total_parts)
+        except DownloadArchiveError as e:
+            log.debug("An error occurred while downloading an archive: %s", e)
+            raise e
+        log.debug("Download completed.")
+
+    def _wait_for_download_threads(self, filename, result_queue, total_parts):
+        """
+        Waits until the result_queue is filled with all the downloaded parts,
+        which indicates that all part downloads have completed, and saves
+        the downloaded parts into filename.
+
+        :param filename: The file the assembled archive is written to.
+        :param result_queue: Queue of (part_number, part_size, tree_hash,
+            data) tuples produced by the download threads.
+        :param total_parts: The total number of parts expected.
+        """
+        hash_chunks = [None] * total_parts
+        with open(filename, "wb") as f:
+            for _ in range(total_parts):
+                result = result_queue.get()
+                if isinstance(result, Exception):
+                    log.debug("An error was found in the result queue, "
+                              "terminating threads: %s", result)
+                    self._shutdown_threads()
+                    raise DownloadArchiveError(
+                        "An error occurred while downloading "
+                        "an archive: %s" % result)
+                part_number, part_size, actual_hash, data = result
+                hash_chunks[part_number] = actual_hash
+                start_byte = part_number * part_size
+                f.seek(start_byte)
+                f.write(data)
+                f.flush()
+        final_hash = bytes_to_hex(tree_hash(hash_chunks))
+        log.debug("Verifying final tree hash of archive, expecting: %s, "
+                  "actual: %s", self._job.sha256_treehash, final_hash)
+        if self._job.sha256_treehash != final_hash:
+            self._shutdown_threads()
+            raise TreeHashDoesNotMatchError(
+                "Tree hash for entire archive does not match, "
+                "expected: %s, got: %s" % (self._job.sha256_treehash,
+                                           final_hash))
+        self._shutdown_threads()
+
+    def _start_download_threads(self, result_queue, worker_queue):
+        log.debug("Starting threads.")
+        for _ in range(self._num_threads):
+            thread = DownloadWorkerThread(self._job, worker_queue, result_queue)
+            time.sleep(0.2)  # brief stagger between thread start-ups
+            thread.start()
+            self._threads.append(thread)
+
+
+class DownloadWorkerThread(TransferThread):
+    def __init__(self, job,
+                 worker_queue, result_queue,
+                 num_retries=5,
+                 time_between_retries=5,
+                 retry_exceptions=Exception):
+        """
+        Individual download thread that downloads parts of the archive from
+        Glacier. Parts to download are taken from the work queue, and each
+        downloaded part is handed back through the result queue.
+
+        :param job: Glacier job object
+        :param worker_queue: A queue of tuples which include the part_number
+            and part_size
+        :param result_queue: A queue of tuples which include the part_number,
+            part_size, tree hash and data of each downloaded part
+
+        """
+        super(DownloadWorkerThread, self).__init__(worker_queue, result_queue)
+        self._job = job
+        self._num_retries = num_retries
+        self._time_between_retries = time_between_retries
+        self._retry_exceptions = retry_exceptions
+
+    def _process_chunk(self, work):
+        """
+        Attempt to download a part of the archive from Glacier. The
+        result is handed back to the run() loop, which stores it in the
+        result_queue.
+
+        :param work: A (part_number, part_size) tuple from the work queue.
+        """
+        result = None
+        for _ in range(self._num_retries):
+            try:
+                result = self._download_chunk(work)
+                break
+            except self._retry_exceptions as e:
+                log.error("Exception caught downloading part number %s for "
+                          "job %s", work[0], self._job,)
+                time.sleep(self._time_between_retries)
+                result = e
+        return result
+
+    def _download_chunk(self, work):
+        """
+        Downloads a chunk of the archive from Glacier and verifies its
+        tree hash. Returns the part number, part size, binary tree hash
+        and downloaded bytes.
+
+        :param work: A (part_number, part_size) tuple from the work queue.
+        """
+        part_number, part_size = work
+        start_byte = part_number * part_size
+        byte_range = (start_byte, start_byte + part_size - 1)
+        log.debug("Downloading chunk %s of size %s", part_number, part_size)
+        response = self._job.get_output(byte_range)
+        data = response.read()
+        actual_hash = bytes_to_hex(tree_hash(chunk_hashes(data)))
+        if response['TreeHash'] != actual_hash:
+            raise TreeHashDoesNotMatchError(
+                "Tree hash for part number %s does not match, "
+                "expected: %s, got: %s" % (part_number, response['TreeHash'],
+                                           actual_hash))
+        return (part_number, part_size, binascii.unhexlify(actual_hash), data)
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/glacier/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/glacier/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8bce1fe9b986a100b88d3586f381a2d882ddf98
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/glacier/exceptions.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
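ConcurrentUploader above is driven with a Layer1 connection. A minimal sketch
of a concurrent archive upload, assuming configured credentials and
hypothetical vault/file names (part_size must remain a megabyte multiplied by
a power of two):

    from boto.glacier.layer1 import Layer1
    from boto.glacier.concurrent import ConcurrentUploader

    api = Layer1()  # credentials from the environment or boto config
    uploader = ConcurrentUploader(api, 'my-vault',
                                  part_size=8 * 1024 * 1024, num_threads=4)
    # upload() blocks until every part is uploaded and the multipart upload
    # is completed, then returns the id of the new archive.
    archive_id = uploader.upload('backup.tar', description='nightly backup')
    print(archive_id)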
+# +from boto.compat import json + + +class UnexpectedHTTPResponseError(Exception): + def __init__(self, expected_responses, response): + self.status = response.status + self.body = response.read() + self.code = None + try: + body = json.loads(self.body) + self.code = body["code"] + msg = 'Expected %s, got ' % expected_responses + msg += '(%d, code=%s, message=%s)' % (response.status, + self.code, + body["message"]) + except Exception: + msg = 'Expected %s, got (%d, %s)' % (expected_responses, + response.status, + self.body) + super(UnexpectedHTTPResponseError, self).__init__(msg) + + +class ArchiveError(Exception): + pass + + +class UploadArchiveError(ArchiveError): + pass + + +class DownloadArchiveError(ArchiveError): + pass + + +class TreeHashDoesNotMatchError(ArchiveError): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/glacier/job.py b/desktop/core/ext-py/boto-2.38.0/boto/glacier/job.py new file mode 100644 index 0000000000000000000000000000000000000000..33e66a196c49b7e7ba0af2139e0e7da0633ea50b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/glacier/job.py @@ -0,0 +1,177 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
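These exception classes are raised by the Glacier Layer1 connection whenever
the service answers with an HTTP status outside the expected set. A minimal
sketch of inspecting such a failure, assuming configured credentials and a
hypothetical vault name:

    from boto.glacier.layer1 import Layer1
    from boto.glacier.exceptions import UnexpectedHTTPResponseError

    api = Layer1()
    try:
        api.describe_vault('no-such-vault')
    except UnexpectedHTTPResponseError as e:
        # status, code and body are populated in __init__ above
        print(e.status, e.code)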
+#
+import math
+import socket
+
+from boto.glacier.exceptions import TreeHashDoesNotMatchError, \
+    DownloadArchiveError
+from boto.glacier.utils import tree_hash_from_str
+
+
+class Job(object):
+
+    DefaultPartSize = 4 * 1024 * 1024
+
+    ResponseDataElements = (('Action', 'action', None),
+                            ('ArchiveId', 'archive_id', None),
+                            ('ArchiveSizeInBytes', 'archive_size', 0),
+                            ('Completed', 'completed', False),
+                            ('CompletionDate', 'completion_date', None),
+                            ('CreationDate', 'creation_date', None),
+                            ('InventorySizeInBytes', 'inventory_size', 0),
+                            ('JobDescription', 'description', None),
+                            ('JobId', 'id', None),
+                            ('SHA256TreeHash', 'sha256_treehash', None),
+                            ('SNSTopic', 'sns_topic', None),
+                            ('StatusCode', 'status_code', None),
+                            ('StatusMessage', 'status_message', None),
+                            ('VaultARN', 'arn', None))
+
+    def __init__(self, vault, response_data=None):
+        self.vault = vault
+        if response_data:
+            for response_name, attr_name, default in self.ResponseDataElements:
+                setattr(self, attr_name, response_data[response_name])
+        else:
+            for response_name, attr_name, default in self.ResponseDataElements:
+                setattr(self, attr_name, default)
+
+    def __repr__(self):
+        return 'Job(%s)' % self.arn
+
+    def get_output(self, byte_range=None, validate_checksum=False):
+        """
+        This operation downloads the output of the job. Depending on
+        the job type you specified when you initiated the job, the
+        output will be either the content of an archive or a vault
+        inventory.
+
+        You can download all the job output or download a portion of
+        the output by specifying a byte range. In the case of an
+        archive retrieval job, depending on the byte range you
+        specify, Amazon Glacier returns the checksum for the portion
+        of the data. You can compute the checksum on the client and
+        verify that the values match to ensure the portion you
+        downloaded is the correct data.
+
+        :type byte_range: tuple
+        :param byte_range: A tuple of integers specifying the slice (in bytes)
+            of the archive you want to receive
+
+        :type validate_checksum: bool
+        :param validate_checksum: Specify whether or not to validate
+            the associated tree hash. If the response does not contain
+            a TreeHash, then no checksum will be verified.
+
+        """
+        response = self.vault.layer1.get_job_output(self.vault.name,
+                                                    self.id,
+                                                    byte_range)
+        if validate_checksum and 'TreeHash' in response:
+            data = response.read()
+            actual_tree_hash = tree_hash_from_str(data)
+            if response['TreeHash'] != actual_tree_hash:
+                raise TreeHashDoesNotMatchError(
+                    "The calculated tree hash %s does not match the "
+                    "expected tree hash %s for the byte range %s" % (
+                        actual_tree_hash, response['TreeHash'], byte_range))
+        return response
+
+    def _calc_num_chunks(self, chunk_size):
+        return int(math.ceil(self.archive_size / float(chunk_size)))
+
+    def download_to_file(self, filename, chunk_size=DefaultPartSize,
+                         verify_hashes=True, retry_exceptions=(socket.error,)):
+        """Download an archive to a file by name.
+
+        :type filename: str
+        :param filename: The name of the file where the archive
+            contents will be saved.
+
+        :type chunk_size: int
+        :param chunk_size: The chunk size to use when downloading
+            the archive.
+
+        :type verify_hashes: bool
+        :param verify_hashes: Indicates whether or not to verify
+            the tree hashes for each downloaded chunk.
+
+        """
+        num_chunks = self._calc_num_chunks(chunk_size)
+        with open(filename, 'wb') as output_file:
+            self._download_to_fileob(output_file, num_chunks, chunk_size,
+                                     verify_hashes, retry_exceptions)
+
+    def download_to_fileobj(self, output_file, chunk_size=DefaultPartSize,
+                            verify_hashes=True,
+                            retry_exceptions=(socket.error,)):
+        """Download an archive to a file object.
+
+        :type output_file: file
+        :param output_file: The file object where the archive
+            contents will be saved.
+
+        :type chunk_size: int
+        :param chunk_size: The chunk size to use when downloading
+            the archive.
+
+        :type verify_hashes: bool
+        :param verify_hashes: Indicates whether or not to verify
+            the tree hashes for each downloaded chunk.
+
+        """
+        num_chunks = self._calc_num_chunks(chunk_size)
+        self._download_to_fileob(output_file, num_chunks, chunk_size,
+                                 verify_hashes, retry_exceptions)
+
+    def _download_to_fileob(self, fileobj, num_chunks, chunk_size, verify_hashes,
+                            retry_exceptions):
+        for i in range(num_chunks):
+            byte_range = ((i * chunk_size), ((i + 1) * chunk_size) - 1)
+            data, expected_tree_hash = self._download_byte_range(
+                byte_range, retry_exceptions)
+            if verify_hashes:
+                actual_tree_hash = tree_hash_from_str(data)
+                if expected_tree_hash != actual_tree_hash:
+                    raise TreeHashDoesNotMatchError(
+                        "The calculated tree hash %s does not match the "
+                        "expected tree hash %s for the byte range %s" % (
+                            actual_tree_hash, expected_tree_hash, byte_range))
+            fileobj.write(data)
+
+    def _download_byte_range(self, byte_range, retry_exceptions):
+        # You can occasionally get socket.errors when downloading
+        # chunks from Glacier, so each chunk can be retried up
+        # to 5 times.
+        last_exception = None
+        for _ in range(5):
+            try:
+                response = self.get_output(byte_range)
+                data = response.read()
+                expected_tree_hash = response['TreeHash']
+                return data, expected_tree_hash
+            except retry_exceptions as e:
+                last_exception = e
+                continue
+        raise DownloadArchiveError("There was an error downloading "
+                                   "byte range %s: %s" % (byte_range,
+                                                          last_exception))
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/glacier/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/glacier/layer1.py
new file mode 100644
index 0000000000000000000000000000000000000000..39136cf03fce67eef51c924f38107474d0835088
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/glacier/layer1.py
@@ -0,0 +1,1279 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import os + +import boto.glacier +from boto.compat import json +from boto.connection import AWSAuthConnection +from boto.glacier.exceptions import UnexpectedHTTPResponseError +from boto.glacier.response import GlacierResponse +from boto.glacier.utils import ResettingFileSender + + +class Layer1(AWSAuthConnection): + """ + Amazon Glacier is a storage solution for "cold data." + + Amazon Glacier is an extremely low-cost storage service that + provides secure, durable and easy-to-use storage for data backup + and archival. With Amazon Glacier, customers can store their data + cost effectively for months, years, or decades. Amazon Glacier + also enables customers to offload the administrative burdens of + operating and scaling storage to AWS, so they don't have to worry + about capacity planning, hardware provisioning, data replication, + hardware failure and recovery, or time-consuming hardware + migrations. + + Amazon Glacier is a great storage choice when low storage cost is + paramount, your data is rarely retrieved, and retrieval latency of + several hours is acceptable. If your application requires fast or + frequent access to your data, consider using Amazon S3. For more + information, go to `Amazon Simple Storage Service (Amazon S3)`_. + + You can store any kind of data in any format. There is no maximum + limit on the total amount of data you can store in Amazon Glacier. + + If you are a first-time user of Amazon Glacier, we recommend that + you begin by reading the following sections in the Amazon Glacier + Developer Guide : + + + + `What is Amazon Glacier`_ - This section of the Developer Guide + describes the underlying data model, the operations it supports, + and the AWS SDKs that you can use to interact with the service. + + `Getting Started with Amazon Glacier`_ - The Getting Started + section walks you through the process of creating a vault, + uploading archives, creating jobs to download archives, retrieving + the job output, and deleting archives. 
+ """ + Version = '2012-06-01' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + account_id='-', is_secure=True, port=None, + proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, path='/', + provider='aws', security_token=None, + suppress_consec_slashes=True, + region=None, region_name='us-east-1', + profile_name=None): + + if not region: + for reg in boto.glacier.regions(): + if reg.name == region_name: + region = reg + break + + self.region = region + self.account_id = account_id + super(Layer1, self).__init__(region.endpoint, + aws_access_key_id, aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, debug, + https_connection_factory, + path, provider, security_token, + suppress_consec_slashes, + profile_name=profile_name) + + def _required_auth_capability(self): + return ['hmac-v4'] + + def make_request(self, verb, resource, headers=None, + data='', ok_responses=(200,), params=None, + sender=None, response_headers=None): + if headers is None: + headers = {} + headers['x-amz-glacier-version'] = self.Version + uri = '/%s/%s' % (self.account_id, resource) + response = super(Layer1, self).make_request(verb, uri, + params=params, + headers=headers, + sender=sender, + data=data) + if response.status in ok_responses: + return GlacierResponse(response, response_headers) + else: + # create glacier-specific exceptions + raise UnexpectedHTTPResponseError(ok_responses, response) + + # Vaults + + def list_vaults(self, limit=None, marker=None): + """ + This operation lists all vaults owned by the calling user's + account. The list returned in the response is ASCII-sorted by + vault name. + + By default, this operation returns up to 1,000 items. If there + are more vaults to list, the response `marker` field contains + the vault Amazon Resource Name (ARN) at which to continue the + list with a new List Vaults request; otherwise, the `marker` + field is `null`. To return a list of vaults that begins at a + specific vault, set the `marker` request parameter to the + vault ARN you obtained from a previous List Vaults request. + You can also limit the number of vaults returned in the + response by specifying the `limit` parameter in the request. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Retrieving Vault Metadata in Amazon Glacier`_ and `List + Vaults `_ in the Amazon Glacier Developer Guide . + + :type marker: string + :param marker: A string used for pagination. The marker specifies the + vault ARN after which the listing of vaults should begin. + + :type limit: string + :param limit: The maximum number of items returned in the response. If + you don't specify a value, the List Vaults operation returns up to + 1,000 items. 
+ """ + params = {} + if limit: + params['limit'] = limit + if marker: + params['marker'] = marker + return self.make_request('GET', 'vaults', params=params) + + def describe_vault(self, vault_name): + """ + This operation returns information about a vault, including + the vault's Amazon Resource Name (ARN), the date the vault was + created, the number of archives it contains, and the total + size of all the archives in the vault. The number of archives + and their total size are as of the last inventory generation. + This means that if you add or remove an archive from a vault, + and then immediately use Describe Vault, the change in + contents will not be immediately reflected. If you want to + retrieve the latest inventory of the vault, use InitiateJob. + Amazon Glacier generates vault inventories approximately + daily. For more information, see `Downloading a Vault + Inventory in Amazon Glacier`_. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Retrieving Vault Metadata in Amazon Glacier`_ and `Describe + Vault `_ in the Amazon Glacier Developer Guide . + + :type vault_name: string + :param vault_name: The name of the vault. + """ + uri = 'vaults/%s' % vault_name + return self.make_request('GET', uri) + + def create_vault(self, vault_name): + """ + This operation creates a new vault with the specified name. + The name of the vault must be unique within a region for an + AWS account. You can create up to 1,000 vaults per account. If + you need to create more vaults, contact Amazon Glacier. + + You must use the following guidelines when naming a vault. + + + + + Names can be between 1 and 255 characters long. + + Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' + (hyphen), and '.' (period). + + + + This operation is idempotent. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Creating a Vault in Amazon Glacier`_ and `Create Vault `_ in + the Amazon Glacier Developer Guide . + + :type vault_name: string + :param vault_name: The name of the vault. + """ + uri = 'vaults/%s' % vault_name + return self.make_request('PUT', uri, ok_responses=(201,), + response_headers=[('Location', 'Location')]) + + def delete_vault(self, vault_name): + """ + This operation deletes a vault. Amazon Glacier will delete a + vault only if there are no archives in the vault as of the + last inventory and there have been no writes to the vault + since the last inventory. If either of these conditions is not + satisfied, the vault deletion fails (that is, the vault is not + removed) and Amazon Glacier returns an error. You can use + DescribeVault to return the number of archives in a vault, and + you can use `Initiate a Job (POST jobs)`_ to initiate a new + inventory retrieval for a vault. The inventory contains the + archive IDs you use to delete archives using `Delete Archive + (DELETE archive)`_. 
+ + This operation is idempotent. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Deleting a Vault in Amazon Glacier`_ and `Delete Vault `_ in + the Amazon Glacier Developer Guide . + + :type vault_name: string + :param vault_name: The name of the vault. + """ + uri = 'vaults/%s' % vault_name + return self.make_request('DELETE', uri, ok_responses=(204,)) + + def get_vault_notifications(self, vault_name): + """ + This operation retrieves the `notification-configuration` + subresource of the specified vault. + + For information about setting a notification configuration on + a vault, see SetVaultNotifications. If a notification + configuration for a vault is not set, the operation returns a + `404 Not Found` error. For more information about vault + notifications, see `Configuring Vault Notifications in Amazon + Glacier`_. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Configuring Vault Notifications in Amazon Glacier`_ and `Get + Vault Notification Configuration `_ in the Amazon Glacier + Developer Guide . + + :type vault_name: string + :param vault_name: The name of the vault. + """ + uri = 'vaults/%s/notification-configuration' % vault_name + return self.make_request('GET', uri) + + def set_vault_notifications(self, vault_name, notification_config): + """ + This operation configures notifications that will be sent when + specific events happen to a vault. By default, you don't get + any notifications. + + To configure vault notifications, send a PUT request to the + `notification-configuration` subresource of the vault. The + request should include a JSON document that provides an Amazon + SNS topic and specific events for which you want Amazon + Glacier to send notifications to the topic. + + Amazon SNS topics must grant permission to the vault to be + allowed to publish notifications to the topic. You can + configure a vault to publish a notification for the following + vault events: + + + + **ArchiveRetrievalCompleted** This event occurs when a job + that was initiated for an archive retrieval is completed + (InitiateJob). The status of the completed job can be + "Succeeded" or "Failed". The notification sent to the SNS + topic is the same output as returned from DescribeJob. + + **InventoryRetrievalCompleted** This event occurs when a job + that was initiated for an inventory retrieval is completed + (InitiateJob). The status of the completed job can be + "Succeeded" or "Failed". The notification sent to the SNS + topic is the same output as returned from DescribeJob. + + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. 
For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Configuring Vault Notifications in Amazon Glacier`_ and `Set + Vault Notification Configuration `_ in the Amazon Glacier + Developer Guide . + + :type vault_name: string + :param vault_name: The name of the vault. + + :type vault_notification_config: dict + :param vault_notification_config: Provides options for specifying + notification configuration. + + The format of the dictionary is: + + {'SNSTopic': 'mytopic', + 'Events': [event1,...]} + """ + uri = 'vaults/%s/notification-configuration' % vault_name + json_config = json.dumps(notification_config) + return self.make_request('PUT', uri, data=json_config, + ok_responses=(204,)) + + def delete_vault_notifications(self, vault_name): + """ + This operation deletes the notification configuration set for + a vault. The operation is eventually consistent;that is, it + might take some time for Amazon Glacier to completely disable + the notifications and you might still receive some + notifications for a short time after you send the delete + request. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Configuring Vault Notifications in Amazon Glacier`_ and + `Delete Vault Notification Configuration `_ in the Amazon + Glacier Developer Guide. + + :type vault_name: string + :param vault_name: The name of the vault. + """ + uri = 'vaults/%s/notification-configuration' % vault_name + return self.make_request('DELETE', uri, ok_responses=(204,)) + + # Jobs + + def list_jobs(self, vault_name, completed=None, status_code=None, + limit=None, marker=None): + """ + This operation lists jobs for a vault, including jobs that are + in-progress and jobs that have recently finished. + + + Amazon Glacier retains recently completed jobs for a period + before deleting them; however, it eventually removes completed + jobs. The output of completed jobs can be retrieved. Retaining + completed jobs for a period of time after they have completed + enables you to get a job output in the event you miss the job + completion notification or your first attempt to download it + fails. For example, suppose you start an archive retrieval job + to download an archive. After the job completes, you start to + download the archive but encounter a network error. In this + scenario, you can retry and download the archive while the job + exists. + + + To retrieve an archive or retrieve a vault inventory from + Amazon Glacier, you first initiate a job, and after the job + completes, you download the data. For an archive retrieval, + the output is the archive data, and for an inventory + retrieval, it is the inventory list. The List Job operation + returns a list of these jobs sorted by job initiation time. + + This List Jobs operation supports pagination. By default, this + operation returns up to 1,000 jobs in the response. You should + always check the response for a `marker` at which to continue + the list; if there are no more items the `marker` is `null`. 
+ To return a list of jobs that begins at a specific job, set + the `marker` request parameter to the value you obtained from + a previous List Jobs request. You can also limit the number of + jobs returned in the response by specifying the `limit` + parameter in the request. + + Additionally, you can filter the jobs list returned by + specifying an optional `statuscode` (InProgress, Succeeded, or + Failed) and `completed` (true, false) parameter. The + `statuscode` allows you to specify that only jobs that match a + specified status are returned. The `completed` parameter + allows you to specify that only jobs in a specific completion + state are returned. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For the underlying REST API, go to `List Jobs `_ + + :type vault_name: string + :param vault_name: The name of the vault. + + :type limit: string + :param limit: Specifies that the response be limited to the specified + number of items or fewer. If not specified, the List Jobs operation + returns up to 1,000 jobs. + + :type marker: string + :param marker: An opaque string used for pagination. This value + specifies the job at which the listing of jobs should begin. Get + the marker value from a previous List Jobs response. You need only + include the marker if you are continuing the pagination of results + started in a previous List Jobs request. + + :type statuscode: string + :param statuscode: Specifies the type of job status to return. You can + specify the following values: "InProgress", "Succeeded", or + "Failed". + + :type completed: string + :param completed: Specifies the state of the jobs to return. You can + specify `True` or `False`. + + """ + params = {} + if limit: + params['limit'] = limit + if marker: + params['marker'] = marker + if status_code: + params['statuscode'] = status_code + if completed is not None: + params['completed'] = 'true' if completed else 'false' + uri = 'vaults/%s/jobs' % vault_name + return self.make_request('GET', uri, params=params) + + def describe_job(self, vault_name, job_id): + """ + This operation returns information about a job you previously + initiated, including the job initiation date, the user who + initiated the job, the job status code/message and the Amazon + SNS topic to notify after Amazon Glacier completes the job. + For more information about initiating a job, see InitiateJob. + + + This operation enables you to check the status of your job. + However, it is strongly recommended that you set up an Amazon + SNS topic and specify it in your initiate job request so that + Amazon Glacier can notify the topic after it completes the + job. + + + A job ID will not expire for at least 24 hours after Amazon + Glacier completes the job. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For information about the underlying REST API, go to `Working + with Archives in Amazon Glacier`_ in the Amazon Glacier + Developer Guide . 
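+
+        A hypothetical polling sketch (``conn`` is an authenticated
+        ``Layer1`` connection; the vault name and ``job_id`` are
+        placeholders)::
+
+            job = conn.describe_job('examplevault', job_id)
+            if job['Completed']:
+                print(job['StatusCode'])  # 'Succeeded' or 'Failed'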
+ + :type vault_name: string + :param vault_name: The name of the vault. + + :type job_id: string + :param job_id: The ID of the job to describe. + """ + uri = 'vaults/%s/jobs/%s' % (vault_name, job_id) + return self.make_request('GET', uri, ok_responses=(200,)) + + def initiate_job(self, vault_name, job_data): + """ + This operation initiates a job of the specified type. In this + release, you can initiate a job to retrieve either an archive + or a vault inventory (a list of archives in a vault). + + Retrieving data from Amazon Glacier is a two-step process: + + + #. Initiate a retrieval job. + #. After the job completes, download the bytes. + + + The retrieval request is executed asynchronously. When you + initiate a retrieval job, Amazon Glacier creates a job and + returns a job ID in the response. When Amazon Glacier + completes the job, you can get the job output (archive or + inventory data). For information about getting job output, see + GetJobOutput operation. + + The job must complete before you can get its output. To + determine when a job is complete, you have the following + options: + + + + **Use Amazon SNS Notification** You can specify an Amazon + Simple Notification Service (Amazon SNS) topic to which Amazon + Glacier can post a notification after the job is completed. + You can specify an SNS topic per job request. The notification + is sent only after Amazon Glacier completes the job. In + addition to specifying an SNS topic per job request, you can + configure vault notifications for a vault so that job + notifications are always sent. For more information, see + SetVaultNotifications. + + **Get job details** You can make a DescribeJob request to + obtain job status information while a job is in progress. + However, it is more efficient to use an Amazon SNS + notification to determine when a job is complete. + + + + The information you get via notification is same that you get + by calling DescribeJob. + + + If for a specific event, you add both the notification + configuration on the vault and also specify an SNS topic in + your initiate job request, Amazon Glacier sends both + notifications. For more information, see + SetVaultNotifications. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + **About the Vault Inventory** + + Amazon Glacier prepares an inventory for each vault + periodically, every 24 hours. When you initiate a job for a + vault inventory, Amazon Glacier returns the last inventory for + the vault. The inventory data you get might be up to a day or + two days old. Also, the initiate inventory job might take some + time to complete before you can download the vault inventory. + So you do not want to retrieve a vault inventory for each + vault operation. However, in some scenarios, you might find + the vault inventory useful. For example, when you upload an + archive, you can provide an archive description but not an + archive name. Amazon Glacier provides you a unique archive ID, + an opaque string of characters. So, you might maintain your + own database that maps archive names to their corresponding + Amazon Glacier assigned archive IDs. 
You might find the vault + inventory useful in the event you need to reconcile + information in your database with the actual vault inventory. + + **About Ranged Archive Retrieval** + + You can initiate an archive retrieval for the whole archive or + a range of the archive. In the case of ranged archive + retrieval, you specify a byte range to return or the whole + archive. The range specified must be megabyte (MB) aligned, + that is the range start value must be divisible by 1 MB and + range end value plus 1 must be divisible by 1 MB or equal the + end of the archive. If the ranged archive retrieval is not + megabyte aligned, this operation returns a 400 response. + Furthermore, to ensure you get checksum values for data you + download using Get Job Output API, the range must be tree hash + aligned. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and the underlying REST API, go to + `Initiate a Job`_ and `Downloading a Vault Inventory`_ + + :type account_id: string + :param account_id: The `AccountId` is the AWS Account ID. You can + specify either the AWS Account ID or optionally a '-', in which + case Amazon Glacier uses the AWS Account ID associated with the + credentials used to sign the request. If you specify your Account + ID, do not include hyphens in it. + + :type vault_name: string + :param vault_name: The name of the vault. + + :type job_parameters: dict + :param job_parameters: Provides options for specifying job information. + The dictionary can contain the following attributes: + + * ArchiveId - The ID of the archive you want to retrieve. + This field is required only if the Type is set to + archive-retrieval. + * Description - The optional description for the job. + * Format - When initiating a job to retrieve a vault + inventory, you can optionally add this parameter to + specify the output format. Valid values are: CSV|JSON. + * SNSTopic - The Amazon SNS topic ARN where Amazon Glacier + sends a notification when the job is completed and the + output is ready for you to download. + * Type - The job type. Valid values are: + archive-retrieval|inventory-retrieval + * RetrievalByteRange - Optionally specify the range of + bytes to retrieve. + * InventoryRetrievalParameters: Optional job parameters + * Format - The output format, like "JSON" + * StartDate - ISO8601 starting date string + * EndDate - ISO8601 ending date string + * Limit - Maximum number of entries + * Marker - A unique string used for pagination + + """ + uri = 'vaults/%s/jobs' % vault_name + response_headers = [('x-amz-job-id', u'JobId'), + ('Location', u'Location')] + json_job_data = json.dumps(job_data) + return self.make_request('POST', uri, data=json_job_data, + ok_responses=(202,), + response_headers=response_headers) + + def get_job_output(self, vault_name, job_id, byte_range=None): + """ + This operation downloads the output of the job you initiated + using InitiateJob. Depending on the job type you specified + when you initiated the job, the output will be either the + content of an archive or a vault inventory. + + A job ID will not expire for at least 24 hours after Amazon + Glacier completes the job. 
That is, you can download the job + output within the 24 hours period after Amazon Glacier + completes the job. + + If the job output is large, then you can use the `Range` + request header to retrieve a portion of the output. This + allows you to download the entire output in smaller chunks of + bytes. For example, suppose you have 1 GB of job output you + want to download and you decide to download 128 MB chunks of + data at a time, which is a total of eight Get Job Output + requests. You use the following process to download the job + output: + + + #. Download a 128 MB chunk of output by specifying the + appropriate byte range using the `Range` header. + #. Along with the data, the response includes a checksum of + the payload. You compute the checksum of the payload on the + client and compare it with the checksum you received in the + response to ensure you received all the expected data. + #. Repeat steps 1 and 2 for all the eight 128 MB chunks of + output data, each time specifying the appropriate byte range. + #. After downloading all the parts of the job output, you have + a list of eight checksum values. Compute the tree hash of + these values to find the checksum of the entire output. Using + the Describe Job API, obtain job information of the job that + provided you the output. The response includes the checksum of + the entire archive stored in Amazon Glacier. You compare this + value with the checksum you computed to ensure you have + downloaded the entire archive content with no errors. + + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and the underlying REST API, go to + `Downloading a Vault Inventory`_, `Downloading an Archive`_, + and `Get Job Output `_ + + :type account_id: string + :param account_id: The `AccountId` is the AWS Account ID. You can + specify either the AWS Account ID or optionally a '-', in which + case Amazon Glacier uses the AWS Account ID associated with the + credentials used to sign the request. If you specify your Account + ID, do not include hyphens in it. + + :type vault_name: string + :param vault_name: The name of the vault. + + :type job_id: string + :param job_id: The job ID whose data is downloaded. + + :type byte_range: string + :param byte_range: The range of bytes to retrieve from the output. For + example, if you want to download the first 1,048,576 bytes, specify + "Range: bytes=0-1048575". By default, this operation downloads the + entire output. + """ + response_headers = [('x-amz-sha256-tree-hash', u'TreeHash'), + ('Content-Range', u'ContentRange'), + ('Content-Type', u'ContentType')] + headers = None + if byte_range: + headers = {'Range': 'bytes=%d-%d' % byte_range} + uri = 'vaults/%s/jobs/%s/output' % (vault_name, job_id) + response = self.make_request('GET', uri, headers=headers, + ok_responses=(200, 206), + response_headers=response_headers) + return response + + # Archives + + def upload_archive(self, vault_name, archive, + linear_hash, tree_hash, description=None): + """ + This operation adds an archive to a vault. This is a + synchronous operation, and for a successful upload, your data + is durably persisted. Amazon Glacier returns the archive ID in + the `x-amz-archive-id` header of the response. 
+ + You must use the archive ID to access your data in Amazon + Glacier. After you upload an archive, you should save the + archive ID returned so that you can retrieve or delete the + archive later. Besides saving the archive ID, you can also + index it and give it a friendly name to allow for better + searching. You can also use the optional archive description + field to specify how the archive is referred to in an external + index of archives, such as you might create in Amazon + DynamoDB. You can also get the vault inventory to obtain a + list of archive IDs in a vault. For more information, see + InitiateJob. + + You must provide a SHA256 tree hash of the data you are + uploading. For information about computing a SHA256 tree hash, + see `Computing Checksums`_. + + You can optionally specify an archive description of up to + 1,024 printable ASCII characters. You can get the archive + description when you either retrieve the archive or get the + vault inventory. For more information, see InitiateJob. Amazon + Glacier does not interpret the description in any way. An + archive description does not need to be unique. You cannot use + the description to retrieve or sort the archive list. + + Archives are immutable. After you upload an archive, you + cannot edit the archive or its description. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Uploading an Archive in Amazon Glacier`_ and `Upload + Archive`_ in the Amazon Glacier Developer Guide . + + :type vault_name: str + :param vault_name: The name of the vault + + :type archive: bytes + :param archive: The data to upload. + + :type linear_hash: str + :param linear_hash: The SHA256 checksum (a linear hash) of the + payload. + + :type tree_hash: str + :param tree_hash: The user-computed SHA256 tree hash of the + payload. For more information on computing the + tree hash, see http://goo.gl/u7chF. + + :type description: str + :param description: The optional description of the archive you + are uploading. + """ + response_headers = [('x-amz-archive-id', u'ArchiveId'), + ('Location', u'Location'), + ('x-amz-sha256-tree-hash', u'TreeHash')] + uri = 'vaults/%s/archives' % vault_name + try: + content_length = str(len(archive)) + except (TypeError, AttributeError): + # If a file like object is provided, try to retrieve + # the file size via fstat. + content_length = str(os.fstat(archive.fileno()).st_size) + headers = {'x-amz-content-sha256': linear_hash, + 'x-amz-sha256-tree-hash': tree_hash, + 'Content-Length': content_length} + if description: + headers['x-amz-archive-description'] = description + if self._is_file_like(archive): + sender = ResettingFileSender(archive) + else: + sender = None + return self.make_request('POST', uri, headers=headers, + sender=sender, + data=archive, ok_responses=(201,), + response_headers=response_headers) + + def _is_file_like(self, archive): + return hasattr(archive, 'seek') and hasattr(archive, 'tell') + + def delete_archive(self, vault_name, archive_id): + """ + This operation deletes an archive from a vault. Subsequent + requests to initiate a retrieval of this archive will fail. 
+ Archive retrievals that are in progress for this archive ID + may or may not succeed according to the following scenarios: + + + + If the archive retrieval job is actively preparing the data + for download when Amazon Glacier receives the delete archive + request, the archival retrieval operation might fail. + + If the archive retrieval job has successfully prepared the + archive for download when Amazon Glacier receives the delete + archive request, you will be able to download the output. + + + This operation is idempotent. Attempting to delete an already- + deleted archive does not result in an error. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Deleting an Archive in Amazon Glacier`_ and `Delete Archive`_ + in the Amazon Glacier Developer Guide . + + :type vault_name: string + :param vault_name: The name of the vault. + + :type archive_id: string + :param archive_id: The ID of the archive to delete. + """ + uri = 'vaults/%s/archives/%s' % (vault_name, archive_id) + return self.make_request('DELETE', uri, ok_responses=(204,)) + + # Multipart + + def initiate_multipart_upload(self, vault_name, part_size, + description=None): + """ + This operation initiates a multipart upload. Amazon Glacier + creates a multipart upload resource and returns its ID in the + response. The multipart upload ID is used in subsequent + requests to upload parts of an archive (see + UploadMultipartPart). + + When you initiate a multipart upload, you specify the part + size in number of bytes. The part size must be a megabyte + (1024 KB) multiplied by a power of 2-for example, 1048576 (1 + MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB), and so + on. The minimum allowable part size is 1 MB, and the maximum + is 4 GB. + + Every part you upload to this resource (see + UploadMultipartPart), except the last one, must have the same + size. The last one can be the same size or smaller. For + example, suppose you want to upload a 16.2 MB file. If you + initiate the multipart upload with a part size of 4 MB, you + will upload four parts of 4 MB each and one part of 0.2 MB. + + + You don't need to know the size of the archive when you start + a multipart upload because Amazon Glacier does not require you + to specify the overall archive size. + + + After you complete the multipart upload, Amazon Glacier + removes the multipart upload resource referenced by the ID. + Amazon Glacier also removes the multipart upload resource if + you cancel the multipart upload or it may be removed if there + is no activity for a period of 24 hours. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Uploading Large Archives in Parts (Multipart Upload)`_ and + `Initiate Multipart Upload`_ in the Amazon Glacier Developer + Guide . 
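+
+        For illustration only (``conn`` is an authenticated ``Layer1``
+        connection; the vault name is a placeholder)::
+
+            part_size = 4 * 1024 * 1024  # 4 MB: 1 MB times a power of two
+            response = conn.initiate_multipart_upload('examplevault',
+                                                      part_size)
+            upload_id = response['UploadId']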
+ + The part size must be a megabyte (1024 KB) multiplied by a power of + 2, for example, 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), + 8388608 (8 MB), and so on. The minimum allowable part size is 1 MB, + and the maximum is 4 GB (4096 MB). + + :type vault_name: str + :param vault_name: The name of the vault. + + :type description: str + :param description: The archive description that you are uploading in + parts. + + :type part_size: int + :param part_size: The size of each part except the last, in bytes. The + last part can be smaller than this part size. + """ + response_headers = [('x-amz-multipart-upload-id', u'UploadId'), + ('Location', u'Location')] + headers = {'x-amz-part-size': str(part_size)} + if description: + headers['x-amz-archive-description'] = description + uri = 'vaults/%s/multipart-uploads' % vault_name + response = self.make_request('POST', uri, headers=headers, + ok_responses=(201,), + response_headers=response_headers) + return response + + def complete_multipart_upload(self, vault_name, upload_id, + sha256_treehash, archive_size): + """ + You call this operation to inform Amazon Glacier that all the + archive parts have been uploaded and that Amazon Glacier can + now assemble the archive from the uploaded parts. After + assembling and saving the archive to the vault, Amazon Glacier + returns the URI path of the newly created archive resource. + Using the URI path, you can then access the archive. After you + upload an archive, you should save the archive ID returned to + retrieve the archive at a later point. You can also get the + vault inventory to obtain a list of archive IDs in a vault. + For more information, see InitiateJob. + + In the request, you must include the computed SHA256 tree hash + of the entire archive you have uploaded. For information about + computing a SHA256 tree hash, see `Computing Checksums`_. On + the server side, Amazon Glacier also constructs the SHA256 + tree hash of the assembled archive. If the values match, + Amazon Glacier saves the archive to the vault; otherwise, it + returns an error, and the operation fails. The ListParts + operation returns a list of parts uploaded for a specific + multipart upload. It includes checksum information for each + uploaded part that can be used to debug a bad checksum issue. + + Additionally, Amazon Glacier also checks for any missing + content ranges when assembling the archive, if missing content + ranges are found, Amazon Glacier returns an error and the + operation fails. + + Complete Multipart Upload is an idempotent operation. After + your first successful complete multipart upload, if you call + the operation again within a short period, the operation will + succeed and return the same archive ID. This is useful in the + event you experience a network issue that causes an aborted + connection or receive a 500 server error, in which case you + can repeat your Complete Multipart Upload request and get the + same archive ID without creating duplicate archives. Note, + however, that after the multipart upload completes, you cannot + call the List Parts operation and the multipart upload will + not appear in List Multipart Uploads response, even if + idempotent complete is possible. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. 
For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Uploading Large Archives in Parts (Multipart Upload)`_ and + `Complete Multipart Upload`_ in the Amazon Glacier Developer + Guide . + + :type checksum: string + :param checksum: The SHA256 tree hash of the entire archive. It is the + tree hash of SHA256 tree hash of the individual parts. If the value + you specify in the request does not match the SHA256 tree hash of + the final assembled archive as computed by Amazon Glacier, Amazon + Glacier returns an error and the request fails. + + :type vault_name: str + :param vault_name: The name of the vault. + + :type upload_id: str + :param upload_id: The upload ID of the multipart upload. + + :type sha256_treehash: str + :param sha256_treehash: The SHA256 tree hash of the entire archive. + It is the tree hash of SHA256 tree hash of the individual parts. + If the value you specify in the request does not match the SHA256 + tree hash of the final assembled archive as computed by Amazon + Glacier, Amazon Glacier returns an error and the request fails. + + :type archive_size: int + :param archive_size: The total size, in bytes, of the entire + archive. This value should be the sum of all the sizes of + the individual parts that you uploaded. + """ + response_headers = [('x-amz-archive-id', u'ArchiveId'), + ('Location', u'Location')] + headers = {'x-amz-sha256-tree-hash': sha256_treehash, + 'x-amz-archive-size': str(archive_size)} + uri = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id) + response = self.make_request('POST', uri, headers=headers, + ok_responses=(201,), + response_headers=response_headers) + return response + + def abort_multipart_upload(self, vault_name, upload_id): + """ + This operation aborts a multipart upload identified by the + upload ID. + + After the Abort Multipart Upload request succeeds, you cannot + upload any more parts to the multipart upload or complete the + multipart upload. Aborting a completed upload fails. However, + aborting an already-aborted upload will succeed, for a short + time. For more information about uploading a part and + completing a multipart upload, see UploadMultipartPart and + CompleteMultipartUpload. + + This operation is idempotent. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Working with Archives in Amazon Glacier`_ and `Abort + Multipart Upload`_ in the Amazon Glacier Developer Guide . + + :type vault_name: string + :param vault_name: The name of the vault. + + :type upload_id: string + :param upload_id: The upload ID of the multipart upload to delete. + """ + uri = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id) + return self.make_request('DELETE', uri, ok_responses=(204,)) + + def list_multipart_uploads(self, vault_name, limit=None, marker=None): + """ + This operation lists in-progress multipart uploads for the + specified vault. An in-progress multipart upload is a + multipart upload that has been initiated by an + InitiateMultipartUpload request, but has not yet been + completed or aborted. 
The list returned in the List Multipart + Upload response has no guaranteed order. + + The List Multipart Uploads operation supports pagination. By + default, this operation returns up to 1,000 multipart uploads + in the response. You should always check the response for a + `marker` at which to continue the list; if there are no more + items the `marker` is `null`. To return a list of multipart + uploads that begins at a specific upload, set the `marker` + request parameter to the value you obtained from a previous + List Multipart Upload request. You can also limit the number + of uploads returned in the response by specifying the `limit` + parameter in the request. + + Note the difference between this operation and listing parts + (ListParts). The List Multipart Uploads operation lists all + multipart uploads for a vault and does not require a multipart + upload ID. The List Parts operation requires a multipart + upload ID since parts are associated with a single upload. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and the underlying REST API, go to + `Working with Archives in Amazon Glacier`_ and `List Multipart + Uploads `_ in the Amazon Glacier Developer Guide . + + :type vault_name: string + :param vault_name: The name of the vault. + + :type limit: string + :param limit: Specifies the maximum number of uploads returned in the + response body. If this value is not specified, the List Uploads + operation returns up to 1,000 uploads. + + :type marker: string + :param marker: An opaque string used for pagination. This value + specifies the upload at which the listing of uploads should begin. + Get the marker value from a previous List Uploads response. You + need only include the marker if you are continuing the pagination + of results started in a previous List Uploads request. + """ + params = {} + if limit: + params['limit'] = limit + if marker: + params['marker'] = marker + uri = 'vaults/%s/multipart-uploads' % vault_name + return self.make_request('GET', uri, params=params) + + def list_parts(self, vault_name, upload_id, limit=None, marker=None): + """ + This operation lists the parts of an archive that have been + uploaded in a specific multipart upload. You can make this + request at any time during an in-progress multipart upload + before you complete the upload (see CompleteMultipartUpload. + List Parts returns an error for completed uploads. The list + returned in the List Parts response is sorted by part range. + + The List Parts operation supports pagination. By default, this + operation returns up to 1,000 uploaded parts in the response. + You should always check the response for a `marker` at which + to continue the list; if there are no more items the `marker` + is `null`. To return a list of parts that begins at a specific + part, set the `marker` request parameter to the value you + obtained from a previous List Parts request. You can also + limit the number of parts returned in the response by + specifying the `limit` parameter in the request. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. 
You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and the underlying REST API, go to + `Working with Archives in Amazon Glacier`_ and `List Parts`_ + in the Amazon Glacier Developer Guide . + + :type vault_name: string + :param vault_name: The name of the vault. + + :type upload_id: string + :param upload_id: The upload ID of the multipart upload. + + :type marker: string + :param marker: An opaque string used for pagination. This value + specifies the part at which the listing of parts should begin. Get + the marker value from the response of a previous List Parts + response. You need only include the marker if you are continuing + the pagination of results started in a previous List Parts request. + + :type limit: string + :param limit: Specifies the maximum number of parts returned in the + response body. If this value is not specified, the List Parts + operation returns up to 1,000 uploads. + """ + params = {} + if limit: + params['limit'] = limit + if marker: + params['marker'] = marker + uri = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id) + return self.make_request('GET', uri, params=params) + + def upload_part(self, vault_name, upload_id, linear_hash, + tree_hash, byte_range, part_data): + """ + This operation uploads a part of an archive. You can upload + archive parts in any order. You can also upload them in + parallel. You can upload up to 10,000 parts for a multipart + upload. + + Amazon Glacier rejects your upload part request if any of the + following conditions is true: + + + + **SHA256 tree hash does not match**To ensure that part data + is not corrupted in transmission, you compute a SHA256 tree + hash of the part and include it in your request. Upon + receiving the part data, Amazon Glacier also computes a SHA256 + tree hash. If these hash values don't match, the operation + fails. For information about computing a SHA256 tree hash, see + `Computing Checksums`_. + + **Part size does not match**The size of each part except the + last must match the size specified in the corresponding + InitiateMultipartUpload request. The size of the last part + must be the same size as, or smaller than, the specified size. + If you upload a part whose size is smaller than the part size + you specified in your initiate multipart upload request and + that part is not the last part, then the upload part request + will succeed. However, the subsequent Complete Multipart + Upload request will fail. + + **Range does not align**The byte range value in the request + does not align with the part size specified in the + corresponding initiate request. For example, if you specify a + part size of 4194304 bytes (4 MB), then 0 to 4194303 bytes (4 + MB - 1) and 4194304 (4 MB) to 8388607 (8 MB - 1) are valid + part ranges. However, if you set a range value of 2 MB to 6 + MB, the range does not align with the part size and the upload + will fail. + + + This operation is idempotent. If you upload the same part + multiple times, the data included in the most recent request + overwrites the previously uploaded data. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. 
For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Uploading Large Archives in Parts (Multipart Upload)`_ and + `Upload Part `_ in the Amazon Glacier Developer Guide . + + :type vault_name: str + :param vault_name: The name of the vault. + + :type linear_hash: str + :param linear_hash: The SHA256 checksum (a linear hash) of the + payload. + + :type tree_hash: str + :param tree_hash: The user-computed SHA256 tree hash of the + payload. For more information on computing the + tree hash, see http://goo.gl/u7chF. + + :type upload_id: str + :param upload_id: The unique ID associated with this upload + operation. + + :type byte_range: tuple of ints + :param byte_range: Identifies the range of bytes in the assembled + archive that will be uploaded in this part. Amazon Glacier uses + this information to assemble the archive in the proper sequence. + The format of this header follows RFC 2616. An example header is + Content-Range:bytes 0-4194303/*. + + :type part_data: bytes + :param part_data: The data to be uploaded for the part + """ + headers = {'x-amz-content-sha256': linear_hash, + 'x-amz-sha256-tree-hash': tree_hash, + 'Content-Range': 'bytes %d-%d/*' % byte_range} + response_headers = [('x-amz-sha256-tree-hash', u'TreeHash')] + uri = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id) + return self.make_request('PUT', uri, headers=headers, + data=part_data, ok_responses=(204,), + response_headers=response_headers) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/glacier/layer2.py b/desktop/core/ext-py/boto-2.38.0/boto/glacier/layer2.py new file mode 100644 index 0000000000000000000000000000000000000000..abc36199bb1fee973b45594a833781ca46a85015 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/glacier/layer2.py @@ -0,0 +1,101 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.glacier.layer1 import Layer1 +from boto.glacier.vault import Vault + + +class Layer2(object): + """ + Provides a more pythonic and friendly interface to Glacier based on Layer1 + """ + + def __init__(self, *args, **kwargs): + # Accept a passed in layer1, mainly to allow easier testing + if "layer1" in kwargs: + self.layer1 = kwargs["layer1"] + else: + self.layer1 = Layer1(*args, **kwargs) + + def create_vault(self, name): + """Creates a vault. 
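+
+        A minimal sketch (assumes AWS credentials are available through
+        the usual boto configuration; the vault name is a placeholder)::
+
+            from boto.glacier.layer2 import Layer2
+            layer2 = Layer2()
+            vault = layer2.create_vault('examplevault')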
+
+        :type name: str
+        :param name: The name of the vault
+
+        :rtype: :class:`boto.glacier.vault.Vault`
+        :return: A Vault object representing the vault.
+        """
+        self.layer1.create_vault(name)
+        return self.get_vault(name)
+
+    def delete_vault(self, name):
+        """Delete a vault.
+
+        This operation deletes a vault. Amazon Glacier will delete a
+        vault only if there are no archives in the vault as per the
+        last inventory and there have been no writes to the vault
+        since the last inventory. If either of these conditions is not
+        satisfied, the vault deletion fails (that is, the vault is not
+        removed) and Amazon Glacier returns an error.
+
+        This operation is idempotent; you can send the same request
+        multiple times, and it has no further effect after the first
+        time Amazon Glacier deletes the specified vault.
+
+        :type name: str
+        :param name: The name of the vault to delete.
+        """
+        return self.layer1.delete_vault(name)
+
+    def get_vault(self, name):
+        """
+        Get an object representing a named vault from Glacier. This
+        operation does not check if the vault actually exists.
+
+        :type name: str
+        :param name: The name of the vault
+
+        :rtype: :class:`boto.glacier.vault.Vault`
+        :return: A Vault object representing the vault.
+        """
+        response_data = self.layer1.describe_vault(name)
+        return Vault(self.layer1, response_data)
+
+    def list_vaults(self):
+        """
+        Return a list of all vaults associated with the account ID.
+
+        :rtype: List of :class:`boto.glacier.vault.Vault`
+        :return: A list of Vault objects.
+        """
+        vaults = []
+        marker = None
+        while True:
+            response_data = self.layer1.list_vaults(marker=marker, limit=1000)
+            vaults.extend([Vault(self.layer1, rd)
+                           for rd in response_data['VaultList']])
+            marker = response_data.get('Marker')
+            if not marker:
+                break
+
+        return vaults
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/glacier/response.py b/desktop/core/ext-py/boto-2.38.0/boto/glacier/response.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7a2612c03a9ae44dad42c64dc25e7e4e84fe042
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/glacier/response.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.compat import json
+
+
+class GlacierResponse(dict):
+    """
+    Represents a response from Glacier layer1. It acts as a dictionary
+    containing the combined keys received via JSON in the body (if
+    supplied) and headers.
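+
+    As a sketch of the resulting behavior (``response`` being any value
+    returned by a layer1 call), header-derived keys and body keys are
+    read the same way::
+
+        response['RequestId']  # parsed from the x-amzn-requestid header
+        response.status        # the raw HTTP status code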
+ """ + def __init__(self, http_response, response_headers): + self.http_response = http_response + self.status = http_response.status + self[u'RequestId'] = http_response.getheader('x-amzn-requestid') + if response_headers: + for header_name, item_name in response_headers: + self[item_name] = http_response.getheader(header_name) + if http_response.status != 204: + if http_response.getheader('Content-Type') == 'application/json': + body = json.loads(http_response.read().decode('utf-8')) + self.update(body) + size = http_response.getheader('Content-Length', None) + if size is not None: + self.size = size + + def read(self, amt=None): + "Reads and returns the response body, or up to the next amt bytes." + return self.http_response.read(amt) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/glacier/utils.py b/desktop/core/ext-py/boto-2.38.0/boto/glacier/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..98847e3f16ddc81681bd0a0f6c43595ab580abe7 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/glacier/utils.py @@ -0,0 +1,175 @@ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import hashlib +import math +import binascii + +from boto.compat import six + + +_MEGABYTE = 1024 * 1024 +DEFAULT_PART_SIZE = 4 * _MEGABYTE +MAXIMUM_NUMBER_OF_PARTS = 10000 + + +def minimum_part_size(size_in_bytes, default_part_size=DEFAULT_PART_SIZE): + """Calculate the minimum part size needed for a multipart upload. + + Glacier allows a maximum of 10,000 parts per upload. It also + states that the maximum archive size is 10,000 * 4 GB, which means + the part size can range from 1MB to 4GB (provided it is one 1MB + multiplied by a power of 2). + + This function will compute what the minimum part size must be in + order to upload a file of size ``size_in_bytes``. + + It will first check if ``default_part_size`` is sufficient for + a part size given the ``size_in_bytes``. If this is not the case, + then the smallest part size than can accomodate a file of size + ``size_in_bytes`` will be returned. + + If the file size is greater than the maximum allowed archive + size of 10,000 * 4GB, a ``ValueError`` will be raised. + + """ + # The default part size (4 MB) will be too small for a very large + # archive, as there is a limit of 10,000 parts in a multipart upload. + # This puts the maximum allowed archive size with the default part size + # at 40,000 MB. 
We need to do a sanity check on the part size, and find + # one that works if the default is too small. + part_size = _MEGABYTE + if (default_part_size * MAXIMUM_NUMBER_OF_PARTS) < size_in_bytes: + if size_in_bytes > (4096 * _MEGABYTE * 10000): + raise ValueError("File size too large: %s" % size_in_bytes) + min_part_size = size_in_bytes / 10000 + power = 3 + while part_size < min_part_size: + part_size = math.ldexp(_MEGABYTE, power) + power += 1 + part_size = int(part_size) + else: + part_size = default_part_size + return part_size + + +def chunk_hashes(bytestring, chunk_size=_MEGABYTE): + chunk_count = int(math.ceil(len(bytestring) / float(chunk_size))) + hashes = [] + for i in range(chunk_count): + start = i * chunk_size + end = (i + 1) * chunk_size + hashes.append(hashlib.sha256(bytestring[start:end]).digest()) + if not hashes: + return [hashlib.sha256(b'').digest()] + return hashes + + +def tree_hash(fo): + """ + Given a hash of each 1MB chunk (from chunk_hashes) this will hash + together adjacent hashes until it ends up with one big one. So a + tree of hashes. + """ + hashes = [] + hashes.extend(fo) + while len(hashes) > 1: + new_hashes = [] + while True: + if len(hashes) > 1: + first = hashes.pop(0) + second = hashes.pop(0) + new_hashes.append(hashlib.sha256(first + second).digest()) + elif len(hashes) == 1: + only = hashes.pop(0) + new_hashes.append(only) + else: + break + hashes.extend(new_hashes) + return hashes[0] + + +def compute_hashes_from_fileobj(fileobj, chunk_size=1024 * 1024): + """Compute the linear and tree hash from a fileobj. + + This function will compute the linear/tree hash of a fileobj + in a single pass through the fileobj. + + :param fileobj: A file like object. + + :param chunk_size: The size of the chunks to use for the tree + hash. This is also the buffer size used to read from + `fileobj`. + + :rtype: tuple + :return: A tuple of (linear_hash, tree_hash). Both hashes + are returned in hex. + + """ + # Python 3+, not binary + if six.PY3 and hasattr(fileobj, 'mode') and 'b' not in fileobj.mode: + raise ValueError('File-like object must be opened in binary mode!') + + linear_hash = hashlib.sha256() + chunks = [] + chunk = fileobj.read(chunk_size) + while chunk: + # It's possible to get a file-like object that has no mode (checked + # above) and returns something other than bytes (e.g. str). So here + # we try to catch that and encode to bytes. + if not isinstance(chunk, bytes): + chunk = chunk.encode(getattr(fileobj, 'encoding', '') or 'utf-8') + linear_hash.update(chunk) + chunks.append(hashlib.sha256(chunk).digest()) + chunk = fileobj.read(chunk_size) + if not chunks: + chunks = [hashlib.sha256(b'').digest()] + return linear_hash.hexdigest(), bytes_to_hex(tree_hash(chunks)) + + +def bytes_to_hex(str_as_bytes): + return binascii.hexlify(str_as_bytes) + + +def tree_hash_from_str(str_as_bytes): + """ + + :type str_as_bytes: str + :param str_as_bytes: The string for which to compute the tree hash. + + :rtype: str + :return: The computed tree hash, returned as hex. 
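+
+    A quick sketch (the argument must already be ``bytes``)::
+
+        from boto.glacier.utils import tree_hash_from_str
+        tree_hash_from_str(b'data')  # hex tree hash of b'data'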
+ + """ + return bytes_to_hex(tree_hash(chunk_hashes(str_as_bytes))) + + +class ResettingFileSender(object): + def __init__(self, archive): + self._archive = archive + self._starting_offset = archive.tell() + + def __call__(self, connection, method, path, body, headers): + try: + connection.request(method, path, self._archive, headers) + return connection.getresponse() + finally: + self._archive.seek(self._starting_offset) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/glacier/vault.py b/desktop/core/ext-py/boto-2.38.0/boto/glacier/vault.py new file mode 100644 index 0000000000000000000000000000000000000000..45d276cadb56347130133e509f98c00359e6ce3a --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/glacier/vault.py @@ -0,0 +1,450 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/ +# Copyright (c) 2012 Robie Basak +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import codecs +from boto.glacier.exceptions import UploadArchiveError +from boto.glacier.job import Job +from boto.glacier.writer import compute_hashes_from_fileobj, \ + resume_file_upload, Writer +from boto.glacier.concurrent import ConcurrentUploader +from boto.glacier.utils import minimum_part_size, DEFAULT_PART_SIZE +import os.path + + +_MEGABYTE = 1024 * 1024 +_GIGABYTE = 1024 * _MEGABYTE + +MAXIMUM_ARCHIVE_SIZE = 10000 * 4 * _GIGABYTE +MAXIMUM_NUMBER_OF_PARTS = 10000 + + +class Vault(object): + + DefaultPartSize = DEFAULT_PART_SIZE + SingleOperationThreshold = 100 * _MEGABYTE + + ResponseDataElements = (('VaultName', 'name', None), + ('VaultARN', 'arn', None), + ('CreationDate', 'creation_date', None), + ('LastInventoryDate', 'last_inventory_date', None), + ('SizeInBytes', 'size', 0), + ('NumberOfArchives', 'number_of_archives', 0)) + + def __init__(self, layer1, response_data=None): + self.layer1 = layer1 + if response_data: + for response_name, attr_name, default in self.ResponseDataElements: + value = response_data[response_name] + setattr(self, attr_name, value) + else: + for response_name, attr_name, default in self.ResponseDataElements: + setattr(self, attr_name, default) + + def __repr__(self): + return 'Vault("%s")' % self.arn + + def delete(self): + """ + Delete's this vault. WARNING! + """ + self.layer1.delete_vault(self.name) + + def upload_archive(self, filename, description=None): + """ + Adds an archive to a vault. For archives greater than 100MB the + multipart upload will be used. 
+
+        :type filename: str
+        :param filename: A filename to upload
+
+        :type description: str
+        :param description: An optional description for the archive.
+
+        :rtype: str
+        :return: The archive id of the newly created archive
+        """
+        if os.path.getsize(filename) > self.SingleOperationThreshold:
+            return self.create_archive_from_file(filename, description=description)
+        return self._upload_archive_single_operation(filename, description)
+
+    def _upload_archive_single_operation(self, filename, description):
+        """
+        Adds an archive to a vault in a single operation. It is
+        recommended for archives smaller than 100 MB.
+
+        :type filename: str
+        :param filename: A filename to upload
+
+        :type description: str
+        :param description: A description for the archive.
+
+        :rtype: str
+        :return: The archive id of the newly created archive
+        """
+        with open(filename, 'rb') as fileobj:
+            linear_hash, tree_hash = compute_hashes_from_fileobj(fileobj)
+            fileobj.seek(0)
+            response = self.layer1.upload_archive(self.name, fileobj,
+                                                  linear_hash, tree_hash,
+                                                  description)
+        return response['ArchiveId']
+
+    def create_archive_writer(self, part_size=DefaultPartSize,
+                              description=None):
+        """
+        Create a new archive and begin a multi-part upload to it.
+        Returns a file-like object to which the data for the archive
+        can be written. Once all the data is written, the file-like
+        object should be closed; you can then call the get_archive_id
+        method on it to get the ID of the created archive.
+
+        :type part_size: int
+        :param part_size: The part size for the multipart upload.
+
+        :type description: str
+        :param description: An optional description for the archive.
+
+        :rtype: :class:`boto.glacier.writer.Writer`
+        :return: A Writer object to which the archive data
+            should be written.
+        """
+        response = self.layer1.initiate_multipart_upload(self.name,
+                                                         part_size,
+                                                         description)
+        return Writer(self, response['UploadId'], part_size=part_size)
+
+    def create_archive_from_file(self, filename=None, file_obj=None,
+                                 description=None, upload_id_callback=None):
+        """
+        Create a new archive and upload the data from the given file
+        or file-like object.
+
+        :type filename: str
+        :param filename: A filename to upload
+
+        :type file_obj: file
+        :param file_obj: A file-like object to upload
+
+        :type description: str
+        :param description: An optional description for the archive.
+
+        :type upload_id_callback: function
+        :param upload_id_callback: if set, call with the upload_id as the
+            only parameter when it becomes known, to enable future calls
+            to resume_archive_from_file in case resume is needed.
+ + :rtype: str + :return: The archive id of the newly created archive + """ + part_size = self.DefaultPartSize + if not file_obj: + file_size = os.path.getsize(filename) + try: + part_size = minimum_part_size(file_size, part_size) + except ValueError: + raise UploadArchiveError("File size of %s bytes exceeds " + "40,000 GB archive limit of Glacier.") + file_obj = open(filename, "rb") + writer = self.create_archive_writer( + description=description, + part_size=part_size) + if upload_id_callback: + upload_id_callback(writer.upload_id) + while True: + data = file_obj.read(part_size) + if not data: + break + writer.write(data) + writer.close() + return writer.get_archive_id() + + @staticmethod + def _range_string_to_part_index(range_string, part_size): + start, inside_end = [int(value) for value in range_string.split('-')] + end = inside_end + 1 + length = end - start + if length == part_size + 1: + # Off-by-one bug in Amazon's Glacier implementation, + # see: https://forums.aws.amazon.com/thread.jspa?threadID=106866 + # Workaround: since part_size is too big by one byte, adjust it + end -= 1 + inside_end -= 1 + length -= 1 + assert not (start % part_size), ( + "upload part start byte is not on a part boundary") + assert (length <= part_size), "upload part is bigger than part size" + return start // part_size + + def resume_archive_from_file(self, upload_id, filename=None, + file_obj=None): + """Resume upload of a file already part-uploaded to Glacier. + + The resumption of an upload where the part-uploaded section is empty + is a valid degenerate case that this function can handle. + + One and only one of filename or file_obj must be specified. + + :type upload_id: str + :param upload_id: existing Glacier upload id of upload being resumed. + + :type filename: str + :param filename: file to open for resume + + :type fobj: file + :param fobj: file-like object containing local data to resume. This + must read from the start of the entire upload, not just from the + point being resumed. Use fobj.seek(0) to achieve this if necessary. + + :rtype: str + :return: The archive id of the newly created archive + + """ + part_list_response = self.list_all_parts(upload_id) + part_size = part_list_response['PartSizeInBytes'] + + part_hash_map = {} + for part_desc in part_list_response['Parts']: + part_index = self._range_string_to_part_index( + part_desc['RangeInBytes'], part_size) + part_tree_hash = codecs.decode(part_desc['SHA256TreeHash'], 'hex_codec') + part_hash_map[part_index] = part_tree_hash + + if not file_obj: + file_obj = open(filename, "rb") + + return resume_file_upload( + self, upload_id, part_size, file_obj, part_hash_map) + + def concurrent_create_archive_from_file(self, filename, description, + **kwargs): + """ + Create a new archive from a file and upload the given + file. + + This is a convenience method around the + :class:`boto.glacier.concurrent.ConcurrentUploader` + class. This method will perform a multipart upload + and upload the parts of the file concurrently. + + :type filename: str + :param filename: A filename to upload + + :param kwargs: Additional kwargs to pass through to + :py:class:`boto.glacier.concurrent.ConcurrentUploader`. + You can pass any argument besides the ``api`` and + ``vault_name`` param (these arguments are already + passed to the ``ConcurrentUploader`` for you). + + :raises: `boto.glacier.exception.UploadArchiveError` is an error + occurs during the upload process. 
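The upload_id_callback and resume_archive_from_file pair above make crash-safe uploads possible; a sketch of that workflow, with a hypothetical state file for the upload id:

import os
import boto

glacier = boto.connect_glacier()
vault = glacier.get_vault('my-vault')
STATE = 'upload_id.txt'                      # hypothetical state file

def remember_upload_id(upload_id):
    # Persist the upload id as soon as it is known.
    with open(STATE, 'w') as f:
        f.write(upload_id)

if os.path.exists(STATE):
    # A previous run died part-way: re-drive the upload. Parts whose
    # tree hashes match already-uploaded parts are skipped, not re-sent.
    with open(STATE) as f:
        archive_id = vault.resume_archive_from_file(f.read().strip(),
                                                    filename='backup.tar')
else:
    archive_id = vault.create_archive_from_file(
        'backup.tar', upload_id_callback=remember_upload_id)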
+ + :rtype: str + :return: The archive id of the newly created archive + + """ + uploader = ConcurrentUploader(self.layer1, self.name, **kwargs) + archive_id = uploader.upload(filename, description) + return archive_id + + def retrieve_archive(self, archive_id, sns_topic=None, + description=None): + """ + Initiate a archive retrieval job to download the data from an + archive. You will need to wait for the notification from + Amazon (via SNS) before you can actually download the data, + this takes around 4 hours. + + :type archive_id: str + :param archive_id: The id of the archive + + :type description: str + :param description: An optional description for the job. + + :type sns_topic: str + :param sns_topic: The Amazon SNS topic ARN where Amazon Glacier + sends notification when the job is completed and the output + is ready for you to download. + + :rtype: :class:`boto.glacier.job.Job` + :return: A Job object representing the retrieval job. + """ + job_data = {'Type': 'archive-retrieval', + 'ArchiveId': archive_id} + if sns_topic is not None: + job_data['SNSTopic'] = sns_topic + if description is not None: + job_data['Description'] = description + + response = self.layer1.initiate_job(self.name, job_data) + return self.get_job(response['JobId']) + + def retrieve_inventory(self, sns_topic=None, + description=None, byte_range=None, + start_date=None, end_date=None, + limit=None): + """ + Initiate a inventory retrieval job to list the items in the + vault. You will need to wait for the notification from + Amazon (via SNS) before you can actually download the data, + this takes around 4 hours. + + :type description: str + :param description: An optional description for the job. + + :type sns_topic: str + :param sns_topic: The Amazon SNS topic ARN where Amazon Glacier + sends notification when the job is completed and the output + is ready for you to download. + + :type byte_range: str + :param byte_range: Range of bytes to retrieve. + + :type start_date: DateTime + :param start_date: Beginning of the date range to query. + + :type end_date: DateTime + :param end_date: End of the date range to query. + + :type limit: int + :param limit: Limits the number of results returned. + + :rtype: str + :return: The ID of the job + """ + job_data = {'Type': 'inventory-retrieval'} + if sns_topic is not None: + job_data['SNSTopic'] = sns_topic + if description is not None: + job_data['Description'] = description + if byte_range is not None: + job_data['RetrievalByteRange'] = byte_range + if start_date is not None or end_date is not None or limit is not None: + rparams = {} + + if start_date is not None: + rparams['StartDate'] = start_date.strftime('%Y-%m-%dT%H:%M:%S%Z') + if end_date is not None: + rparams['EndDate'] = end_date.strftime('%Y-%m-%dT%H:%M:%S%Z') + if limit is not None: + rparams['Limit'] = limit + + job_data['InventoryRetrievalParameters'] = rparams + + response = self.layer1.initiate_job(self.name, job_data) + return response['JobId'] + + def retrieve_inventory_job(self, **kwargs): + """ + Identical to ``retrieve_inventory``, but returns a ``Job`` instance + instead of just the job ID. + + :type description: str + :param description: An optional description for the job. + + :type sns_topic: str + :param sns_topic: The Amazon SNS topic ARN where Amazon Glacier + sends notification when the job is completed and the output + is ready for you to download. + + :type byte_range: str + :param byte_range: Range of bytes to retrieve. 
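Retrieval is asynchronous: initiate_job returns immediately and the data is staged later. A polling sketch (an SNS notification is the recommended trigger; the archive id is a placeholder from an earlier upload):

import time
import boto

glacier = boto.connect_glacier()
vault = glacier.get_vault('my-vault')

archive_id = '...'                           # from an earlier upload_archive call
job = vault.retrieve_archive(archive_id)
while not vault.get_job(job.id).completed:
    time.sleep(15 * 60)                      # staging typically takes ~4 hours
vault.get_job(job.id).download_to_file('restored.tar')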
+ + :type start_date: DateTime + :param start_date: Beginning of the date range to query. + + :type end_date: DateTime + :param end_date: End of the date range to query. + + :type limit: int + :param limit: Limits the number of results returned. + + :rtype: :class:`boto.glacier.job.Job` + :return: A Job object representing the retrieval job. + """ + job_id = self.retrieve_inventory(**kwargs) + return self.get_job(job_id) + + def delete_archive(self, archive_id): + """ + This operation deletes an archive from the vault. + + :type archive_id: str + :param archive_id: The ID for the archive to be deleted. + """ + return self.layer1.delete_archive(self.name, archive_id) + + def get_job(self, job_id): + """ + Get an object representing a job in progress. + + :type job_id: str + :param job_id: The ID of the job + + :rtype: :class:`boto.glacier.job.Job` + :return: A Job object representing the job. + """ + response_data = self.layer1.describe_job(self.name, job_id) + return Job(self, response_data) + + def list_jobs(self, completed=None, status_code=None): + """ + Return a list of Job objects related to this vault. + + :type completed: boolean + :param completed: Specifies the state of the jobs to return. + If a value of True is passed, only completed jobs will + be returned. If a value of False is passed, only + uncompleted jobs will be returned. If no value is + passed, all jobs will be returned. + + :type status_code: string + :param status_code: Specifies the type of job status to return. + Valid values are: InProgress|Succeeded|Failed. If not + specified, jobs with all status codes are returned. + + :rtype: list of :class:`boto.glacier.job.Job` + :return: A list of Job objects related to this vault. + """ + response_data = self.layer1.list_jobs(self.name, completed, + status_code) + return [Job(self, jd) for jd in response_data['JobList']] + + def list_all_parts(self, upload_id): + """Automatically make and combine multiple calls to list_parts. + + Call list_parts as necessary, combining the results in case multiple + calls were required to get data on all available parts. + + """ + result = self.layer1.list_parts(self.name, upload_id) + marker = result['Marker'] + while marker: + additional_result = self.layer1.list_parts( + self.name, upload_id, marker=marker) + result['Parts'].extend(additional_result['Parts']) + marker = additional_result['Marker'] + # The marker makes no sense in an unpaginated result, and clearing it + # makes testing easier. This also has the nice property that the result + # is a normal (but expanded) response. 
+ result['Marker'] = None + return result diff --git a/desktop/core/ext-py/boto-2.38.0/boto/glacier/writer.py b/desktop/core/ext-py/boto-2.38.0/boto/glacier/writer.py new file mode 100644 index 0000000000000000000000000000000000000000..fa3161ab3cae9cb4f9f7b9049c648e2ef70908cb --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/glacier/writer.py @@ -0,0 +1,262 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/ +# Copyright (c) 2012 Robie Basak +# Tree hash implementation from Aaron Brady bradya@gmail.com +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import hashlib + +from boto.glacier.utils import chunk_hashes, tree_hash, bytes_to_hex +# This import is provided for backwards compatibility. This function is +# now in boto.glacier.utils, but any existing code can still import +# this directly from this module. +from boto.glacier.utils import compute_hashes_from_fileobj + + +_ONE_MEGABYTE = 1024 * 1024 + + +class _Partitioner(object): + """Convert variable-size writes into part-sized writes + + Call write(data) with variable sized data as needed to write all data. Call + flush() after all data is written. + + This instance will call send_fn(part_data) as needed in part_size pieces, + except for the final part which may be shorter than part_size. Make sure to + call flush() to ensure that a short final part results in a final send_fn + call. + + """ + def __init__(self, part_size, send_fn): + self.part_size = part_size + self.send_fn = send_fn + self._buffer = [] + self._buffer_size = 0 + + def write(self, data): + if data == b'': + return + self._buffer.append(data) + self._buffer_size += len(data) + while self._buffer_size > self.part_size: + self._send_part() + + def _send_part(self): + data = b''.join(self._buffer) + # Put back any data remaining over the part size into the + # buffer + if len(data) > self.part_size: + self._buffer = [data[self.part_size:]] + self._buffer_size = len(self._buffer[0]) + else: + self._buffer = [] + self._buffer_size = 0 + # The part we will send + part = data[:self.part_size] + self.send_fn(part) + + def flush(self): + if self._buffer_size > 0: + self._send_part() + + +class _Uploader(object): + """Upload to a Glacier upload_id. + + Call upload_part for each part (in any order) and then close to complete + the upload. 
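The buffering contract of _Partitioner can be seen in isolation; a small sketch where send_fn simply records parts (part_size is deliberately tiny here):

from boto.glacier.writer import _Partitioner

parts = []
p = _Partitioner(part_size=4, send_fn=parts.append)
p.write(b'abcdef')   # emits b'abcd', keeps b'ef' buffered
p.write(b'gh')       # exactly part_size buffered, so nothing sent yet
p.flush()            # emits the final part b'efgh'
assert parts == [b'abcd', b'efgh']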
+ + """ + def __init__(self, vault, upload_id, part_size, chunk_size=_ONE_MEGABYTE): + self.vault = vault + self.upload_id = upload_id + self.part_size = part_size + self.chunk_size = chunk_size + self.archive_id = None + + self._uploaded_size = 0 + self._tree_hashes = [] + + self.closed = False + + def _insert_tree_hash(self, index, raw_tree_hash): + list_length = len(self._tree_hashes) + if index >= list_length: + self._tree_hashes.extend([None] * (list_length - index + 1)) + self._tree_hashes[index] = raw_tree_hash + + def upload_part(self, part_index, part_data): + """Upload a part to Glacier. + + :param part_index: part number where 0 is the first part + :param part_data: data to upload corresponding to this part + + """ + if self.closed: + raise ValueError("I/O operation on closed file") + # Create a request and sign it + part_tree_hash = tree_hash(chunk_hashes(part_data, self.chunk_size)) + self._insert_tree_hash(part_index, part_tree_hash) + + hex_tree_hash = bytes_to_hex(part_tree_hash) + linear_hash = hashlib.sha256(part_data).hexdigest() + start = self.part_size * part_index + content_range = (start, + (start + len(part_data)) - 1) + response = self.vault.layer1.upload_part(self.vault.name, + self.upload_id, + linear_hash, + hex_tree_hash, + content_range, part_data) + response.read() + self._uploaded_size += len(part_data) + + def skip_part(self, part_index, part_tree_hash, part_length): + """Skip uploading of a part. + + The final close call needs to calculate the tree hash and total size + of all uploaded data, so this is the mechanism for resume + functionality to provide it without actually uploading the data again. + + :param part_index: part number where 0 is the first part + :param part_tree_hash: binary tree_hash of part being skipped + :param part_length: length of part being skipped + + """ + if self.closed: + raise ValueError("I/O operation on closed file") + self._insert_tree_hash(part_index, part_tree_hash) + self._uploaded_size += part_length + + def close(self): + if self.closed: + return + if None in self._tree_hashes: + raise RuntimeError("Some parts were not uploaded.") + # Complete the multiplart glacier upload + hex_tree_hash = bytes_to_hex(tree_hash(self._tree_hashes)) + response = self.vault.layer1.complete_multipart_upload( + self.vault.name, self.upload_id, hex_tree_hash, + self._uploaded_size) + self.archive_id = response['ArchiveId'] + self.closed = True + + +def generate_parts_from_fobj(fobj, part_size): + data = fobj.read(part_size) + while data: + yield data.encode('utf-8') + data = fobj.read(part_size) + + +def resume_file_upload(vault, upload_id, part_size, fobj, part_hash_map, + chunk_size=_ONE_MEGABYTE): + """Resume upload of a file already part-uploaded to Glacier. + + The resumption of an upload where the part-uploaded section is empty is a + valid degenerate case that this function can handle. In this case, + part_hash_map should be an empty dict. + + :param vault: boto.glacier.vault.Vault object. + :param upload_id: existing Glacier upload id of upload being resumed. + :param part_size: part size of existing upload. + :param fobj: file object containing local data to resume. This must read + from the start of the entire upload, not just from the point being + resumed. Use fobj.seek(0) to achieve this if necessary. + :param part_hash_map: {part_index: part_tree_hash, ...} of data already + uploaded. Each supplied part_tree_hash will be verified and the part + re-uploaded if there is a mismatch. 
+ :param chunk_size: chunk size of tree hash calculation. This must be + 1 MiB for Amazon. + + """ + uploader = _Uploader(vault, upload_id, part_size, chunk_size) + for part_index, part_data in enumerate( + generate_parts_from_fobj(fobj, part_size)): + part_tree_hash = tree_hash(chunk_hashes(part_data, chunk_size)) + if (part_index not in part_hash_map or + part_hash_map[part_index] != part_tree_hash): + uploader.upload_part(part_index, part_data) + else: + uploader.skip_part(part_index, part_tree_hash, len(part_data)) + uploader.close() + return uploader.archive_id + + +class Writer(object): + """ + Presents a file-like object for writing to a Amazon Glacier + Archive. The data is written using the multi-part upload API. + """ + def __init__(self, vault, upload_id, part_size, chunk_size=_ONE_MEGABYTE): + self.uploader = _Uploader(vault, upload_id, part_size, chunk_size) + self.partitioner = _Partitioner(part_size, self._upload_part) + self.closed = False + self.next_part_index = 0 + + def write(self, data): + if self.closed: + raise ValueError("I/O operation on closed file") + self.partitioner.write(data) + + def _upload_part(self, part_data): + self.uploader.upload_part(self.next_part_index, part_data) + self.next_part_index += 1 + + def close(self): + if self.closed: + return + self.partitioner.flush() + self.uploader.close() + self.closed = True + + def get_archive_id(self): + self.close() + return self.uploader.archive_id + + @property + def current_tree_hash(self): + """ + Returns the current tree hash for the data that's been written + **so far**. + + Only once the writing is complete is the final tree hash returned. + """ + return tree_hash(self.uploader._tree_hashes) + + @property + def current_uploaded_size(self): + """ + Returns the current uploaded size for the data that's been written + **so far**. + + Only once the writing is complete is the final uploaded size returned. + """ + return self.uploader._uploaded_size + + @property + def upload_id(self): + return self.uploader.upload_id + + @property + def vault(self): + return self.uploader.vault diff --git a/desktop/core/ext-py/boto-2.38.0/boto/gs/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/gs/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..bf4c0b942ba793decfcf80d24ded113a7186b84d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/gs/__init__.py @@ -0,0 +1,22 @@ +# Copyright 2010 Google Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
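The tree-hash helpers used throughout writer.py can also be exercised directly; a quick sketch:

from boto.glacier.utils import bytes_to_hex, chunk_hashes, tree_hash

# Each 1 MiB chunk is SHA-256 hashed, the digests are folded pairwise
# into a single root, and bytes_to_hex gives the form Glacier expects.
data = b'x' * (3 * 1024 * 1024)              # three 1 MiB chunks
print(bytes_to_hex(tree_hash(chunk_hashes(data))))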
+# + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/gs/acl.py b/desktop/core/ext-py/boto-2.38.0/boto/gs/acl.py new file mode 100755 index 0000000000000000000000000000000000000000..57bdce1cbc36af46b2a147d3c3901f501c4f2ae2 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/gs/acl.py @@ -0,0 +1,308 @@ +# Copyright 2010 Google Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.gs.user import User +from boto.exception import InvalidAclError + +ACCESS_CONTROL_LIST = 'AccessControlList' +ALL_AUTHENTICATED_USERS = 'AllAuthenticatedUsers' +ALL_USERS = 'AllUsers' +DISPLAY_NAME = 'DisplayName' +DOMAIN = 'Domain' +EMAIL_ADDRESS = 'EmailAddress' +ENTRY = 'Entry' +ENTRIES = 'Entries' +GROUP_BY_DOMAIN = 'GroupByDomain' +GROUP_BY_EMAIL = 'GroupByEmail' +GROUP_BY_ID = 'GroupById' +ID = 'ID' +NAME = 'Name' +OWNER = 'Owner' +PERMISSION = 'Permission' +SCOPE = 'Scope' +TYPE = 'type' +USER_BY_EMAIL = 'UserByEmail' +USER_BY_ID = 'UserById' + + +CannedACLStrings = ['private', 'public-read', 'project-private', + 'public-read-write', 'authenticated-read', + 'bucket-owner-read', 'bucket-owner-full-control'] +"""A list of Google Cloud Storage predefined (canned) ACL strings.""" + +SupportedPermissions = ['READ', 'WRITE', 'FULL_CONTROL'] +"""A list of supported ACL permissions.""" + + +class ACL(object): + + def __init__(self, parent=None): + self.parent = parent + self.entries = Entries(self) + + @property + def acl(self): + return self + + def __repr__(self): + # Owner is optional in GS ACLs. + if hasattr(self, 'owner'): + entries_repr = ['Owner:%s' % self.owner.__repr__()] + else: + entries_repr = [''] + acl_entries = self.entries + if acl_entries: + for e in acl_entries.entry_list: + entries_repr.append(e.__repr__()) + return '<%s>' % ', '.join(entries_repr) + + # Method with same signature as boto.s3.acl.ACL.add_email_grant(), to allow + # polymorphic treatment at application layer. + def add_email_grant(self, permission, email_address): + entry = Entry(type=USER_BY_EMAIL, email_address=email_address, + permission=permission) + self.entries.entry_list.append(entry) + + # Method with same signature as boto.s3.acl.ACL.add_user_grant(), to allow + # polymorphic treatment at application layer. 
+ def add_user_grant(self, permission, user_id): + entry = Entry(permission=permission, type=USER_BY_ID, id=user_id) + self.entries.entry_list.append(entry) + + def add_group_email_grant(self, permission, email_address): + entry = Entry(type=GROUP_BY_EMAIL, email_address=email_address, + permission=permission) + self.entries.entry_list.append(entry) + + def add_group_grant(self, permission, group_id): + entry = Entry(type=GROUP_BY_ID, id=group_id, permission=permission) + self.entries.entry_list.append(entry) + + def startElement(self, name, attrs, connection): + if name.lower() == OWNER.lower(): + self.owner = User(self) + return self.owner + elif name.lower() == ENTRIES.lower(): + self.entries = Entries(self) + return self.entries + else: + return None + + def endElement(self, name, value, connection): + if name.lower() == OWNER.lower(): + pass + elif name.lower() == ENTRIES.lower(): + pass + else: + setattr(self, name, value) + + def to_xml(self): + s = '<%s>' % ACCESS_CONTROL_LIST + # Owner is optional in GS ACLs. + if hasattr(self, 'owner'): + s += self.owner.to_xml() + acl_entries = self.entries + if acl_entries: + s += acl_entries.to_xml() + s += '' % ACCESS_CONTROL_LIST + return s + + +class Entries(object): + + def __init__(self, parent=None): + self.parent = parent + # Entries is the class that represents the same-named XML + # element. entry_list is the list within this class that holds the data. + self.entry_list = [] + + def __repr__(self): + entries_repr = [] + for e in self.entry_list: + entries_repr.append(e.__repr__()) + return '' % ', '.join(entries_repr) + + def startElement(self, name, attrs, connection): + if name.lower() == ENTRY.lower(): + entry = Entry(self) + self.entry_list.append(entry) + return entry + else: + return None + + def endElement(self, name, value, connection): + if name.lower() == ENTRY.lower(): + pass + else: + setattr(self, name, value) + + def to_xml(self): + if not self.entry_list: + return '' + s = '<%s>' % ENTRIES + for entry in self.entry_list: + s += entry.to_xml() + s += '' % ENTRIES + return s + + +# Class that represents a single (Scope, Permission) entry in an ACL. +class Entry(object): + + def __init__(self, scope=None, type=None, id=None, name=None, + email_address=None, domain=None, permission=None): + if not scope: + scope = Scope(self, type, id, name, email_address, domain) + self.scope = scope + self.permission = permission + + def __repr__(self): + return '<%s: %s>' % (self.scope.__repr__(), self.permission.__repr__()) + + def startElement(self, name, attrs, connection): + if name.lower() == SCOPE.lower(): + # The following if statement used to look like this: + # if not TYPE in attrs: + # which caused problems because older versions of the + # AttributesImpl class in the xml.sax library neglected to include + # a __contains__() method (which Python calls to implement the + # 'in' operator). So when you use the in operator, like the if + # statement above, Python invokes the __getiter__() method with + # index 0, which raises an exception. More recent versions of + # xml.sax include the __contains__() method, rendering the in + # operator functional. The work-around here is to formulate the + # if statement as below, which is the legal way to query + # AttributesImpl for containment (and is also how the added + # __contains__() method works). At one time gsutil disallowed + # xmlplus-based parsers, until this more specific problem was + # determined. 
+ if TYPE not in attrs: + raise InvalidAclError('Missing "%s" in "%s" part of ACL' % + (TYPE, SCOPE)) + self.scope = Scope(self, attrs[TYPE]) + return self.scope + elif name.lower() == PERMISSION.lower(): + pass + else: + return None + + def endElement(self, name, value, connection): + if name.lower() == SCOPE.lower(): + pass + elif name.lower() == PERMISSION.lower(): + value = value.strip() + if not value in SupportedPermissions: + raise InvalidAclError('Invalid Permission "%s"' % value) + self.permission = value + else: + setattr(self, name, value) + + def to_xml(self): + s = '<%s>' % ENTRY + s += self.scope.to_xml() + s += '<%s>%s' % (PERMISSION, self.permission, PERMISSION) + s += '' % ENTRY + return s + + +class Scope(object): + + # Map from Scope type.lower() to lower-cased list of allowed sub-elems. + ALLOWED_SCOPE_TYPE_SUB_ELEMS = { + ALL_AUTHENTICATED_USERS.lower() : [], + ALL_USERS.lower() : [], + GROUP_BY_DOMAIN.lower() : [DOMAIN.lower()], + GROUP_BY_EMAIL.lower() : [ + DISPLAY_NAME.lower(), EMAIL_ADDRESS.lower(), NAME.lower()], + GROUP_BY_ID.lower() : [DISPLAY_NAME.lower(), ID.lower(), NAME.lower()], + USER_BY_EMAIL.lower() : [ + DISPLAY_NAME.lower(), EMAIL_ADDRESS.lower(), NAME.lower()], + USER_BY_ID.lower() : [DISPLAY_NAME.lower(), ID.lower(), NAME.lower()] + } + + def __init__(self, parent, type=None, id=None, name=None, + email_address=None, domain=None): + self.parent = parent + self.type = type + self.name = name + self.id = id + self.domain = domain + self.email_address = email_address + if self.type.lower() not in self.ALLOWED_SCOPE_TYPE_SUB_ELEMS: + raise InvalidAclError('Invalid %s %s "%s" ' % + (SCOPE, TYPE, self.type)) + + def __repr__(self): + named_entity = None + if self.id: + named_entity = self.id + elif self.email_address: + named_entity = self.email_address + elif self.domain: + named_entity = self.domain + if named_entity: + return '<%s: %s>' % (self.type, named_entity) + else: + return '<%s>' % self.type + + def startElement(self, name, attrs, connection): + if (not name.lower() in + self.ALLOWED_SCOPE_TYPE_SUB_ELEMS[self.type.lower()]): + raise InvalidAclError('Element "%s" not allowed in %s %s "%s" ' % + (name, SCOPE, TYPE, self.type)) + return None + + def endElement(self, name, value, connection): + value = value.strip() + if name.lower() == DOMAIN.lower(): + self.domain = value + elif name.lower() == EMAIL_ADDRESS.lower(): + self.email_address = value + elif name.lower() == ID.lower(): + self.id = value + elif name.lower() == NAME.lower(): + self.name = value + else: + setattr(self, name, value) + + def to_xml(self): + s = '<%s type="%s">' % (SCOPE, self.type) + if (self.type.lower() == ALL_AUTHENTICATED_USERS.lower() + or self.type.lower() == ALL_USERS.lower()): + pass + elif self.type.lower() == GROUP_BY_DOMAIN.lower(): + s += '<%s>%s' % (DOMAIN, self.domain, DOMAIN) + elif (self.type.lower() == GROUP_BY_EMAIL.lower() + or self.type.lower() == USER_BY_EMAIL.lower()): + s += '<%s>%s' % (EMAIL_ADDRESS, self.email_address, + EMAIL_ADDRESS) + if self.name: + s += '<%s>%s' % (NAME, self.name, NAME) + elif (self.type.lower() == GROUP_BY_ID.lower() + or self.type.lower() == USER_BY_ID.lower()): + s += '<%s>%s' % (ID, self.id, ID) + if self.name: + s += '<%s>%s' % (NAME, self.name, NAME) + else: + raise InvalidAclError('Invalid scope type "%s" ', self.type) + + s += '' % SCOPE + return s diff --git a/desktop/core/ext-py/boto-2.38.0/boto/gs/bucket.py b/desktop/core/ext-py/boto-2.38.0/boto/gs/bucket.py new file mode 100644 index 
0000000000000000000000000000000000000000..37636fb87616fd26a15eeaceabe0b3260ac40c87 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/gs/bucket.py @@ -0,0 +1,989 @@ +# Copyright 2010 Google Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import re +import urllib +import xml.sax + +import boto +from boto import handler +from boto.resultset import ResultSet +from boto.exception import GSResponseError +from boto.exception import InvalidAclError +from boto.gs.acl import ACL, CannedACLStrings +from boto.gs.acl import SupportedPermissions as GSPermissions +from boto.gs.bucketlistresultset import VersionedBucketListResultSet +from boto.gs.cors import Cors +from boto.gs.lifecycle import LifecycleConfig +from boto.gs.key import Key as GSKey +from boto.s3.acl import Policy +from boto.s3.bucket import Bucket as S3Bucket +from boto.utils import get_utf8_value +from boto.compat import six + +# constants for http query args +DEF_OBJ_ACL = 'defaultObjectAcl' +STANDARD_ACL = 'acl' +CORS_ARG = 'cors' +LIFECYCLE_ARG = 'lifecycle' +ERROR_DETAILS_REGEX = re.compile(r'
    <Details>(?P<details>.*)</Details>
    ') + +class Bucket(S3Bucket): + """Represents a Google Cloud Storage bucket.""" + + VersioningBody = ('\n' + '%s' + '') + WebsiteBody = ('\n' + '%s%s') + WebsiteMainPageFragment = '%s' + WebsiteErrorFragment = '%s' + + def __init__(self, connection=None, name=None, key_class=GSKey): + super(Bucket, self).__init__(connection, name, key_class) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Name': + self.name = value + elif name == 'CreationDate': + self.creation_date = value + else: + setattr(self, name, value) + + def get_key(self, key_name, headers=None, version_id=None, + response_headers=None, generation=None): + """Returns a Key instance for an object in this bucket. + + Note that this method uses a HEAD request to check for the existence of + the key. + + :type key_name: string + :param key_name: The name of the key to retrieve + + :type response_headers: dict + :param response_headers: A dictionary containing HTTP + headers/values that will override any headers associated + with the stored object in the response. See + http://goo.gl/06N3b for details. + + :type version_id: string + :param version_id: Unused in this subclass. + + :type generation: int + :param generation: A specific generation number to fetch the key at. If + not specified, the latest generation is fetched. + + :rtype: :class:`boto.gs.key.Key` + :returns: A Key object from this bucket. + """ + query_args_l = [] + if generation: + query_args_l.append('generation=%s' % generation) + if response_headers: + for rk, rv in six.iteritems(response_headers): + query_args_l.append('%s=%s' % (rk, urllib.quote(rv))) + try: + key, resp = self._get_key_internal(key_name, headers, + query_args_l=query_args_l) + except GSResponseError as e: + if e.status == 403 and 'Forbidden' in e.reason: + # If we failed getting an object, let the user know which object + # failed rather than just returning a generic 403. + e.reason = ("Access denied to 'gs://%s/%s'." % + (self.name, key_name)) + raise + return key + + def copy_key(self, new_key_name, src_bucket_name, src_key_name, + metadata=None, src_version_id=None, storage_class='STANDARD', + preserve_acl=False, encrypt_key=False, headers=None, + query_args=None, src_generation=None): + """Create a new key in the bucket by copying an existing key. + + :type new_key_name: string + :param new_key_name: The name of the new key + + :type src_bucket_name: string + :param src_bucket_name: The name of the source bucket + + :type src_key_name: string + :param src_key_name: The name of the source key + + :type src_generation: int + :param src_generation: The generation number of the source key to copy. + If not specified, the latest generation is copied. + + :type metadata: dict + :param metadata: Metadata to be associated with new key. If + metadata is supplied, it will replace the metadata of the + source key being copied. If no metadata is supplied, the + source key's metadata will be copied to the new key. + + :type version_id: string + :param version_id: Unused in this subclass. + + :type storage_class: string + :param storage_class: The storage class of the new key. By + default, the new key will use the standard storage class. + Possible values are: STANDARD | DURABLE_REDUCED_AVAILABILITY + + :type preserve_acl: bool + :param preserve_acl: If True, the ACL from the source key will + be copied to the destination key. If False, the + destination key will have the default ACL. 
Note that + preserving the ACL in the new key object will require two + additional API calls to GCS, one to retrieve the current + ACL and one to set that ACL on the new object. If you + don't care about the ACL (or if you have a default ACL set + on the bucket), a value of False will be significantly more + efficient. + + :type encrypt_key: bool + :param encrypt_key: Included for compatibility with S3. This argument is + ignored. + + :type headers: dict + :param headers: A dictionary of header name/value pairs. + + :type query_args: string + :param query_args: A string of additional querystring arguments + to append to the request + + :rtype: :class:`boto.gs.key.Key` + :returns: An instance of the newly created key object + """ + if src_generation: + headers = headers or {} + headers['x-goog-copy-source-generation'] = str(src_generation) + return super(Bucket, self).copy_key( + new_key_name, src_bucket_name, src_key_name, metadata=metadata, + storage_class=storage_class, preserve_acl=preserve_acl, + encrypt_key=encrypt_key, headers=headers, query_args=query_args) + + def list_versions(self, prefix='', delimiter='', marker='', + generation_marker='', headers=None): + """ + List versioned objects within a bucket. This returns an + instance of an VersionedBucketListResultSet that automatically + handles all of the result paging, etc. from GCS. You just need + to keep iterating until there are no more results. Called + with no arguments, this will return an iterator object across + all keys within the bucket. + + :type prefix: string + :param prefix: allows you to limit the listing to a particular + prefix. For example, if you call the method with + prefix='/foo/' then the iterator will only cycle through + the keys that begin with the string '/foo/'. + + :type delimiter: string + :param delimiter: can be used in conjunction with the prefix + to allow you to organize and browse your keys + hierarchically. See: + https://developers.google.com/storage/docs/reference-headers#delimiter + for more details. + + :type marker: string + :param marker: The "marker" of where you are in the result set + + :type generation_marker: string + :param generation_marker: The "generation marker" of where you are in + the result set. + + :type headers: dict + :param headers: A dictionary of header name/value pairs. + + :rtype: + :class:`boto.gs.bucketlistresultset.VersionedBucketListResultSet` + :return: an instance of a BucketListResultSet that handles paging, etc. + """ + return VersionedBucketListResultSet(self, prefix, delimiter, + marker, generation_marker, + headers) + + def validate_get_all_versions_params(self, params): + """ + See documentation in boto/s3/bucket.py. + """ + self.validate_kwarg_names(params, + ['version_id_marker', 'delimiter', 'marker', + 'generation_marker', 'prefix', 'max_keys']) + + def delete_key(self, key_name, headers=None, version_id=None, + mfa_token=None, generation=None): + """ + Deletes a key from the bucket. + + :type key_name: string + :param key_name: The key name to delete + + :type headers: dict + :param headers: A dictionary of header name/value pairs. + + :type version_id: string + :param version_id: Unused in this subclass. + + :type mfa_token: tuple or list of strings + :param mfa_token: Unused in this subclass. + + :type generation: int + :param generation: The generation number of the key to delete. If not + specified, the latest generation number will be deleted. 
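A sketch of per-generation access with the methods above; the bucket, key, and generation values are all hypothetical:

import boto

gs = boto.connect_gs()
bucket = gs.get_bucket('my-bucket')

# Fetch one specific generation, enumerate all of them, then delete it.
key = bucket.get_key('reports/2015.csv', generation=1358877005479000)
for version in bucket.list_versions(prefix='reports/'):
    print('%s generation=%s' % (version.name, version.generation))
bucket.delete_key('reports/2015.csv', generation=1358877005479000)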
+ + :rtype: :class:`boto.gs.key.Key` + :returns: A key object holding information on what was + deleted. + """ + query_args_l = [] + if generation: + query_args_l.append('generation=%s' % generation) + self._delete_key_internal(key_name, headers=headers, + version_id=version_id, mfa_token=mfa_token, + query_args_l=query_args_l) + + def set_acl(self, acl_or_str, key_name='', headers=None, version_id=None, + generation=None, if_generation=None, if_metageneration=None): + """Sets or changes a bucket's or key's ACL. + + :type acl_or_str: string or :class:`boto.gs.acl.ACL` + :param acl_or_str: A canned ACL string (see + :data:`~.gs.acl.CannedACLStrings`) or an ACL object. + + :type key_name: string + :param key_name: A key name within the bucket to set the ACL for. If not + specified, the ACL for the bucket will be set. + + :type headers: dict + :param headers: Additional headers to set during the request. + + :type version_id: string + :param version_id: Unused in this subclass. + + :type generation: int + :param generation: If specified, sets the ACL for a specific generation + of a versioned object. If not specified, the current version is + modified. + + :type if_generation: int + :param if_generation: (optional) If set to a generation number, the acl + will only be updated if its current generation number is this value. + + :type if_metageneration: int + :param if_metageneration: (optional) If set to a metageneration number, + the acl will only be updated if its current metageneration number is + this value. + """ + if isinstance(acl_or_str, Policy): + raise InvalidAclError('Attempt to set S3 Policy on GS ACL') + elif isinstance(acl_or_str, ACL): + self.set_xml_acl(acl_or_str.to_xml(), key_name, headers=headers, + generation=generation, + if_generation=if_generation, + if_metageneration=if_metageneration) + else: + self.set_canned_acl(acl_or_str, key_name, headers=headers, + generation=generation, + if_generation=if_generation, + if_metageneration=if_metageneration) + + def set_def_acl(self, acl_or_str, headers=None): + """Sets or changes a bucket's default ACL. + + :type acl_or_str: string or :class:`boto.gs.acl.ACL` + :param acl_or_str: A canned ACL string (see + :data:`~.gs.acl.CannedACLStrings`) or an ACL object. + + :type headers: dict + :param headers: Additional headers to set during the request. + """ + if isinstance(acl_or_str, Policy): + raise InvalidAclError('Attempt to set S3 Policy on GS ACL') + elif isinstance(acl_or_str, ACL): + self.set_def_xml_acl(acl_or_str.to_xml(), headers=headers) + else: + self.set_def_canned_acl(acl_or_str, headers=headers) + + def _get_xml_acl_helper(self, key_name, headers, query_args): + """Provides common functionality for get_xml_acl and _get_acl_helper.""" + response = self.connection.make_request('GET', self.name, key_name, + query_args=query_args, + headers=headers) + body = response.read() + if response.status != 200: + if response.status == 403: + match = ERROR_DETAILS_REGEX.search(body) + details = match.group('details') if match else None + if details: + details = (('
    <Details>%s. Note that Full Control access' + ' is required to access ACLs.</Details>
    ') % + details) + body = re.sub(ERROR_DETAILS_REGEX, details, body) + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + return body + + def _get_acl_helper(self, key_name, headers, query_args): + """Provides common functionality for get_acl and get_def_acl.""" + body = self._get_xml_acl_helper(key_name, headers, query_args) + acl = ACL(self) + h = handler.XmlHandler(acl, self) + xml.sax.parseString(body, h) + return acl + + def get_acl(self, key_name='', headers=None, version_id=None, + generation=None): + """Returns the ACL of the bucket or an object in the bucket. + + :param str key_name: The name of the object to get the ACL for. If not + specified, the ACL for the bucket will be returned. + + :param dict headers: Additional headers to set during the request. + + :type version_id: string + :param version_id: Unused in this subclass. + + :param int generation: If specified, gets the ACL for a specific + generation of a versioned object. If not specified, the current + version is returned. This parameter is only valid when retrieving + the ACL of an object, not a bucket. + + :rtype: :class:`.gs.acl.ACL` + """ + query_args = STANDARD_ACL + if generation: + query_args += '&generation=%s' % generation + return self._get_acl_helper(key_name, headers, query_args) + + def get_xml_acl(self, key_name='', headers=None, version_id=None, + generation=None): + """Returns the ACL string of the bucket or an object in the bucket. + + :param str key_name: The name of the object to get the ACL for. If not + specified, the ACL for the bucket will be returned. + + :param dict headers: Additional headers to set during the request. + + :type version_id: string + :param version_id: Unused in this subclass. + + :param int generation: If specified, gets the ACL for a specific + generation of a versioned object. If not specified, the current + version is returned. This parameter is only valid when retrieving + the ACL of an object, not a bucket. + + :rtype: str + """ + query_args = STANDARD_ACL + if generation: + query_args += '&generation=%s' % generation + return self._get_xml_acl_helper(key_name, headers, query_args) + + def get_def_acl(self, headers=None): + """Returns the bucket's default ACL. + + :param dict headers: Additional headers to set during the request. + + :rtype: :class:`.gs.acl.ACL` + """ + return self._get_acl_helper('', headers, DEF_OBJ_ACL) + + def _set_acl_helper(self, acl_or_str, key_name, headers, query_args, + generation, if_generation, if_metageneration, + canned=False): + """Provides common functionality for set_acl, set_xml_acl, + set_canned_acl, set_def_acl, set_def_xml_acl, and + set_def_canned_acl().""" + + headers = headers or {} + data = '' + if canned: + headers[self.connection.provider.acl_header] = acl_or_str + else: + data = acl_or_str + + if generation: + query_args += '&generation=%s' % generation + + if if_metageneration is not None and if_generation is None: + raise ValueError("Received if_metageneration argument with no " + "if_generation argument. 
A metageneration has no " + "meaning without a content generation.") + if not key_name and (if_generation or if_metageneration): + raise ValueError("Received if_generation or if_metageneration " + "parameter while setting the ACL of a bucket.") + if if_generation is not None: + headers['x-goog-if-generation-match'] = str(if_generation) + if if_metageneration is not None: + headers['x-goog-if-metageneration-match'] = str(if_metageneration) + + response = self.connection.make_request( + 'PUT', get_utf8_value(self.name), get_utf8_value(key_name), + data=get_utf8_value(data), headers=headers, query_args=query_args) + body = response.read() + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_xml_acl(self, acl_str, key_name='', headers=None, version_id=None, + query_args='acl', generation=None, if_generation=None, + if_metageneration=None): + """Sets a bucket's or objects's ACL to an XML string. + + :type acl_str: string + :param acl_str: A string containing the ACL XML. + + :type key_name: string + :param key_name: A key name within the bucket to set the ACL for. If not + specified, the ACL for the bucket will be set. + + :type headers: dict + :param headers: Additional headers to set during the request. + + :type version_id: string + :param version_id: Unused in this subclass. + + :type query_args: str + :param query_args: The query parameters to pass with the request. + + :type generation: int + :param generation: If specified, sets the ACL for a specific generation + of a versioned object. If not specified, the current version is + modified. + + :type if_generation: int + :param if_generation: (optional) If set to a generation number, the acl + will only be updated if its current generation number is this value. + + :type if_metageneration: int + :param if_metageneration: (optional) If set to a metageneration number, + the acl will only be updated if its current metageneration number is + this value. + """ + return self._set_acl_helper(acl_str, key_name=key_name, headers=headers, + query_args=query_args, + generation=generation, + if_generation=if_generation, + if_metageneration=if_metageneration) + + def set_canned_acl(self, acl_str, key_name='', headers=None, + version_id=None, generation=None, if_generation=None, + if_metageneration=None): + """Sets a bucket's or objects's ACL using a predefined (canned) value. + + :type acl_str: string + :param acl_str: A canned ACL string. See + :data:`~.gs.acl.CannedACLStrings`. + + :type key_name: string + :param key_name: A key name within the bucket to set the ACL for. If not + specified, the ACL for the bucket will be set. + + :type headers: dict + :param headers: Additional headers to set during the request. + + :type version_id: string + :param version_id: Unused in this subclass. + + :type generation: int + :param generation: If specified, sets the ACL for a specific generation + of a versioned object. If not specified, the current version is + modified. + + :type if_generation: int + :param if_generation: (optional) If set to a generation number, the acl + will only be updated if its current generation number is this value. + + :type if_metageneration: int + :param if_metageneration: (optional) If set to a metageneration number, + the acl will only be updated if its current metageneration number is + this value. + """ + if acl_str not in CannedACLStrings: + raise ValueError("Provided canned ACL string (%s) is not valid." 
+ % acl_str) + query_args = STANDARD_ACL + return self._set_acl_helper(acl_str, key_name, headers, query_args, + generation, if_generation, + if_metageneration, canned=True) + + def set_def_canned_acl(self, acl_str, headers=None): + """Sets a bucket's default ACL using a predefined (canned) value. + + :type acl_str: string + :param acl_str: A canned ACL string. See + :data:`~.gs.acl.CannedACLStrings`. + + :type headers: dict + :param headers: Additional headers to set during the request. + """ + if acl_str not in CannedACLStrings: + raise ValueError("Provided canned ACL string (%s) is not valid." + % acl_str) + query_args = DEF_OBJ_ACL + return self._set_acl_helper(acl_str, '', headers, query_args, + generation=None, if_generation=None, + if_metageneration=None, canned=True) + + def set_def_xml_acl(self, acl_str, headers=None): + """Sets a bucket's default ACL to an XML string. + + :type acl_str: string + :param acl_str: A string containing the ACL XML. + + :type headers: dict + :param headers: Additional headers to set during the request. + """ + return self.set_xml_acl(acl_str, '', headers, + query_args=DEF_OBJ_ACL) + + def get_cors(self, headers=None): + """Returns a bucket's CORS XML document. + + :param dict headers: Additional headers to send with the request. + :rtype: :class:`~.cors.Cors` + """ + response = self.connection.make_request('GET', self.name, + query_args=CORS_ARG, + headers=headers) + body = response.read() + if response.status == 200: + # Success - parse XML and return Cors object. + cors = Cors() + h = handler.XmlHandler(cors, self) + xml.sax.parseString(body, h) + return cors + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_cors(self, cors, headers=None): + """Sets a bucket's CORS XML document. + + :param str cors: A string containing the CORS XML. + :param dict headers: Additional headers to send with the request. + """ + response = self.connection.make_request( + 'PUT', get_utf8_value(self.name), data=get_utf8_value(cors), + query_args=CORS_ARG, headers=headers) + body = response.read() + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def get_storage_class(self): + """ + Returns the StorageClass for the bucket. + + :rtype: str + :return: The StorageClass for the bucket. + """ + response = self.connection.make_request('GET', self.name, + query_args='storageClass') + body = response.read() + if response.status == 200: + rs = ResultSet(self) + h = handler.XmlHandler(rs, self) + xml.sax.parseString(body, h) + return rs.StorageClass + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + + # Method with same signature as boto.s3.bucket.Bucket.add_email_grant(), + # to allow polymorphic treatment at application layer. + def add_email_grant(self, permission, email_address, + recursive=False, headers=None): + """ + Convenience method that provides a quick way to add an email grant + to a bucket. This method retrieves the current ACL, creates a new + grant based on the parameters passed in, adds that grant to the ACL + and then PUT's the new ACL back to GCS. + + :type permission: string + :param permission: The permission being granted. Should be one of: + (READ, WRITE, FULL_CONTROL). + + :type email_address: string + :param email_address: The email address associated with the GS + account your are granting the permission to. 
+ + :type recursive: bool + :param recursive: A boolean value to controls whether the call + will apply the grant to all keys within the bucket + or not. The default value is False. By passing a + True value, the call will iterate through all keys + in the bucket and apply the same grant to each key. + CAUTION: If you have a lot of keys, this could take + a long time! + """ + if permission not in GSPermissions: + raise self.connection.provider.storage_permissions_error( + 'Unknown Permission: %s' % permission) + acl = self.get_acl(headers=headers) + acl.add_email_grant(permission, email_address) + self.set_acl(acl, headers=headers) + if recursive: + for key in self: + key.add_email_grant(permission, email_address, headers=headers) + + # Method with same signature as boto.s3.bucket.Bucket.add_user_grant(), + # to allow polymorphic treatment at application layer. + def add_user_grant(self, permission, user_id, recursive=False, + headers=None): + """ + Convenience method that provides a quick way to add a canonical user + grant to a bucket. This method retrieves the current ACL, creates a new + grant based on the parameters passed in, adds that grant to the ACL and + then PUTs the new ACL back to GCS. + + :type permission: string + :param permission: The permission being granted. Should be one of: + (READ|WRITE|FULL_CONTROL) + + :type user_id: string + :param user_id: The canonical user id associated with the GS account + you are granting the permission to. + + :type recursive: bool + :param recursive: A boolean value to controls whether the call + will apply the grant to all keys within the bucket + or not. The default value is False. By passing a + True value, the call will iterate through all keys + in the bucket and apply the same grant to each key. + CAUTION: If you have a lot of keys, this could take + a long time! + """ + if permission not in GSPermissions: + raise self.connection.provider.storage_permissions_error( + 'Unknown Permission: %s' % permission) + acl = self.get_acl(headers=headers) + acl.add_user_grant(permission, user_id) + self.set_acl(acl, headers=headers) + if recursive: + for key in self: + key.add_user_grant(permission, user_id, headers=headers) + + def add_group_email_grant(self, permission, email_address, recursive=False, + headers=None): + """ + Convenience method that provides a quick way to add an email group + grant to a bucket. This method retrieves the current ACL, creates a new + grant based on the parameters passed in, adds that grant to the ACL and + then PUT's the new ACL back to GCS. + + :type permission: string + :param permission: The permission being granted. Should be one of: + READ|WRITE|FULL_CONTROL + See http://code.google.com/apis/storage/docs/developer-guide.html#authorization + for more details on permissions. + + :type email_address: string + :param email_address: The email address associated with the Google + Group to which you are granting the permission. + + :type recursive: bool + :param recursive: A boolean value to controls whether the call + will apply the grant to all keys within the bucket + or not. The default value is False. By passing a + True value, the call will iterate through all keys + in the bucket and apply the same grant to each key. + CAUTION: If you have a lot of keys, this could take + a long time! 
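A sketch of the grant helpers above; the canonical user id is a placeholder:

import boto

gs = boto.connect_gs()
bucket = gs.get_bucket('my-bucket')

# Bucket-level grant only; recursive=True would re-PUT every key's ACL,
# which can be slow on large buckets.
bucket.add_user_grant('READ', '00b4903a97...')   # placeholder canonical id
for entry in bucket.get_acl().entries.entry_list:
    print(entry)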
+ """ + if permission not in GSPermissions: + raise self.connection.provider.storage_permissions_error( + 'Unknown Permission: %s' % permission) + acl = self.get_acl(headers=headers) + acl.add_group_email_grant(permission, email_address) + self.set_acl(acl, headers=headers) + if recursive: + for key in self: + key.add_group_email_grant(permission, email_address, + headers=headers) + + # Method with same input signature as boto.s3.bucket.Bucket.list_grants() + # (but returning different object type), to allow polymorphic treatment + # at application layer. + def list_grants(self, headers=None): + """Returns the ACL entries applied to this bucket. + + :param dict headers: Additional headers to send with the request. + :rtype: list containing :class:`~.gs.acl.Entry` objects. + """ + acl = self.get_acl(headers=headers) + return acl.entries + + def disable_logging(self, headers=None): + """Disable logging on this bucket. + + :param dict headers: Additional headers to send with the request. + """ + xml_str = '' + self.set_subresource('logging', xml_str, headers=headers) + + def enable_logging(self, target_bucket, target_prefix=None, headers=None): + """Enable logging on a bucket. + + :type target_bucket: bucket or string + :param target_bucket: The bucket to log to. + + :type target_prefix: string + :param target_prefix: The prefix which should be prepended to the + generated log files written to the target_bucket. + + :param dict headers: Additional headers to send with the request. + """ + if isinstance(target_bucket, Bucket): + target_bucket = target_bucket.name + xml_str = '' + xml_str = (xml_str + '%s' % target_bucket) + if target_prefix: + xml_str = (xml_str + + '%s' % target_prefix) + xml_str = xml_str + '' + + self.set_subresource('logging', xml_str, headers=headers) + + def get_logging_config_with_xml(self, headers=None): + """Returns the current status of logging configuration on the bucket as + unparsed XML. + + :param dict headers: Additional headers to send with the request. + + :rtype: 2-Tuple + :returns: 2-tuple containing: + + 1) A dictionary containing the parsed XML response from GCS. The + overall structure is: + + * Logging + + * LogObjectPrefix: Prefix that is prepended to log objects. + * LogBucket: Target bucket for log objects. + + 2) Unparsed XML describing the bucket's logging configuration. + """ + response = self.connection.make_request('GET', self.name, + query_args='logging', + headers=headers) + body = response.read() + boto.log.debug(body) + + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + e = boto.jsonresponse.Element() + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e, body + + def get_logging_config(self, headers=None): + """Returns the current status of logging configuration on the bucket. + + :param dict headers: Additional headers to send with the request. + + :rtype: dict + :returns: A dictionary containing the parsed XML response from GCS. The + overall structure is: + + * Logging + + * LogObjectPrefix: Prefix that is prepended to log objects. + * LogBucket: Target bucket for log objects. + """ + return self.get_logging_config_with_xml(headers)[0] + + def configure_website(self, main_page_suffix=None, error_key=None, + headers=None): + """Configure this bucket to act as a website + + :type main_page_suffix: str + :param main_page_suffix: Suffix that is appended to a request that is + for a "directory" on the website endpoint (e.g. 
+            index.html and you make a request to samplebucket/images/ the
+            data that is returned will be for the object with the key name
+            images/index.html). The suffix must not be empty and must not
+            include a slash character. This parameter is optional and the
+            property is disabled if excluded.
+
+        :type error_key: str
+        :param error_key: The object key name to use when a 404 error occurs.
+            This parameter is optional and the property is disabled if
+            excluded.
+
+        :param dict headers: Additional headers to send with the request.
+        """
+        if main_page_suffix:
+            main_page_frag = self.WebsiteMainPageFragment % main_page_suffix
+        else:
+            main_page_frag = ''
+
+        if error_key:
+            error_frag = self.WebsiteErrorFragment % error_key
+        else:
+            error_frag = ''
+
+        body = self.WebsiteBody % (main_page_frag, error_frag)
+        response = self.connection.make_request(
+            'PUT', get_utf8_value(self.name), data=get_utf8_value(body),
+            query_args='websiteConfig', headers=headers)
+        body = response.read()
+        if response.status == 200:
+            return True
+        else:
+            raise self.connection.provider.storage_response_error(
+                response.status, response.reason, body)
+
+    def get_website_configuration(self, headers=None):
+        """Returns the current status of website configuration on the bucket.
+
+        :param dict headers: Additional headers to send with the request.
+
+        :rtype: dict
+        :returns: A dictionary containing the parsed XML response from GCS.
+            The overall structure is:
+
+            * WebsiteConfiguration
+
+              * MainPageSuffix: suffix that is appended to a request for a
+                "directory" on the website endpoint.
+              * NotFoundPage: name of an object to serve when site visitors
+                encounter a 404.
+        """
+        return self.get_website_configuration_with_xml(headers)[0]
+
+    def get_website_configuration_with_xml(self, headers=None):
+        """Returns the current status of website configuration on the bucket
+        as unparsed XML.
+
+        :param dict headers: Additional headers to send with the request.
+
+        :rtype: 2-Tuple
+        :returns: 2-tuple containing:
+
+            1) A dictionary containing the parsed XML response from GCS. The
+               overall structure is:
+
+               * WebsiteConfiguration
+
+                 * MainPageSuffix: suffix that is appended to a request for
+                   a "directory" on the website endpoint.
+                 * NotFoundPage: name of an object to serve when site
+                   visitors encounter a 404.
+
+            2) Unparsed XML describing the bucket's website configuration.
+        """
+        response = self.connection.make_request('GET', self.name,
+                                                query_args='websiteConfig',
+                                                headers=headers)
+        body = response.read()
+        boto.log.debug(body)
+
+        if response.status != 200:
+            raise self.connection.provider.storage_response_error(
+                response.status, response.reason, body)
+
+        e = boto.jsonresponse.Element()
+        h = boto.jsonresponse.XmlHandler(e, None)
+        h.parse(body)
+        return e, body
+
+    def delete_website_configuration(self, headers=None):
+        """Remove the website configuration from this bucket.
+
+        :param dict headers: Additional headers to send with the request.
+        """
+        self.configure_website(headers=headers)
+
+    def get_versioning_status(self, headers=None):
+        """Returns the current status of versioning configuration on the
+        bucket.
+
+        :rtype: bool
+        """
+        response = self.connection.make_request('GET', self.name,
+                                                query_args='versioning',
+                                                headers=headers)
+        body = response.read()
+        boto.log.debug(body)
+        if response.status != 200:
+            raise self.connection.provider.storage_response_error(
+                response.status, response.reason, body)
+        resp_json = boto.jsonresponse.Element()
+        boto.jsonresponse.XmlHandler(resp_json, None).parse(body)
+        resp_json = resp_json['VersioningConfiguration']
+        return ('Status' in resp_json) and (resp_json['Status'] == 'Enabled')
+
+    def configure_versioning(self, enabled, headers=None):
+        """Configure versioning for this bucket.
+
+        :param bool enabled: If set to True, enables versioning on this
+            bucket. If set to False, disables versioning.
+
+        :param dict headers: Additional headers to send with the request.
+        """
+        if enabled == True:
+            req_body = self.VersioningBody % 'Enabled'
+        else:
+            req_body = self.VersioningBody % 'Suspended'
+        self.set_subresource('versioning', req_body, headers=headers)
+
+    def get_lifecycle_config(self, headers=None):
+        """
+        Returns the current lifecycle configuration on the bucket.
+
+        :rtype: :class:`boto.gs.lifecycle.LifecycleConfig`
+        :returns: A LifecycleConfig object that describes all current
+            lifecycle rules in effect for the bucket.
+        """
+        response = self.connection.make_request('GET', self.name,
+                                                query_args=LIFECYCLE_ARG,
+                                                headers=headers)
+        body = response.read()
+        boto.log.debug(body)
+        if response.status == 200:
+            lifecycle_config = LifecycleConfig()
+            h = handler.XmlHandler(lifecycle_config, self)
+            xml.sax.parseString(body, h)
+            return lifecycle_config
+        else:
+            raise self.connection.provider.storage_response_error(
+                response.status, response.reason, body)
+
+    def configure_lifecycle(self, lifecycle_config, headers=None):
+        """
+        Configure lifecycle for this bucket.
+
+        :type lifecycle_config: :class:`boto.gs.lifecycle.LifecycleConfig`
+        :param lifecycle_config: The lifecycle configuration you want
+            to configure for this bucket.
+        """
+        xml = lifecycle_config.to_xml()
+        response = self.connection.make_request(
+            'PUT', get_utf8_value(self.name), data=get_utf8_value(xml),
+            query_args=LIFECYCLE_ARG, headers=headers)
+        body = response.read()
+        if response.status == 200:
+            return True
+        else:
+            raise self.connection.provider.storage_response_error(
+                response.status, response.reason, body)
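The bucket-level configuration methods above compose naturally. A minimal sketch, assuming credentials are configured and the buckets already exist (names are illustrative):

    import boto

    bucket = boto.connect_gs().get_bucket('example-bucket')
    bucket.configure_versioning(True)
    print(bucket.get_versioning_status())      # True once versioning is on
    bucket.configure_website(main_page_suffix='index.html',
                             error_key='404.html')
    bucket.enable_logging('example-log-bucket', target_prefix='access-')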
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/gs/bucketlistresultset.py b/desktop/core/ext-py/boto-2.38.0/boto/gs/bucketlistresultset.py
new file mode 100644
index 0000000000000000000000000000000000000000..db634cfd4538cc7b91c1a4f77603148e0ef68864
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/gs/bucketlistresultset.py
@@ -0,0 +1,64 @@
+# Copyright 2012 Google Inc.
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+def versioned_bucket_lister(bucket, prefix='', delimiter='',
+                            marker='', generation_marker='', headers=None):
+    """
+    A generator function for listing versioned objects.
+    """
+    more_results = True
+    k = None
+    while more_results:
+        rs = bucket.get_all_versions(prefix=prefix, marker=marker,
+                                     generation_marker=generation_marker,
+                                     delimiter=delimiter, headers=headers,
+                                     max_keys=999)
+        for k in rs:
+            yield k
+        marker = rs.next_marker
+        generation_marker = rs.next_generation_marker
+        more_results = rs.is_truncated
+
+class VersionedBucketListResultSet(object):
+    """
+    A resultset for listing versions within a bucket. Uses the
+    versioned_bucket_lister generator function and implements the iterator
+    interface. This transparently handles the results paging from GCS so
+    even if you have many thousands of keys within the bucket you can
+    iterate over all keys in a reasonably efficient manner.
+    """
+
+    def __init__(self, bucket=None, prefix='', delimiter='', marker='',
+                 generation_marker='', headers=None):
+        self.bucket = bucket
+        self.prefix = prefix
+        self.delimiter = delimiter
+        self.marker = marker
+        self.generation_marker = generation_marker
+        self.headers = headers
+
+    def __iter__(self):
+        return versioned_bucket_lister(self.bucket, prefix=self.prefix,
+                                       delimiter=self.delimiter,
+                                       marker=self.marker,
+                                       generation_marker=self.generation_marker,
+                                       headers=self.headers)
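A sketch of iterating every object version with the result set above (bucket name is illustrative; versioning must be enabled on the bucket):

    import boto
    from boto.gs.bucketlistresultset import VersionedBucketListResultSet

    bucket = boto.connect_gs().get_bucket('example-versioned-bucket')
    for key in VersionedBucketListResultSet(bucket, prefix='logs/'):
        print(key.name, key.generation)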
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/gs/connection.py b/desktop/core/ext-py/boto-2.38.0/boto/gs/connection.py
new file mode 100755
index 0000000000000000000000000000000000000000..9a2e4a2bbb6e49c9fe7143dc82cfd11c50af5d51
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/gs/connection.py
@@ -0,0 +1,129 @@
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.gs.bucket import Bucket
+from boto.s3.connection import S3Connection
+from boto.s3.connection import SubdomainCallingFormat
+from boto.s3.connection import check_lowercase_bucketname
+from boto.utils import get_utf8_value
+
+class Location(object):
+    DEFAULT = 'US'
+    EU = 'EU'
+
+class GSConnection(S3Connection):
+
+    DefaultHost = 'storage.googleapis.com'
+    QueryString = 'Signature=%s&Expires=%d&GoogleAccessId=%s'
+
+    def __init__(self, gs_access_key_id=None, gs_secret_access_key=None,
+                 is_secure=True, port=None, proxy=None, proxy_port=None,
+                 proxy_user=None, proxy_pass=None,
+                 host=DefaultHost, debug=0, https_connection_factory=None,
+                 calling_format=SubdomainCallingFormat(), path='/',
+                 suppress_consec_slashes=True):
+        super(GSConnection, self).__init__(
+            gs_access_key_id, gs_secret_access_key,
+            is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
+            host, debug, https_connection_factory, calling_format, path,
+            "google", Bucket,
+            suppress_consec_slashes=suppress_consec_slashes)
+
+    def create_bucket(self, bucket_name, headers=None,
+                      location=Location.DEFAULT, policy=None,
+                      storage_class='STANDARD'):
+        """
+        Creates a new bucket. By default it's located in the USA. You can
+        pass Location.EU to create a bucket in the EU. You can also pass
+        a LocationConstraint for where the bucket should be located, and
+        a StorageClass describing how the data should be stored.
+
+        :type bucket_name: string
+        :param bucket_name: The name of the new bucket.
+
+        :type headers: dict
+        :param headers: Additional headers to pass along with the request
+            to GCS.
+
+        :type location: :class:`boto.gs.connection.Location`
+        :param location: The location of the new bucket.
+
+        :type policy: :class:`boto.gs.acl.CannedACLStrings`
+        :param policy: A canned ACL policy that will be applied to the new
+            bucket in GCS.
+
+        :type storage_class: string
+        :param storage_class: Either 'STANDARD' or
+            'DURABLE_REDUCED_AVAILABILITY'.
+
+        """
+        check_lowercase_bucketname(bucket_name)
+
+        if policy:
+            if headers:
+                headers[self.provider.acl_header] = policy
+            else:
+                headers = {self.provider.acl_header: policy}
+        if not location:
+            location = Location.DEFAULT
+        location_elem = ('<LocationConstraint>%s</LocationConstraint>'
+                         % location)
+        if storage_class:
+            storage_class_elem = ('<StorageClass>%s</StorageClass>'
+                                  % storage_class)
+        else:
+            storage_class_elem = ''
+        data = ('<CreateBucketConfiguration>%s%s</CreateBucketConfiguration>'
+                % (location_elem, storage_class_elem))
+        response = self.make_request(
+            'PUT', get_utf8_value(bucket_name), headers=headers,
+            data=get_utf8_value(data))
+        body = response.read()
+        if response.status == 409:
+            raise self.provider.storage_create_error(
+                response.status, response.reason, body)
+        if response.status == 200:
+            return self.bucket_class(self, bucket_name)
+        else:
+            raise self.provider.storage_response_error(
+                response.status, response.reason, body)
+
+    def get_bucket(self, bucket_name, validate=True, headers=None):
+        """
+        Retrieves a bucket by name.
+
+        If the bucket does not exist, an ``S3ResponseError`` will be raised.
+        If you are unsure if the bucket exists or not, you can use the
+        ``S3Connection.lookup`` method, which will either return a valid
+        bucket or ``None``.
+
+        :type bucket_name: string
+        :param bucket_name: The name of the bucket
+
+        :type headers: dict
+        :param headers: Additional headers to pass along with the request to
+            GCS.
+
+        :type validate: boolean
+        :param validate: If ``True``, it will try to fetch all keys within
+            the given bucket. (Default: ``True``)
+        """
+        bucket = self.bucket_class(self, bucket_name)
+        if validate:
+            bucket.get_all_keys(headers, maxkeys=0)
+        return bucket
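A sketch of the connection entry points (assumes GS credentials in the boto config; bucket name is illustrative):

    import boto
    from boto.gs.connection import Location

    conn = boto.connect_gs()  # convenience wrapper around GSConnection
    bucket = conn.create_bucket('example-new-bucket', location=Location.EU,
                                storage_class='STANDARD')
    same_bucket = conn.get_bucket('example-new-bucket')  # raises if missing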
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/gs/cors.py b/desktop/core/ext-py/boto-2.38.0/boto/gs/cors.py
new file mode 100755
index 0000000000000000000000000000000000000000..1c5cfd0c7bda5b9365b95a38eb692ccf0b676642
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/gs/cors.py
@@ -0,0 +1,169 @@
+# Copyright 2012 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import types
+from boto.gs.user import User
+from boto.exception import InvalidCorsError
+from xml.sax import handler
+
+# Relevant tags for the CORS XML document.
+CORS_CONFIG = 'CorsConfig'
+CORS = 'Cors'
+ORIGINS = 'Origins'
+ORIGIN = 'Origin'
+METHODS = 'Methods'
+METHOD = 'Method'
+HEADERS = 'ResponseHeaders'
+HEADER = 'ResponseHeader'
+MAXAGESEC = 'MaxAgeSec'
+
+class Cors(handler.ContentHandler):
+    """Encapsulates the CORS configuration XML document"""
+    def __init__(self):
+        # List of CORS elements found within a CorsConfig element.
+        self.cors = []
+        # List of collections (e.g. Methods, ResponseHeaders, Origins)
+        # found within a CORS element. We use a list of lists here
+        # instead of a dictionary because the collections need to be
+        # preserved in the order in which they appear in the input XML
+        # document (and Python dictionary keys are inherently unordered).
+        # The elements on this list are two element tuples of the form
+        # (collection name, [list of collection contents]).
+        self.collections = []
+        # Lists of elements within a collection. Again a list is needed to
+        # preserve ordering but also because the same element may appear
+        # multiple times within a collection.
+        self.elements = []
+        # Dictionary mapping supported collection names to element types
+        # which may be contained within each.
+        self.legal_collections = {
+            ORIGINS: [ORIGIN],
+            METHODS: [METHOD],
+            HEADERS: [HEADER],
+            MAXAGESEC: []
+        }
+        # List of supported element types within any collection, used for
+        # checking validity of a parsed element name.
+        self.legal_elements = [ORIGIN, METHOD, HEADER]
+
+        self.parse_level = 0
+        self.collection = None
+        self.element = None
+
+    def validateParseLevel(self, tag, level):
+        """Verify parse level for a given tag."""
+        if self.parse_level != level:
+            raise InvalidCorsError('Invalid tag %s at parse level %d: ' %
+                                   (tag, self.parse_level))
+
+    def startElement(self, name, attrs, connection):
+        """SAX XML logic for parsing a newly found element."""
+        if name == CORS_CONFIG:
+            self.validateParseLevel(name, 0)
+            self.parse_level += 1
+        elif name == CORS:
+            self.validateParseLevel(name, 1)
+            self.parse_level += 1
+        elif name in self.legal_collections:
+            self.validateParseLevel(name, 2)
+            self.parse_level += 1
+            self.collection = name
+        elif name in self.legal_elements:
+            self.validateParseLevel(name, 3)
+            # Make sure this tag is found inside a collection tag.
+            if self.collection is None:
+                raise InvalidCorsError('Tag %s found outside collection' %
+                                       name)
+            # Make sure this tag is allowed for the current collection tag.
+            if name not in self.legal_collections[self.collection]:
+                raise InvalidCorsError('Tag %s not allowed in %s collection' %
+                                       (name, self.collection))
+            self.element = name
+        else:
+            raise InvalidCorsError('Unsupported tag ' + name)
+
+    def endElement(self, name, value, connection):
+        """SAX XML logic for parsing the end of an element."""
+        if name == CORS_CONFIG:
+            self.validateParseLevel(name, 1)
+            self.parse_level -= 1
+        elif name == CORS:
+            self.validateParseLevel(name, 2)
+            self.parse_level -= 1
+            # Terminating a CORS element, save any collections we found
+            # and re-initialize collections list.
+            self.cors.append(self.collections)
+            self.collections = []
+        elif name in self.legal_collections:
+            self.validateParseLevel(name, 3)
+            if name != self.collection:
+                raise InvalidCorsError('Mismatched start and end tags (%s/%s)'
+                                       % (self.collection, name))
+            self.parse_level -= 1
+            if not self.legal_collections[name]:
+                # If this collection doesn't contain any sub-elements, store
+                # a tuple of name and this tag's element value.
+                self.collections.append((name, value.strip()))
+            else:
+                # Otherwise, we're terminating a collection of sub-elements,
+                # so store a tuple of name and list of contained elements.
+                self.collections.append((name, self.elements))
+                self.elements = []
+            self.collection = None
+        elif name in self.legal_elements:
+            self.validateParseLevel(name, 3)
+            # Make sure this tag is found inside a collection tag.
+            if self.collection is None:
+                raise InvalidCorsError('Tag %s found outside collection' %
+                                       name)
+            # Make sure this end tag is allowed for the current collection tag.
+            if name not in self.legal_collections[self.collection]:
+                raise InvalidCorsError('Tag %s not allowed in %s collection' %
+                                       (name, self.collection))
+            if name != self.element:
+                raise InvalidCorsError('Mismatched start and end tags (%s/%s)'
+                                       % (self.element, name))
+            # Terminating an element tag, add it to the list of elements
+            # for the current collection.
+            self.elements.append((name, value.strip()))
+            self.element = None
+        else:
+            raise InvalidCorsError('Unsupported end tag ' + name)
+
+    def to_xml(self):
+        """Convert CORS object into XML string representation."""
+        s = '<' + CORS_CONFIG + '>'
+        for collections in self.cors:
+            s += '<' + CORS + '>'
+            for (collection, elements_or_value) in collections:
+                assert collection is not None
+                s += '<' + collection + '>'
+                # If the collection holds an atomic string value, append it
+                # directly; otherwise append each contained element wrapped
+                # in its own named tags.
+                if isinstance(elements_or_value, str):
+                    s += elements_or_value
+                else:
+                    for (name, value) in elements_or_value:
+                        assert name is not None
+                        assert value is not None
+                        s += '<' + name + '>' + value + '</' + name + '>'
+                s += '</' + collection + '>'
+            s += '</' + CORS + '>'
+        s += '</' + CORS_CONFIG + '>'
+        return s
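The Cors handler above is typically driven through the bucket-level CORS helpers added elsewhere in this patch; a hedged sketch, assuming the bucket already has a CORS document configured:

    import boto

    bucket = boto.connect_gs().get_bucket('example-bucket')
    cors = bucket.get_cors()  # parsed by the Cors ContentHandler above
    print(cors.to_xml())      # round-trips back to an XML document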
+ """ + + def __init__(self, bucket=None, name=None, generation=None): + super(Key, self).__init__(bucket=bucket, name=name) + self.generation = generation + self.meta_generation = None + self.cloud_hashes = {} + self.component_count = None + + def __repr__(self): + if self.generation and self.metageneration: + ver_str = '#%s.%s' % (self.generation, self.metageneration) + else: + ver_str = '' + if self.bucket: + return '' % (self.bucket.name, self.name, ver_str) + else: + return '' % (self.name, ver_str) + + def endElement(self, name, value, connection): + if name == 'Key': + self.name = value + elif name == 'ETag': + self.etag = value + elif name == 'IsLatest': + if value == 'true': + self.is_latest = True + else: + self.is_latest = False + elif name == 'LastModified': + self.last_modified = value + elif name == 'Size': + self.size = int(value) + elif name == 'StorageClass': + self.storage_class = value + elif name == 'Owner': + pass + elif name == 'VersionId': + self.version_id = value + elif name == 'Generation': + self.generation = value + elif name == 'MetaGeneration': + self.metageneration = value + else: + setattr(self, name, value) + + def handle_version_headers(self, resp, force=False): + self.metageneration = resp.getheader('x-goog-metageneration', None) + self.generation = resp.getheader('x-goog-generation', None) + + def handle_restore_headers(self, response): + return + + def handle_addl_headers(self, headers): + for key, value in headers: + if key == 'x-goog-hash': + for hash_pair in value.split(','): + alg, b64_digest = hash_pair.strip().split('=', 1) + self.cloud_hashes[alg] = binascii.a2b_base64(b64_digest) + elif key == 'x-goog-component-count': + self.component_count = int(value) + elif key == 'x-goog-generation': + self.generation = value + # Use x-goog-stored-content-encoding and + # x-goog-stored-content-length to indicate original content length + # and encoding, which are transcoding-invariant (so are preferable + # over using content-encoding and size headers). + elif key == 'x-goog-stored-content-encoding': + self.content_encoding = value + elif key == 'x-goog-stored-content-length': + self.size = int(value) + + def open_read(self, headers=None, query_args='', + override_num_retries=None, response_headers=None): + """ + Open this key for reading + + :type headers: dict + :param headers: Headers to pass in the web request + + :type query_args: string + :param query_args: Arguments to pass in the query string + (ie, 'torrent') + + :type override_num_retries: int + :param override_num_retries: If not None will override configured + num_retries parameter for underlying GET. + + :type response_headers: dict + :param response_headers: A dictionary containing HTTP + headers/values that will override any headers associated + with the stored object in the response. See + http://goo.gl/EWOPb for details. + """ + # For GCS we need to include the object generation in the query args. + # The rest of the processing is handled in the parent class. 
+        if self.generation:
+            if query_args:
+                query_args += '&'
+            query_args += 'generation=%s' % self.generation
+        super(Key, self).open_read(headers=headers, query_args=query_args,
+                                   override_num_retries=override_num_retries,
+                                   response_headers=response_headers)
+
+    def get_file(self, fp, headers=None, cb=None, num_cb=10,
+                 torrent=False, version_id=None, override_num_retries=None,
+                 response_headers=None, hash_algs=None):
+        query_args = None
+        if self.generation:
+            query_args = ['generation=%s' % self.generation]
+        self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
+                                override_num_retries=override_num_retries,
+                                response_headers=response_headers,
+                                hash_algs=hash_algs,
+                                query_args=query_args)
+
+    def get_contents_to_file(self, fp, headers=None,
+                             cb=None, num_cb=10,
+                             torrent=False,
+                             version_id=None,
+                             res_download_handler=None,
+                             response_headers=None,
+                             hash_algs=None):
+        """
+        Retrieve an object from GCS using the name of the Key object as the
+        key in GCS. Write the contents of the object to the file pointed
+        to by 'fp'.
+
+        :type fp: file-like object
+        :param fp: The file to write the object's contents to.
+
+        :type headers: dict
+        :param headers: additional HTTP headers that will be sent with
+            the GET request.
+
+        :type cb: function
+        :param cb: a callback function that will be called to report
+            progress on the upload. The callback should accept two
+            integer parameters, the first representing the number of
+            bytes that have been successfully transmitted to GCS and
+            the second representing the size of the to be transmitted
+            object.
+
+        :type num_cb: int
+        :param num_cb: (optional) If a callback is specified with the
+            cb parameter this parameter determines the granularity of
+            the callback by defining the maximum number of times the
+            callback will be called during the file transfer.
+
+        :type torrent: bool
+        :param torrent: If True, returns the contents of a torrent
+            file as a string.
+
+        :type res_download_handler: ResumableDownloadHandler
+        :param res_download_handler: If provided, this handler will
+            perform the download.
+
+        :type response_headers: dict
+        :param response_headers: A dictionary containing HTTP
+            headers/values that will override any headers associated
+            with the stored object in the response. See
+            http://goo.gl/sMkcC for details.
+        """
+        if self.bucket is not None:
+            if res_download_handler:
+                res_download_handler.get_file(self, fp, headers, cb, num_cb,
+                                              torrent=torrent,
+                                              version_id=version_id,
+                                              hash_algs=hash_algs)
+            else:
+                self.get_file(fp, headers, cb, num_cb, torrent=torrent,
+                              version_id=version_id,
+                              response_headers=response_headers,
+                              hash_algs=hash_algs)
+
+    def compute_hash(self, fp, algorithm, size=None):
+        """
+        :type fp: file
+        :param fp: File pointer to the file to hash. The file
+            pointer will be reset to the same position before the
+            method returns.
+
+        :type algorithm: zero-argument constructor for hash objects that
+            implements update() and digest() (e.g. hashlib.md5)
+
+        :type size: int
+        :param size: (optional) The maximum number of bytes to read
+            from the file pointer (fp). This is useful when uploading
+            a file in multiple parts where the file is being split
+            in place into different parts. Fewer bytes may be available.
+        """
+        hex_digest, b64_digest, data_size = compute_hash(
+            fp, size=size, hash_algorithm=algorithm)
+        # The internal implementation of compute_hash() needs to return the
+        # data size, but we don't want to return that value to the external
+        # caller because it changes the class interface (i.e. it might
+        # break some code), so we consume the third tuple value here and
+        # return the remainder of the tuple to the caller, thereby
+        # preserving the existing interface.
+        self.size = data_size
+        return (hex_digest, b64_digest)
+
+    def send_file(self, fp, headers=None, cb=None, num_cb=10,
+                  query_args=None, chunked_transfer=False, size=None,
+                  hash_algs=None):
+        """
+        Upload a file to GCS.
+
+        :type fp: file
+        :param fp: The file pointer to upload. The file pointer must
+            point at the offset from which you wish to upload,
+            i.e. if uploading the full file, it should point at the
+            start of the file. Normally when a file is opened for
+            reading, the fp will point at the first byte. See the
+            bytes parameter below for more info.
+
+        :type headers: dict
+        :param headers: The headers to pass along with the PUT request
+
+        :type num_cb: int
+        :param num_cb: (optional) If a callback is specified with the
+            cb parameter this parameter determines the granularity of
+            the callback by defining the maximum number of times the
+            callback will be called during the file
+            transfer. Providing a negative integer will cause your
+            callback to be called with each buffer read.
+
+        :type query_args: string
+        :param query_args: Arguments to pass in the query string.
+
+        :type chunked_transfer: boolean
+        :param chunked_transfer: (optional) If true, we use chunked
+            Transfer-Encoding.
+
+        :type size: int
+        :param size: (optional) The maximum number of bytes to read
+            from the file pointer (fp). This is useful when uploading
+            a file in multiple parts where you are splitting the file
+            up into different ranges to be uploaded. If not specified,
+            the default behaviour is to read all bytes from the file
+            pointer. Fewer bytes may be available.
+
+        :type hash_algs: dictionary
+        :param hash_algs: (optional) Dictionary of hash algorithms and
+            corresponding hashing class that implements update() and
+            digest(). Defaults to {'md5': hashlib.md5}.
+        """
+        self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
+                                 query_args=query_args,
+                                 chunked_transfer=chunked_transfer, size=size,
+                                 hash_algs=hash_algs)
+
+    def delete(self, headers=None):
+        return self.bucket.delete_key(self.name, version_id=self.version_id,
+                                      generation=self.generation,
+                                      headers=headers)
+
+    def add_email_grant(self, permission, email_address):
+        """
+        Convenience method that provides a quick way to add an email grant
+        to a key. This method retrieves the current ACL, creates a new
+        grant based on the parameters passed in, adds that grant to the ACL
+        and then PUTs the new ACL back to GS.
+
+        :type permission: string
+        :param permission: The permission being granted. Should be one of:
+            READ|FULL_CONTROL
+            See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
+            for more details on permissions.
+
+        :type email_address: string
+        :param email_address: The email address associated with the Google
+            account to which you are granting the permission.
+        """
+        acl = self.get_acl()
+        acl.add_email_grant(permission, email_address)
+        self.set_acl(acl)
+
+    def add_user_grant(self, permission, user_id):
+        """
+        Convenience method that provides a quick way to add a canonical user
+        grant to a key. This method retrieves the current ACL, creates a new
+        grant based on the parameters passed in, adds that grant to the ACL
+        and then PUTs the new ACL back to GS.
+
+        :type permission: string
+        :param permission: The permission being granted. Should be one of:
+            READ|FULL_CONTROL
+            See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
+            for more details on permissions.
+
+        :type user_id: string
+        :param user_id: The canonical user id associated with the GS account
+            to which you are granting the permission.
+        """
+        acl = self.get_acl()
+        acl.add_user_grant(permission, user_id)
+        self.set_acl(acl)
+
+    def add_group_email_grant(self, permission, email_address, headers=None):
+        """
+        Convenience method that provides a quick way to add an email group
+        grant to a key. This method retrieves the current ACL, creates a new
+        grant based on the parameters passed in, adds that grant to the ACL
+        and then PUTs the new ACL back to GS.
+
+        :type permission: string
+        :param permission: The permission being granted. Should be one of:
+            READ|FULL_CONTROL
+            See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
+            for more details on permissions.
+
+        :type email_address: string
+        :param email_address: The email address associated with the Google
+            Group to which you are granting the permission.
+        """
+        acl = self.get_acl(headers=headers)
+        acl.add_group_email_grant(permission, email_address)
+        self.set_acl(acl, headers=headers)
+
+    def add_group_grant(self, permission, group_id):
+        """
+        Convenience method that provides a quick way to add a canonical
+        group grant to a key. This method retrieves the current ACL, creates
+        a new grant based on the parameters passed in, adds that grant to
+        the ACL and then PUTs the new ACL back to GS.
+
+        :type permission: string
+        :param permission: The permission being granted. Should be one of:
+            READ|FULL_CONTROL
+            See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
+            for more details on permissions.
+
+        :type group_id: string
+        :param group_id: The canonical group id associated with the Google
+            Groups account you are granting the permission to.
+        """
+        acl = self.get_acl()
+        acl.add_group_grant(permission, group_id)
+        self.set_acl(acl)
+
+    def set_contents_from_file(self, fp, headers=None, replace=True,
+                               cb=None, num_cb=10, policy=None, md5=None,
+                               res_upload_handler=None, size=None,
+                               rewind=False, if_generation=None):
+        """
+        Store an object in GS using the name of the Key object as the
+        key in GS and the contents of the file pointed to by 'fp' as the
+        contents.
+
+        :type fp: file
+        :param fp: The file whose contents are to be uploaded.
+
+        :type headers: dict
+        :param headers: (optional) Additional HTTP headers to be sent with
+            the PUT request.
+
+        :type replace: bool
+        :param replace: (optional) If this parameter is False, the method
+            will first check to see if an object exists in the bucket with
+            the same key. If it does, it won't overwrite it. The default
+            value is True which will overwrite the object.
+
+        :type cb: function
+        :param cb: (optional) Callback function that will be called to
+            report progress on the upload. The callback should accept two
+            integer parameters, the first representing the number of bytes
+            that have been successfully transmitted to GS and the second
+            representing the total number of bytes that need to be
+            transmitted.
+
+        :type num_cb: int
+        :param num_cb: (optional) If a callback is specified with the cb
+            parameter, this parameter determines the granularity of the
+            callback by defining the maximum number of times the callback
+            will be called during the file transfer.
+
+        :type policy: :class:`boto.gs.acl.CannedACLStrings`
+        :param policy: (optional) A canned ACL policy that will be applied
+            to the new key in GS.
+
+        :type md5: tuple
+        :param md5: (optional) A tuple containing the hexdigest version of
+            the MD5 checksum of the file as the first element and the
+            Base64-encoded version of the plain checksum as the second
+            element. This is the same format returned by the compute_md5
+            method.
+
+            If you need to compute the MD5 for any reason prior to upload,
+            it's silly to have to do it twice so this param, if present,
+            will be used as the MD5 values of the file. Otherwise, the
+            checksum will be computed.
+
+        :type res_upload_handler: :py:class:`boto.gs.resumable_upload_handler.ResumableUploadHandler`
+        :param res_upload_handler: (optional) If provided, this handler will
+            perform the upload.
+
+        :type size: int
+        :param size: (optional) The maximum number of bytes to read from the
+            file pointer (fp). This is useful when uploading a file in
+            multiple parts where you are splitting the file up into
+            different ranges to be uploaded. If not specified, the default
+            behaviour is to read all bytes from the file pointer. Fewer
+            bytes may be available.
+
+            Notes:
+
+            1. The "size" parameter currently cannot be used when a
+               resumable upload handler is given but is still useful for
+               uploading part of a file as implemented by the parent class.
+            2. At present Google Cloud Storage does not support multipart
+               uploads.
+
+        :type rewind: bool
+        :param rewind: (optional) If True, the file pointer (fp) will be
+            rewound to the start before any bytes are read from it. The
+            default behaviour is False which reads from the current
+            position of the file pointer (fp).
+
+        :type if_generation: int
+        :param if_generation: (optional) If set to a generation number, the
+            object will only be written to if its current generation number
+            is this value. If set to the value 0, the object will only be
+            written if it doesn't already exist.
+
+        :rtype: int
+        :return: The number of bytes written to the key.
+
+        TODO: At some point we should refactor the Bucket and Key classes,
+        to move functionality common to all providers into a parent class,
+        and provider-specific functionality into subclasses (rather than
+        just overriding/sharing code the way it currently works).
+        """
+        provider = self.bucket.connection.provider
+        if res_upload_handler and size:
+            # could use size instead of file_length if provided but...
+            raise BotoClientError(
+                '"size" param not supported for resumable uploads.')
+        headers = headers or {}
+        if policy:
+            headers[provider.acl_header] = policy
+
+        if rewind:
+            # caller requests reading from beginning of fp.
+            fp.seek(0, os.SEEK_SET)
+        else:
+            # The following seek/tell/seek logic is intended
+            # to detect applications using the older interface to
+            # set_contents_from_file(), which automatically rewound the
+            # file each time the Key was reused. This changed with commit
+            # 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
+            # split into multiple parts and uploaded in parallel, and at
+            # the time of that commit this check was added because otherwise
+            # older programs would get a success status and upload an empty
+            # object. Unfortunately, it's very inefficient for fp's
+            # implemented by KeyFile (used, for example, by gsutil when
+            # copying between providers). So, we skip the check for the
+            # KeyFile case.
+            # TODO: At some point consider removing this seek/tell/seek
+            # logic, after enough time has passed that it's unlikely any
+            # programs remain that assume the older auto-rewind interface.
+            if not isinstance(fp, KeyFile):
+                spos = fp.tell()
+                fp.seek(0, os.SEEK_END)
+                if fp.tell() == spos:
+                    fp.seek(0, os.SEEK_SET)
+                    if fp.tell() != spos:
+                        # Raise an exception as this is likely a programming
+                        # error whereby there is data before the fp but
+                        # nothing after it.
+                        fp.seek(spos)
+                        raise AttributeError('fp is at EOF. Use rewind option '
+                                             'or seek() to data start.')
+                # seek back to the correct position.
+                fp.seek(spos)
+
+        if hasattr(fp, 'name'):
+            self.path = fp.name
+        if self.bucket is not None:
+            if isinstance(fp, KeyFile):
+                # Avoid EOF seek for KeyFile case as it's very inefficient.
+                key = fp.getkey()
+                size = key.size - fp.tell()
+                self.size = size
+                # At present both GCS and S3 use MD5 for the etag for
+                # non-multipart-uploaded objects. If the etag is 32 hex
+                # chars use it as an MD5, to avoid having to read the file
+                # twice while transferring.
+                if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
+                    etag = key.etag.strip('"')
+                    md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
+            if size:
+                self.size = size
+            else:
+                # If md5 is provided, still need to size so
+                # calculate based on bytes to end of content
+                spos = fp.tell()
+                fp.seek(0, os.SEEK_END)
+                self.size = fp.tell() - spos
+                fp.seek(spos)
+                size = self.size
+
+            if md5 is None:
+                md5 = self.compute_md5(fp, size)
+            self.md5 = md5[0]
+            self.base64md5 = md5[1]
+
+            if self.name is None:
+                self.name = self.md5
+
+            if not replace:
+                if self.bucket.lookup(self.name):
+                    return
+
+            if if_generation is not None:
+                headers['x-goog-if-generation-match'] = str(if_generation)
+
+            if res_upload_handler:
+                res_upload_handler.send_file(self, fp, headers, cb, num_cb)
+            else:
+                # Not a resumable transfer so use basic send_file mechanism.
+                self.send_file(fp, headers, cb, num_cb, size=size)
+
+    def set_contents_from_filename(self, filename, headers=None,
+                                   replace=True, cb=None, num_cb=10,
+                                   policy=None, md5=None,
+                                   reduced_redundancy=None,
+                                   res_upload_handler=None,
+                                   if_generation=None):
+        """
+        Store an object in GS using the name of the Key object as the
+        key in GS and the contents of the file named by 'filename'.
+        See set_contents_from_file method for details about the
+        parameters.
+
+        :type filename: string
+        :param filename: The name of the file that you want to put onto GS.
+
+        :type headers: dict
+        :param headers: (optional) Additional headers to pass along with
+            the request to GS.
+
+        :type replace: bool
+        :param replace: (optional) If True, replaces the contents of the
+            file if it already exists.
+
+        :type cb: function
+        :param cb: (optional) Callback function that will be called to
+            report progress on the upload. The callback should accept two
+            integer parameters, the first representing the number of bytes
+            that have been successfully transmitted to GS and the second
+            representing the total number of bytes that need to be
+            transmitted.
+
+        :type num_cb: int
+        :param num_cb: (optional) If a callback is specified with the cb
+            parameter this parameter determines the granularity of the
+            callback by defining the maximum number of times the callback
+            will be called during the file transfer.
+
+        :type policy: :py:attribute:`boto.gs.acl.CannedACLStrings`
+        :param policy: (optional) A canned ACL policy that will be applied
+            to the new key in GS.
+
+        :type md5: tuple
+        :param md5: (optional) A tuple containing the hexdigest version of
+            the MD5 checksum of the file as the first element and the
+            Base64-encoded version of the plain checksum as the second
+            element. This is the same format returned by the compute_md5
+            method.
+
+            If you need to compute the MD5 for any reason prior to upload,
+            it's silly to have to do it twice so this param, if present,
+            will be used as the MD5 values of the file. Otherwise, the
+            checksum will be computed.
+
+        :type res_upload_handler: :py:class:`boto.gs.resumable_upload_handler.ResumableUploadHandler`
+        :param res_upload_handler: (optional) If provided, this handler will
+            perform the upload.
+
+        :type if_generation: int
+        :param if_generation: (optional) If set to a generation number, the
+            object will only be written to if its current generation number
+            is this value. If set to the value 0, the object will only be
+            written if it doesn't already exist.
+        """
+        # Clear out any previously computed hashes, since we are setting the
+        # content.
+        self.local_hashes = {}
+
+        with open(filename, 'rb') as fp:
+            self.set_contents_from_file(fp, headers, replace, cb, num_cb,
+                                        policy, md5, res_upload_handler,
+                                        if_generation=if_generation)
+
+    def set_contents_from_string(self, s, headers=None, replace=True,
+                                 cb=None, num_cb=10, policy=None, md5=None,
+                                 if_generation=None):
+        """
+        Store an object in GCS using the name of the Key object as the
+        key in GCS and the string 's' as the contents.
+        See set_contents_from_file method for details about the
+        parameters.
+
+        :type headers: dict
+        :param headers: Additional headers to pass along with the
+            request to GCS.
+
+        :type replace: bool
+        :param replace: If True, replaces the contents of the file if
+            it already exists.
+
+        :type cb: function
+        :param cb: a callback function that will be called to report
+            progress on the upload. The callback should accept two integer
+            parameters, the first representing the number of bytes that
+            have been successfully transmitted to GCS and the second
+            representing the size of the to be transmitted object.
+
+        :type num_cb: int
+        :param num_cb: (optional) If a callback is specified with the cb
+            parameter this parameter determines the granularity of the
+            callback by defining the maximum number of times the callback
+            will be called during the file transfer.
+
+        :type policy: :class:`boto.gs.acl.CannedACLStrings`
+        :param policy: A canned ACL policy that will be applied to the
+            new key in GCS.
+
+        :type md5: tuple
+        :param md5: A tuple containing the hexdigest version of the MD5
+            checksum of the file as the first element and the
+            Base64-encoded version of the plain checksum as the second
+            element. This is the same format returned by the compute_md5
+            method. If you need to compute the MD5 for any reason prior to
+            upload, it's silly to have to do it twice so this param, if
+            present, will be used as the MD5 values of the file. Otherwise,
+            the checksum will be computed.
+
+        :type if_generation: int
+        :param if_generation: (optional) If set to a generation number, the
+            object will only be written to if its current generation number
+            is this value. If set to the value 0, the object will only be
+            written if it doesn't already exist.
+        """
+
+        # Clear out any previously computed md5 hashes, since we are setting
+        # the content.
+        self.md5 = None
+        self.base64md5 = None
+
+        fp = StringIO(get_utf8_value(s))
+        r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
+                                        policy, md5,
+                                        if_generation=if_generation)
+        fp.close()
+        return r
+
+    def set_contents_from_stream(self, *args, **kwargs):
+        """
+        Store an object using the name of the Key object as the key in
+        the cloud and the contents of the data stream pointed to by 'fp'
+        as the contents.
+
+        The stream object is not seekable and total size is not known.
+        This has the implication that we can't specify the
+        Content-Size and Content-MD5 in the header. So for huge
+        uploads, the delay in calculating MD5 is avoided but with a
+        penalty of inability to verify the integrity of the uploaded
+        data.
+
+        :type fp: file
+        :param fp: the file whose contents are to be uploaded
+
+        :type headers: dict
+        :param headers: additional HTTP headers to be sent with the
+            PUT request.
+
+        :type replace: bool
+        :param replace: If this parameter is False, the method will first
+            check to see if an object exists in the bucket with the same
+            key. If it does, it won't overwrite it. The default value is
+            True which will overwrite the object.
+
+        :type cb: function
+        :param cb: a callback function that will be called to report
+            progress on the upload. The callback should accept two integer
+            parameters, the first representing the number of bytes that
+            have been successfully transmitted to GS and the second
+            representing the total number of bytes that need to be
+            transmitted.
+
+        :type num_cb: int
+        :param num_cb: (optional) If a callback is specified with the
+            cb parameter, this parameter determines the granularity of
+            the callback by defining the maximum number of times the
+            callback will be called during the file transfer.
+
+        :type policy: :class:`boto.gs.acl.CannedACLStrings`
+        :param policy: A canned ACL policy that will be applied to the new
+            key in GS.
+
+        :type size: int
+        :param size: (optional) The maximum number of bytes to read from
+            the file pointer (fp). This is useful when uploading a file in
+            multiple parts where you are splitting the file up into
+            different ranges to be uploaded. If not specified, the default
+            behaviour is to read all bytes from the file pointer. Fewer
+            bytes may be available.
+
+        :type if_generation: int
+        :param if_generation: (optional) If set to a generation number, the
+            object will only be written to if its current generation number
+            is this value. If set to the value 0, the object will only be
+            written if it doesn't already exist.
+        """
+        if_generation = kwargs.pop('if_generation', None)
+        if if_generation is not None:
+            headers = kwargs.get('headers', {})
+            headers['x-goog-if-generation-match'] = str(if_generation)
+            kwargs['headers'] = headers
+        super(Key, self).set_contents_from_stream(*args, **kwargs)
+
+    def set_acl(self, acl_or_str, headers=None, generation=None,
+                if_generation=None, if_metageneration=None):
+        """Sets the ACL for this object.
+
+        :type acl_or_str: string or :class:`boto.gs.acl.ACL`
+        :param acl_or_str: A canned ACL string (see
+            :data:`~.gs.acl.CannedACLStrings`) or an ACL object.
+
+        :type headers: dict
+        :param headers: Additional headers to set during the request.
+
+        :type generation: int
+        :param generation: If specified, sets the ACL for a specific
+            generation of a versioned object. If not specified, the current
+            version is modified.
+
+        :type if_generation: int
+        :param if_generation: (optional) If set to a generation number, the
+            acl will only be updated if its current generation number is
+            this value.
+
+        :type if_metageneration: int
+        :param if_metageneration: (optional) If set to a metageneration
+            number, the acl will only be updated if its current
+            metageneration number is this value.
+        """
+        if self.bucket is not None:
+            self.bucket.set_acl(acl_or_str, self.name, headers=headers,
+                                generation=generation,
+                                if_generation=if_generation,
+                                if_metageneration=if_metageneration)
+
+    def get_acl(self, headers=None, generation=None):
+        """Returns the ACL of this object.
+
+        :param dict headers: Additional headers to set during the request.
+
+        :param int generation: If specified, gets the ACL for a specific
+            generation of a versioned object. If not specified, the current
+            version is returned.
+
+        :rtype: :class:`.gs.acl.ACL`
+        """
+        if self.bucket is not None:
+            return self.bucket.get_acl(self.name, headers=headers,
+                                       generation=generation)
+
+    def get_xml_acl(self, headers=None, generation=None):
+        """Returns the ACL string of this object.
+
+        :param dict headers: Additional headers to set during the request.
+
+        :param int generation: If specified, gets the ACL for a specific
+            generation of a versioned object. If not specified, the current
+            version is returned.
+
+        :rtype: str
+        """
+        if self.bucket is not None:
+            return self.bucket.get_xml_acl(self.name, headers=headers,
+                                           generation=generation)
+
+    def set_xml_acl(self, acl_str, headers=None, generation=None,
+                    if_generation=None, if_metageneration=None):
+        """Sets this object's ACL to an XML string.
+
+        :type acl_str: string
+        :param acl_str: A string containing the ACL XML.
+
+        :type headers: dict
+        :param headers: Additional headers to set during the request.
+
+        :type generation: int
+        :param generation: If specified, sets the ACL for a specific
+            generation of a versioned object. If not specified, the current
+            version is modified.
+
+        :type if_generation: int
+        :param if_generation: (optional) If set to a generation number, the
+            acl will only be updated if its current generation number is
+            this value.
+
+        :type if_metageneration: int
+        :param if_metageneration: (optional) If set to a metageneration
+            number, the acl will only be updated if its current
+            metageneration number is this value.
+        """
+        if self.bucket is not None:
+            return self.bucket.set_xml_acl(acl_str, self.name,
+                                           headers=headers,
+                                           generation=generation,
+                                           if_generation=if_generation,
+                                           if_metageneration=if_metageneration)
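A sketch of a read-modify-write ACL update guarded by the generation and metageneration preconditions above, so a concurrent ACL change causes the PUT to fail rather than be silently overwritten (names are illustrative):

    import boto

    key = boto.connect_gs().get_bucket('example-bucket').get_key('data.bin')
    acl = key.get_acl(generation=key.generation)
    acl.add_email_grant('READ', 'someone@example.com')
    key.set_acl(acl, generation=key.generation,
                if_metageneration=key.metageneration)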
+ """ + if self.bucket is not None: + return self.bucket.set_canned_acl( + acl_str, + self.name, + headers=headers, + generation=generation, + if_generation=if_generation, + if_metageneration=if_metageneration + ) + + def compose(self, components, content_type=None, headers=None): + """Create a new object from a sequence of existing objects. + + The content of the object representing this Key will be the + concatenation of the given object sequence. For more detail, visit + + https://developers.google.com/storage/docs/composite-objects + + :type components list of Keys + :param components List of gs.Keys representing the component objects + + :type content_type (optional) string + :param content_type Content type for the new composite object. + """ + compose_req = [] + for key in components: + if key.bucket.name != self.bucket.name: + raise BotoClientError( + 'GCS does not support inter-bucket composing') + + generation_tag = '' + if key.generation: + generation_tag = ('%s' + % str(key.generation)) + compose_req.append('%s%s' % + (key.name, generation_tag)) + compose_req_xml = ('%s' % + ''.join(compose_req)) + headers = headers or {} + if content_type: + headers['Content-Type'] = content_type + resp = self.bucket.connection.make_request( + 'PUT', get_utf8_value(self.bucket.name), get_utf8_value(self.name), + headers=headers, query_args='compose', + data=get_utf8_value(compose_req_xml)) + if resp.status < 200 or resp.status > 299: + raise self.bucket.connection.provider.storage_response_error( + resp.status, resp.reason, resp.read()) + + # Return the generation so that the result URI can be built with this + # for automatic parallel uploads. + return resp.getheader('x-goog-generation') diff --git a/desktop/core/ext-py/boto-2.38.0/boto/gs/lifecycle.py b/desktop/core/ext-py/boto-2.38.0/boto/gs/lifecycle.py new file mode 100755 index 0000000000000000000000000000000000000000..65f7d65d4e956ecd386de3a1cba4868d3572e574 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/gs/lifecycle.py @@ -0,0 +1,227 @@ +# Copyright 2013 Google Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.exception import InvalidLifecycleConfigError + +# Relevant tags for the lifecycle configuration XML document. +LIFECYCLE_CONFIG = 'LifecycleConfiguration' +RULE = 'Rule' +ACTION = 'Action' +DELETE = 'Delete' +CONDITION = 'Condition' +AGE = 'Age' +CREATED_BEFORE = 'CreatedBefore' +NUM_NEWER_VERSIONS = 'NumberOfNewerVersions' +IS_LIVE = 'IsLive' + +# List of all action elements. 
+LEGAL_ACTIONS = [DELETE] +# List of all action parameter elements. +LEGAL_ACTION_PARAMS = [] +# List of all condition elements. +LEGAL_CONDITIONS = [AGE, CREATED_BEFORE, NUM_NEWER_VERSIONS, IS_LIVE] +# Dictionary mapping actions to supported action parameters for each action. +LEGAL_ACTION_ACTION_PARAMS = { + DELETE: [], +} + +class Rule(object): + """ + A lifecycle rule for a bucket. + + :ivar action: Action to be taken. + + :ivar action_params: A dictionary of action specific parameters. Each item + in the dictionary represents the name and value of an action parameter. + + :ivar conditions: A dictionary of conditions that specify when the action + should be taken. Each item in the dictionary represents the name and value + of a condition. + """ + + def __init__(self, action=None, action_params=None, conditions=None): + self.action = action + self.action_params = action_params or {} + self.conditions = conditions or {} + + # Name of the current enclosing tag (used to validate the schema). + self.current_tag = RULE + + def validateStartTag(self, tag, parent): + """Verify parent of the start tag.""" + if self.current_tag != parent: + raise InvalidLifecycleConfigError( + 'Invalid tag %s found inside %s tag' % (tag, self.current_tag)) + + def validateEndTag(self, tag): + """Verify end tag against the start tag.""" + if tag != self.current_tag: + raise InvalidLifecycleConfigError( + 'Mismatched start and end tags (%s/%s)' % + (self.current_tag, tag)) + + def startElement(self, name, attrs, connection): + if name == ACTION: + self.validateStartTag(name, RULE) + elif name in LEGAL_ACTIONS: + self.validateStartTag(name, ACTION) + # Verify there is only one action tag in the rule. + if self.action is not None: + raise InvalidLifecycleConfigError( + 'Only one action tag is allowed in each rule') + self.action = name + elif name in LEGAL_ACTION_PARAMS: + # Make sure this tag is found in an action tag. + if self.current_tag not in LEGAL_ACTIONS: + raise InvalidLifecycleConfigError( + 'Tag %s found outside of action' % name) + # Make sure this tag is allowed for the current action tag. + if name not in LEGAL_ACTION_ACTION_PARAMS[self.action]: + raise InvalidLifecycleConfigError( + 'Tag %s not allowed in action %s' % (name, self.action)) + elif name == CONDITION: + self.validateStartTag(name, RULE) + elif name in LEGAL_CONDITIONS: + self.validateStartTag(name, CONDITION) + # Verify there is no duplicate conditions. + if name in self.conditions: + raise InvalidLifecycleConfigError( + 'Found duplicate conditions %s' % name) + else: + raise InvalidLifecycleConfigError('Unsupported tag ' + name) + self.current_tag = name + + def endElement(self, name, value, connection): + self.validateEndTag(name) + if name == RULE: + # We have to validate the rule after it is fully populated because + # the action and condition elements could be in any order. + self.validate() + elif name == ACTION: + self.current_tag = RULE + elif name in LEGAL_ACTIONS: + self.current_tag = ACTION + elif name in LEGAL_ACTION_PARAMS: + self.current_tag = self.action + # Add the action parameter name and value to the dictionary. + self.action_params[name] = value.strip() + elif name == CONDITION: + self.current_tag = RULE + elif name in LEGAL_CONDITIONS: + self.current_tag = CONDITION + # Add the condition name and value to the dictionary. 
+            self.conditions[name] = value.strip()
+        else:
+            raise InvalidLifecycleConfigError('Unsupported end tag ' + name)
+
+    def validate(self):
+        """Validate the rule."""
+        if not self.action:
+            raise InvalidLifecycleConfigError(
+                'No action was specified in the rule')
+        if not self.conditions:
+            raise InvalidLifecycleConfigError(
+                'No condition was specified for action %s' % self.action)
+
+    def to_xml(self):
+        """Convert the rule into XML string representation."""
+        s = '<' + RULE + '>'
+        s += '<' + ACTION + '>'
+        if self.action_params:
+            s += '<' + self.action + '>'
+            for param in LEGAL_ACTION_PARAMS:
+                if param in self.action_params:
+                    s += ('<' + param + '>' + self.action_params[param] +
+                          '</' + param + '>')
+            s += '</' + self.action + '>'
+        else:
+            s += '<' + self.action + '/>'
+        s += '</' + ACTION + '>'
+        s += '<' + CONDITION + '>'
+        for condition in LEGAL_CONDITIONS:
+            if condition in self.conditions:
+                s += ('<' + condition + '>' + self.conditions[condition] +
+                      '</' + condition + '>')
+        s += '</' + CONDITION + '>'
+        s += '</' + RULE + '>'
+        return s
+
+class LifecycleConfig(list):
+    """
+    A container of rules associated with a lifecycle configuration.
+    """
+
+    def __init__(self):
+        # Track if root tag has been seen.
+        self.has_root_tag = False
+
+    def startElement(self, name, attrs, connection):
+        if name == LIFECYCLE_CONFIG:
+            if self.has_root_tag:
+                raise InvalidLifecycleConfigError(
+                    'Only one root tag is allowed in the XML')
+            self.has_root_tag = True
+        elif name == RULE:
+            if not self.has_root_tag:
+                raise InvalidLifecycleConfigError('Invalid root tag ' + name)
+            rule = Rule()
+            self.append(rule)
+            return rule
+        else:
+            raise InvalidLifecycleConfigError('Unsupported tag ' + name)
+
+    def endElement(self, name, value, connection):
+        if name == LIFECYCLE_CONFIG:
+            pass
+        else:
+            raise InvalidLifecycleConfigError('Unsupported end tag ' + name)
+
+    def to_xml(self):
+        """Convert LifecycleConfig object into XML string representation."""
+        s = '<?xml version="1.0" encoding="UTF-8"?>'
+        s += '<' + LIFECYCLE_CONFIG + '>'
+        for rule in self:
+            s += rule.to_xml()
+        s += '</' + LIFECYCLE_CONFIG + '>'
+        return s
+
+    def add_rule(self, action, action_params, conditions):
+        """
+        Add a rule to this Lifecycle configuration. This only adds the rule to
+        the local copy. To install the new rule(s) on the bucket, you need to
+        pass this Lifecycle config object to the configure_lifecycle method of
+        the Bucket object.
+
+        :type action: str
+        :param action: Action to be taken.
+
+        :type action_params: dict
+        :param action_params: A dictionary of action specific parameters. Each
+            item in the dictionary represents the name and value of an action
+            parameter.
+
+        :type conditions: dict
+        :param conditions: A dictionary of conditions that specify when the
+            action should be taken. Each item in the dictionary represents the
+            name and value of a condition.
+        """
+        rule = Rule(action, action_params, conditions)
+        self.append(rule)
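As a usage sketch of the two classes above (the bucket name and rule values are hypothetical; `configure_lifecycle` is the `Bucket` method the `add_rule` docstring points to):

    import boto
    from boto.gs.lifecycle import LifecycleConfig, DELETE, AGE

    config = LifecycleConfig()
    # One rule: delete objects older than 365 days. Delete takes no
    # action parameters, hence the empty dict.
    config.add_rule(DELETE, {}, {AGE: '365'})
    # to_xml() renders the same document shape that Rule/LifecycleConfig
    # parse above:
    # <?xml version="1.0" encoding="UTF-8"?><LifecycleConfiguration><Rule>
    # <Action><Delete/></Action><Condition><Age>365</Age></Condition>
    # </Rule></LifecycleConfiguration>
    boto.connect_gs().get_bucket('my-bucket').configure_lifecycle(config)

diff --git a/desktop/core/ext-py/boto-2.38.0/boto/gs/resumable_upload_handler.py b/desktop/core/ext-py/boto-2.38.0/boto/gs/resumable_upload_handler.py
new file mode 100644
index 0000000000000000000000000000000000000000..d74434693d8e0a87bfa8e249b46457361018e99c
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/gs/resumable_upload_handler.py
@@ -0,0 +1,679 @@
+# Copyright 2010 Google Inc.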
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import errno +import httplib +import os +import random +import re +import socket +import time +import urlparse +from hashlib import md5 +from boto import config, UserAgent +from boto.connection import AWSAuthConnection +from boto.exception import InvalidUriError +from boto.exception import ResumableTransferDisposition +from boto.exception import ResumableUploadException +from boto.s3.keyfile import KeyFile + +""" +Handler for Google Cloud Storage resumable uploads. See +http://code.google.com/apis/storage/docs/developer-guide.html#resumable +for details. + +Resumable uploads will retry failed uploads, resuming at the byte +count completed by the last upload attempt. If too many retries happen with +no progress (per configurable num_retries param), the upload will be +aborted in the current process. + +The caller can optionally specify a tracker_file_name param in the +ResumableUploadHandler constructor. If you do this, that file will +save the state needed to allow retrying later, in a separate process +(e.g., in a later run of gsutil). +""" + + +class ResumableUploadHandler(object): + + BUFFER_SIZE = 8192 + RETRYABLE_EXCEPTIONS = (httplib.HTTPException, IOError, socket.error, + socket.gaierror) + + # (start, end) response indicating server has nothing (upload protocol uses + # inclusive numbering). + SERVER_HAS_NOTHING = (0, -1) + + def __init__(self, tracker_file_name=None, num_retries=None): + """ + Constructor. Instantiate once for each uploaded file. + + :type tracker_file_name: string + :param tracker_file_name: optional file name to save tracker URI. + If supplied and the current process fails the upload, it can be + retried in a new process. If called with an existing file containing + a valid tracker URI, we'll resume the upload from this URI; else + we'll start a new resumable upload (and write the URI to this + tracker file). + + :type num_retries: int + :param num_retries: the number of times we'll re-try a resumable upload + making no progress. (Count resets every time we get progress, so + upload can span many more than this number of retries.) + """ + self.tracker_file_name = tracker_file_name + self.num_retries = num_retries + self.server_has_bytes = 0 # Byte count at last server check. + self.tracker_uri = None + if tracker_file_name: + self._load_tracker_uri_from_file() + # Save upload_start_point in instance state so caller can find how + # much was transferred by this ResumableUploadHandler (across retries). 
+ self.upload_start_point = None + + def _load_tracker_uri_from_file(self): + f = None + try: + f = open(self.tracker_file_name, 'r') + uri = f.readline().strip() + self._set_tracker_uri(uri) + except IOError as e: + # Ignore non-existent file (happens first time an upload + # is attempted on a file), but warn user for other errors. + if e.errno != errno.ENOENT: + # Will restart because self.tracker_uri is None. + print('Couldn\'t read URI tracker file (%s): %s. Restarting ' + 'upload from scratch.' % + (self.tracker_file_name, e.strerror)) + except InvalidUriError as e: + # Warn user, but proceed (will restart because + # self.tracker_uri is None). + print('Invalid tracker URI (%s) found in URI tracker file ' + '(%s). Restarting upload from scratch.' % + (uri, self.tracker_file_name)) + finally: + if f: + f.close() + + def _save_tracker_uri_to_file(self): + """ + Saves URI to tracker file if one was passed to constructor. + """ + if not self.tracker_file_name: + return + f = None + try: + with os.fdopen(os.open(self.tracker_file_name, + os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f: + f.write(self.tracker_uri) + except IOError as e: + raise ResumableUploadException( + 'Couldn\'t write URI tracker file (%s): %s.\nThis can happen' + 'if you\'re using an incorrectly configured upload tool\n' + '(e.g., gsutil configured to save tracker files to an ' + 'unwritable directory)' % + (self.tracker_file_name, e.strerror), + ResumableTransferDisposition.ABORT) + + def _set_tracker_uri(self, uri): + """ + Called when we start a new resumable upload or get a new tracker + URI for the upload. Saves URI and resets upload state. + + Raises InvalidUriError if URI is syntactically invalid. + """ + parse_result = urlparse.urlparse(uri) + if (parse_result.scheme.lower() not in ['http', 'https'] or + not parse_result.netloc): + raise InvalidUriError('Invalid tracker URI (%s)' % uri) + self.tracker_uri = uri + self.tracker_uri_host = parse_result.netloc + self.tracker_uri_path = '%s?%s' % ( + parse_result.path, parse_result.query) + self.server_has_bytes = 0 + + def get_tracker_uri(self): + """ + Returns upload tracker URI, or None if the upload has not yet started. + """ + return self.tracker_uri + + def get_upload_id(self): + """ + Returns the upload ID for the resumable upload, or None if the upload + has not yet started. + """ + # We extract the upload_id from the tracker uri. We could retrieve the + # upload_id from the headers in the response but this only works for + # the case where we get the tracker uri from the service. In the case + # where we get the tracker from the tracking file we need to do this + # logic anyway. + delim = '?upload_id=' + if self.tracker_uri and delim in self.tracker_uri: + return self.tracker_uri[self.tracker_uri.index(delim) + len(delim):] + else: + return None + + def _remove_tracker_file(self): + if (self.tracker_file_name and + os.path.exists(self.tracker_file_name)): + os.unlink(self.tracker_file_name) + + def _build_content_range_header(self, range_spec='*', length_spec='*'): + return 'bytes %s/%s' % (range_spec, length_spec) + + def _query_server_state(self, conn, file_length): + """ + Queries server to find out state of given upload. + + Note that this method really just makes special case use of the + fact that the upload server always returns the current start/end + state whenever a PUT doesn't complete. + + Returns HTTP response from sending request. + + Raises ResumableUploadException if problem querying server. 
+ """ + # Send an empty PUT so that server replies with this resumable + # transfer's state. + put_headers = {} + put_headers['Content-Range'] = ( + self._build_content_range_header('*', file_length)) + put_headers['Content-Length'] = '0' + return AWSAuthConnection.make_request(conn, 'PUT', + path=self.tracker_uri_path, + auth_path=self.tracker_uri_path, + headers=put_headers, + host=self.tracker_uri_host) + + def _query_server_pos(self, conn, file_length): + """ + Queries server to find out what bytes it currently has. + + Returns (server_start, server_end), where the values are inclusive. + For example, (0, 2) would mean that the server has bytes 0, 1, *and* 2. + + Raises ResumableUploadException if problem querying server. + """ + resp = self._query_server_state(conn, file_length) + if resp.status == 200: + # To handle the boundary condition where the server has the complete + # file, we return (server_start, file_length-1). That way the + # calling code can always simply read up through server_end. (If we + # didn't handle this boundary condition here, the caller would have + # to check whether server_end == file_length and read one fewer byte + # in that case.) + return (0, file_length - 1) # Completed upload. + if resp.status != 308: + # This means the server didn't have any state for the given + # upload ID, which can happen (for example) if the caller saved + # the tracker URI to a file and then tried to restart the transfer + # after that upload ID has gone stale. In that case we need to + # start a new transfer (and the caller will then save the new + # tracker URI to the tracker file). + raise ResumableUploadException( + 'Got non-308 response (%s) from server state query' % + resp.status, ResumableTransferDisposition.START_OVER) + got_valid_response = False + range_spec = resp.getheader('range') + if range_spec: + # Parse 'bytes=-' range_spec. + m = re.search('bytes=(\d+)-(\d+)', range_spec) + if m: + server_start = long(m.group(1)) + server_end = long(m.group(2)) + got_valid_response = True + else: + # No Range header, which means the server does not yet have + # any bytes. Note that the Range header uses inclusive 'from' + # and 'to' values. Since Range 0-0 would mean that the server + # has byte 0, omitting the Range header is used to indicate that + # the server doesn't have any bytes. + return self.SERVER_HAS_NOTHING + if not got_valid_response: + raise ResumableUploadException( + 'Couldn\'t parse upload server state query response (%s)' % + str(resp.getheaders()), ResumableTransferDisposition.START_OVER) + if conn.debug >= 1: + print('Server has: Range: %d - %d.' % (server_start, server_end)) + return (server_start, server_end) + + def _start_new_resumable_upload(self, key, headers=None): + """ + Starts a new resumable upload. + + Raises ResumableUploadException if any errors occur. + """ + conn = key.bucket.connection + if conn.debug >= 1: + print('Starting new resumable upload.') + self.server_has_bytes = 0 + + # Start a new resumable upload by sending a POST request with an + # empty body and the "X-Goog-Resumable: start" header. Include any + # caller-provided headers (e.g., Content-Type) EXCEPT Content-Length + # (and raise an exception if they tried to pass one, since it's + # a semantic error to specify it at this point, and if we were to + # include one now it would cause the server to expect that many + # bytes; the POST doesn't include the actual file bytes We set + # the Content-Length in the subsequent PUT, based on the uploaded + # file size. 
+ post_headers = {} + for k in headers: + if k.lower() == 'content-length': + raise ResumableUploadException( + 'Attempt to specify Content-Length header (disallowed)', + ResumableTransferDisposition.ABORT) + post_headers[k] = headers[k] + post_headers[conn.provider.resumable_upload_header] = 'start' + + resp = conn.make_request( + 'POST', key.bucket.name, key.name, post_headers) + # Get tracker URI from response 'Location' header. + body = resp.read() + + # Check for various status conditions. + if resp.status in [500, 503]: + # Retry status 500 and 503 errors after a delay. + raise ResumableUploadException( + 'Got status %d from attempt to start resumable upload. ' + 'Will wait/retry' % resp.status, + ResumableTransferDisposition.WAIT_BEFORE_RETRY) + elif resp.status != 200 and resp.status != 201: + raise ResumableUploadException( + 'Got status %d from attempt to start resumable upload. ' + 'Aborting' % resp.status, + ResumableTransferDisposition.ABORT) + + # Else we got 200 or 201 response code, indicating the resumable + # upload was created. + tracker_uri = resp.getheader('Location') + if not tracker_uri: + raise ResumableUploadException( + 'No resumable tracker URI found in resumable initiation ' + 'POST response (%s)' % body, + ResumableTransferDisposition.WAIT_BEFORE_RETRY) + self._set_tracker_uri(tracker_uri) + self._save_tracker_uri_to_file() + + def _upload_file_bytes(self, conn, http_conn, fp, file_length, + total_bytes_uploaded, cb, num_cb, headers): + """ + Makes one attempt to upload file bytes, using an existing resumable + upload connection. + + Returns (etag, generation, metageneration) from server upon success. + + Raises ResumableUploadException if any problems occur. + """ + buf = fp.read(self.BUFFER_SIZE) + if cb: + # The cb_count represents the number of full buffers to send between + # cb executions. + if num_cb > 2: + cb_count = file_length / self.BUFFER_SIZE / (num_cb-2) + elif num_cb < 0: + cb_count = -1 + else: + cb_count = 0 + i = 0 + cb(total_bytes_uploaded, file_length) + + # Build resumable upload headers for the transfer. Don't send a + # Content-Range header if the file is 0 bytes long, because the + # resumable upload protocol uses an *inclusive* end-range (so, sending + # 'bytes 0-0/1' would actually mean you're sending a 1-byte file). + if not headers: + put_headers = {} + else: + put_headers = headers.copy() + if file_length: + if total_bytes_uploaded == file_length: + range_header = self._build_content_range_header( + '*', file_length) + else: + range_header = self._build_content_range_header( + '%d-%d' % (total_bytes_uploaded, file_length - 1), + file_length) + put_headers['Content-Range'] = range_header + # Set Content-Length to the total bytes we'll send with this PUT. + put_headers['Content-Length'] = str(file_length - total_bytes_uploaded) + http_request = AWSAuthConnection.build_base_http_request( + conn, 'PUT', path=self.tracker_uri_path, auth_path=None, + headers=put_headers, host=self.tracker_uri_host) + http_conn.putrequest('PUT', http_request.path) + for k in put_headers: + http_conn.putheader(k, put_headers[k]) + http_conn.endheaders() + + # Turn off debug on http connection so upload content isn't included + # in debug stream. 
+ http_conn.set_debuglevel(0) + while buf: + http_conn.send(buf) + for alg in self.digesters: + self.digesters[alg].update(buf) + total_bytes_uploaded += len(buf) + if cb: + i += 1 + if i == cb_count or cb_count == -1: + cb(total_bytes_uploaded, file_length) + i = 0 + buf = fp.read(self.BUFFER_SIZE) + http_conn.set_debuglevel(conn.debug) + if cb: + cb(total_bytes_uploaded, file_length) + if total_bytes_uploaded != file_length: + # Abort (and delete the tracker file) so if the user retries + # they'll start a new resumable upload rather than potentially + # attempting to pick back up later where we left off. + raise ResumableUploadException( + 'File changed during upload: EOF at %d bytes of %d byte file.' % + (total_bytes_uploaded, file_length), + ResumableTransferDisposition.ABORT) + resp = http_conn.getresponse() + # Restore http connection debug level. + http_conn.set_debuglevel(conn.debug) + + if resp.status == 200: + # Success. + return (resp.getheader('etag'), + resp.getheader('x-goog-generation'), + resp.getheader('x-goog-metageneration')) + # Retry timeout (408) and status 500 and 503 errors after a delay. + elif resp.status in [408, 500, 503]: + disposition = ResumableTransferDisposition.WAIT_BEFORE_RETRY + else: + # Catch all for any other error codes. + disposition = ResumableTransferDisposition.ABORT + raise ResumableUploadException('Got response code %d while attempting ' + 'upload (%s)' % + (resp.status, resp.reason), disposition) + + def _attempt_resumable_upload(self, key, fp, file_length, headers, cb, + num_cb): + """ + Attempts a resumable upload. + + Returns (etag, generation, metageneration) from server upon success. + + Raises ResumableUploadException if any problems occur. + """ + (server_start, server_end) = self.SERVER_HAS_NOTHING + conn = key.bucket.connection + if self.tracker_uri: + # Try to resume existing resumable upload. + try: + (server_start, server_end) = ( + self._query_server_pos(conn, file_length)) + self.server_has_bytes = server_start + + if server_end: + # If the server already has some of the content, we need to + # update the digesters with the bytes that have already been + # uploaded to ensure we get a complete hash in the end. + print('Catching up hash digest(s) for resumed upload') + fp.seek(0) + # Read local file's bytes through position server has. For + # example, if server has (0, 3) we want to read 3-0+1=4 bytes. + bytes_to_go = server_end + 1 + while bytes_to_go: + chunk = fp.read(min(key.BufferSize, bytes_to_go)) + if not chunk: + raise ResumableUploadException( + 'Hit end of file during resumable upload hash ' + 'catchup. This should not happen under\n' + 'normal circumstances, as it indicates the ' + 'server has more bytes of this transfer\nthan' + ' the current file size. Restarting upload.', + ResumableTransferDisposition.START_OVER) + for alg in self.digesters: + self.digesters[alg].update(chunk) + bytes_to_go -= len(chunk) + + if conn.debug >= 1: + print('Resuming transfer.') + except ResumableUploadException as e: + if conn.debug >= 1: + print('Unable to resume transfer (%s).' % e.message) + self._start_new_resumable_upload(key, headers) + else: + self._start_new_resumable_upload(key, headers) + + # upload_start_point allows the code that instantiated the + # ResumableUploadHandler to find out the point from which it started + # uploading (e.g., so it can correctly compute throughput). 
+ if self.upload_start_point is None: + self.upload_start_point = server_end + + total_bytes_uploaded = server_end + 1 + # Corner case: Don't attempt to seek if we've already uploaded the + # entire file, because if the file is a stream (e.g., the KeyFile + # wrapper around input key when copying between providers), attempting + # to seek to the end of file would result in an InvalidRange error. + if file_length < total_bytes_uploaded: + fp.seek(total_bytes_uploaded) + conn = key.bucket.connection + + # Get a new HTTP connection (vs conn.get_http_connection(), which reuses + # pool connections) because httplib requires a new HTTP connection per + # transaction. (Without this, calling http_conn.getresponse() would get + # "ResponseNotReady".) + http_conn = conn.new_http_connection(self.tracker_uri_host, conn.port, + conn.is_secure) + http_conn.set_debuglevel(conn.debug) + + # Make sure to close http_conn at end so if a local file read + # failure occurs partway through server will terminate current upload + # and can report that progress on next attempt. + try: + return self._upload_file_bytes(conn, http_conn, fp, file_length, + total_bytes_uploaded, cb, num_cb, + headers) + except (ResumableUploadException, socket.error): + resp = self._query_server_state(conn, file_length) + if resp.status == 400: + raise ResumableUploadException('Got 400 response from server ' + 'state query after failed resumable upload attempt. This ' + 'can happen for various reasons, including specifying an ' + 'invalid request (e.g., an invalid canned ACL) or if the ' + 'file size changed between upload attempts', + ResumableTransferDisposition.ABORT) + else: + raise + finally: + http_conn.close() + + def _check_final_md5(self, key, etag): + """ + Checks that etag from server agrees with md5 computed before upload. + This is important, since the upload could have spanned a number of + hours and multiple processes (e.g., gsutil runs), and the user could + change some of the file and not realize they have inconsistent data. + """ + if key.bucket.connection.debug >= 1: + print('Checking md5 against etag.') + if key.md5 != etag.strip('"\''): + # Call key.open_read() before attempting to delete the + # (incorrect-content) key, so we perform that request on a + # different HTTP connection. This is neededb because httplib + # will return a "Response not ready" error if you try to perform + # a second transaction on the connection. + key.open_read() + key.close() + key.delete() + raise ResumableUploadException( + 'File changed during upload: md5 signature doesn\'t match etag ' + '(incorrect uploaded object deleted)', + ResumableTransferDisposition.ABORT) + + def handle_resumable_upload_exception(self, e, debug): + if (e.disposition == ResumableTransferDisposition.ABORT_CUR_PROCESS): + if debug >= 1: + print('Caught non-retryable ResumableUploadException (%s); ' + 'aborting but retaining tracker file' % e.message) + raise + elif (e.disposition == ResumableTransferDisposition.ABORT): + if debug >= 1: + print('Caught non-retryable ResumableUploadException (%s); ' + 'aborting and removing tracker file' % e.message) + self._remove_tracker_file() + raise + else: + if debug >= 1: + print('Caught ResumableUploadException (%s) - will retry' % + e.message) + + def track_progress_less_iterations(self, server_had_bytes_before_attempt, + roll_back_md5=True, debug=0): + # At this point we had a re-tryable failure; see if made progress. 
+ if self.server_has_bytes > server_had_bytes_before_attempt: + self.progress_less_iterations = 0 # If progress, reset counter. + else: + self.progress_less_iterations += 1 + if roll_back_md5: + # Rollback any potential hash updates, as we did not + # make any progress in this iteration. + self.digesters = self.digesters_before_attempt + + if self.progress_less_iterations > self.num_retries: + # Don't retry any longer in the current process. + raise ResumableUploadException( + 'Too many resumable upload attempts failed without ' + 'progress. You might try this upload again later', + ResumableTransferDisposition.ABORT_CUR_PROCESS) + + # Use binary exponential backoff to desynchronize client requests. + sleep_time_secs = random.random() * (2**self.progress_less_iterations) + if debug >= 1: + print('Got retryable failure (%d progress-less in a row).\n' + 'Sleeping %3.1f seconds before re-trying' % + (self.progress_less_iterations, sleep_time_secs)) + time.sleep(sleep_time_secs) + + def send_file(self, key, fp, headers, cb=None, num_cb=10, hash_algs=None): + """ + Upload a file to a key into a bucket on GS, using GS resumable upload + protocol. + + :type key: :class:`boto.s3.key.Key` or subclass + :param key: The Key object to which data is to be uploaded + + :type fp: file-like object + :param fp: The file pointer to upload + + :type headers: dict + :param headers: The headers to pass along with the PUT request + + :type cb: function + :param cb: a callback function that will be called to report progress on + the upload. The callback should accept two integer parameters, the + first representing the number of bytes that have been successfully + transmitted to GS, and the second representing the total number of + bytes that need to be transmitted. + + :type num_cb: int + :param num_cb: (optional) If a callback is specified with the cb + parameter, this parameter determines the granularity of the callback + by defining the maximum number of times the callback will be called + during the file transfer. Providing a negative integer will cause + your callback to be called with each buffer read. + + :type hash_algs: dictionary + :param hash_algs: (optional) Dictionary mapping hash algorithm + descriptions to corresponding state-ful hashing objects that + implement update(), digest(), and copy() (e.g. hashlib.md5()). + Defaults to {'md5': md5()}. + + Raises ResumableUploadException if a problem occurs during the transfer. + """ + + if not headers: + headers = {} + # If Content-Type header is present and set to None, remove it. + # This is gsutil's way of asking boto to refrain from auto-generating + # that header. + CT = 'Content-Type' + if CT in headers and headers[CT] is None: + del headers[CT] + + headers['User-Agent'] = UserAgent + + # Determine file size different ways for case where fp is actually a + # wrapper around a Key vs an actual file. + if isinstance(fp, KeyFile): + file_length = fp.getkey().size + else: + fp.seek(0, os.SEEK_END) + file_length = fp.tell() + fp.seek(0) + debug = key.bucket.connection.debug + + # Compute the MD5 checksum on the fly. + if hash_algs is None: + hash_algs = {'md5': md5} + self.digesters = dict( + (alg, hash_algs[alg]()) for alg in hash_algs or {}) + + # Use num-retries from constructor if one was provided; else check + # for a value specified in the boto config file; else default to 5. 
+        if self.num_retries is None:
+            self.num_retries = config.getint('Boto', 'num_retries', 6)
+        self.progress_less_iterations = 0
+
+        while True:  # Retry as long as we're making progress.
+            server_had_bytes_before_attempt = self.server_has_bytes
+            self.digesters_before_attempt = dict(
+                (alg, self.digesters[alg].copy())
+                for alg in self.digesters)
+            try:
+                # Save generation and metageneration in class state so caller
+                # can find these values, for use in preconditions of future
+                # operations on the uploaded object.
+                (etag, self.generation, self.metageneration) = (
+                    self._attempt_resumable_upload(key, fp, file_length,
+                                                   headers, cb, num_cb))
+
+                # Get the final digests for the uploaded content.
+                for alg in self.digesters:
+                    key.local_hashes[alg] = self.digesters[alg].digest()
+
+                # Upload succeeded, so remove the tracker file (if we have one).
+                self._remove_tracker_file()
+                self._check_final_md5(key, etag)
+                key.generation = self.generation
+                if debug >= 1:
+                    print('Resumable upload complete.')
+                return
+            except self.RETRYABLE_EXCEPTIONS as e:
+                if debug >= 1:
+                    print('Caught exception (%s)' % e.__repr__())
+                if isinstance(e, IOError) and e.errno == errno.EPIPE:
+                    # Broken pipe error causes httplib to immediately
+                    # close the socket (http://bugs.python.org/issue5542),
+                    # so we need to close the connection before we resume
+                    # the upload (which will cause a new connection to be
+                    # opened the next time an HTTP request is sent).
+                    key.bucket.connection.connection.close()
+            except ResumableUploadException as e:
+                self.handle_resumable_upload_exception(e, debug)
+
+            self.track_progress_less_iterations(server_had_bytes_before_attempt,
+                                                True, debug)
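The send_file machinery above is normally driven through the gs `Key` API rather than invoked directly. A minimal sketch, assuming valid boto credentials (the bucket, object, and tracker-file names are hypothetical):

    import boto
    from boto.gs.resumable_upload_handler import ResumableUploadHandler

    key = boto.connect_gs().get_bucket('my-bucket').new_key('big-object')
    # The tracker file lets a later process resume this same upload.
    handler = ResumableUploadHandler(
        tracker_file_name='/tmp/big-object.tracker', num_retries=5)
    with open('big-object.bin', 'rb') as fp:
        key.set_contents_from_file(fp, res_upload_handler=handler)
    # On success the handler exposes the new object's generation, usable
    # as a precondition (if_generation) in later requests.
    print(handler.generation)

diff --git a/desktop/core/ext-py/boto-2.38.0/boto/gs/user.py b/desktop/core/ext-py/boto-2.38.0/boto/gs/user.py
new file mode 100755
index 0000000000000000000000000000000000000000..c3072952f968eecf4916b1f731170639df366421
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/gs/user.py
@@ -0,0 +1,54 @@
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.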
+
+
+class User(object):
+    def __init__(self, parent=None, id='', name=''):
+        if parent:
+            parent.owner = self
+        self.type = None
+        self.id = id
+        self.name = name
+
+    def __repr__(self):
+        return self.id
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Name':
+            self.name = value
+        elif name == 'ID':
+            self.id = value
+        else:
+            setattr(self, name, value)
+
+    def to_xml(self, element_name='Owner'):
+        if self.type:
+            s = '<%s type="%s">' % (element_name, self.type)
+        else:
+            s = '<%s>' % element_name
+        s += '<ID>%s</ID>' % self.id
+        if self.name:
+            s += '<Name>%s</Name>' % self.name
+        s += '</%s>' % element_name
+        return s
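With the ID/Name tags restored, `User` round-trips cleanly through boto's SAX plumbing. An illustrative sketch (the values are made up; `XmlHandler` is defined in boto/handler.py, which follows):

    import xml.sax
    from boto.gs.user import User
    from boto.handler import XmlHandler

    owner = User()
    xml.sax.parseString('<Owner><ID>1234</ID><Name>alice</Name></Owner>',
                        XmlHandler(owner, None))
    print('%s %s' % (owner.id, owner.name))  # 1234 alice
    print(owner.to_xml())  # <Owner><ID>1234</ID><Name>alice</Name></Owner>

diff --git a/desktop/core/ext-py/boto-2.38.0/boto/handler.py b/desktop/core/ext-py/boto-2.38.0/boto/handler.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b5f073275e9def495cbedfd82da416bd1c02008
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/handler.py
@@ -0,0 +1,60 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.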
+ +import xml.sax + +from boto.compat import StringIO + + +class XmlHandler(xml.sax.ContentHandler): + + def __init__(self, root_node, connection): + self.connection = connection + self.nodes = [('root', root_node)] + self.current_text = '' + + def startElement(self, name, attrs): + self.current_text = '' + new_node = self.nodes[-1][1].startElement(name, attrs, self.connection) + if new_node is not None: + self.nodes.append((name, new_node)) + + def endElement(self, name): + self.nodes[-1][1].endElement(name, self.current_text, self.connection) + if self.nodes[-1][0] == name: + if hasattr(self.nodes[-1][1], 'endNode'): + self.nodes[-1][1].endNode(self.connection) + self.nodes.pop() + self.current_text = '' + + def characters(self, content): + self.current_text += content + + +class XmlHandlerWrapper(object): + def __init__(self, root_node, connection): + self.handler = XmlHandler(root_node, connection) + self.parser = xml.sax.make_parser() + self.parser.setContentHandler(self.handler) + self.parser.setFeature(xml.sax.handler.feature_external_ges, 0) + + def parseString(self, content): + return self.parser.parse(StringIO(content)) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/https_connection.py b/desktop/core/ext-py/boto-2.38.0/boto/https_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..ddc31a152292e69897fcfc01b9b44f354bd4051d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/https_connection.py @@ -0,0 +1,138 @@ +# Copyright 2007,2011 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This file is derived from +# http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py + + +"""Extensions to allow HTTPS requests with SSL certificate validation.""" + +import re +import socket +import ssl + +import boto + +from boto.compat import six, http_client + + +class InvalidCertificateException(http_client.HTTPException): + """Raised when a certificate is provided with an invalid hostname.""" + + def __init__(self, host, cert, reason): + """Constructor. + + Args: + host: The hostname the connection was made to. + cert: The SSL certificate (as a dictionary) the host returned. + """ + http_client.HTTPException.__init__(self) + self.host = host + self.cert = cert + self.reason = reason + + def __str__(self): + return ('Host %s returned an invalid certificate (%s): %s' % + (self.host, self.reason, self.cert)) + + +def GetValidHostsForCert(cert): + """Returns a list of valid host globs for an SSL certificate. + + Args: + cert: A dictionary representing an SSL certificate. + Returns: + list: A list of valid host globs. + """ + if 'subjectAltName' in cert: + return [x[1] for x in cert['subjectAltName'] if x[0].lower() == 'dns'] + else: + return [x[0][1] for x in cert['subject'] + if x[0][0].lower() == 'commonname'] + + +def ValidateCertificateHostname(cert, hostname): + """Validates that a given hostname is valid for an SSL certificate. + + Args: + cert: A dictionary representing an SSL certificate. 
+      hostname: The hostname to test.
+    Returns:
+      bool: Whether or not the hostname is valid for this certificate.
+    """
+    hosts = GetValidHostsForCert(cert)
+    boto.log.debug(
+        "validating server certificate: hostname=%s, certificate hosts=%s",
+        hostname, hosts)
+    for host in hosts:
+        host_re = host.replace('.', '\.').replace('*', '[^.]*')
+        if re.search('^%s$' % (host_re,), hostname, re.I):
+            return True
+    return False
+
+
+class CertValidatingHTTPSConnection(http_client.HTTPConnection):
+    """An HTTPConnection that connects over SSL and validates certificates."""
+
+    default_port = http_client.HTTPS_PORT
+
+    def __init__(self, host, port=default_port, key_file=None, cert_file=None,
+                 ca_certs=None, strict=None, **kwargs):
+        """Constructor.
+
+        Args:
+          host: The hostname. Can be in 'host:port' form.
+          port: The port. Defaults to 443.
+          key_file: A file containing the client's private key
+          cert_file: A file containing the client's certificates
+          ca_certs: A file containing a set of concatenated certificate
+              authority certs for validating the server against.
+          strict: When true, causes BadStatusLine to be raised if the status
+              line can't be parsed as a valid HTTP/1.0 or 1.1 status line.
+        """
+        if six.PY2:
+            # Python 3.2 and newer have deprecated and removed the strict
+            # parameter. Since the params are supported as keyword arguments
+            # we conditionally add it here.
+            kwargs['strict'] = strict
+
+        http_client.HTTPConnection.__init__(self, host=host, port=port,
+                                            **kwargs)
+        self.key_file = key_file
+        self.cert_file = cert_file
+        self.ca_certs = ca_certs
+
+    def connect(self):
+        "Connect to a host on a given (SSL) port."
+        if hasattr(self, "timeout"):
+            sock = socket.create_connection((self.host, self.port),
+                                            self.timeout)
+        else:
+            sock = socket.create_connection((self.host, self.port))
+        msg = "wrapping ssl socket; "
+        if self.ca_certs:
+            msg += "CA certificate file=%s" % self.ca_certs
+        else:
+            msg += "using system provided SSL certs"
+        boto.log.debug(msg)
+        self.sock = ssl.wrap_socket(sock, keyfile=self.key_file,
+                                    certfile=self.cert_file,
+                                    cert_reqs=ssl.CERT_REQUIRED,
+                                    ca_certs=self.ca_certs)
+        cert = self.sock.getpeercert()
+        # Use maxsplit=1 so a 'host:port' value yields just the host part;
+        # maxsplit=0 would perform no split at all, leaving the port attached
+        # and breaking hostname validation.
+        hostname = self.host.split(':', 1)[0]
+        if not ValidateCertificateHostname(cert, hostname):
+            raise InvalidCertificateException(hostname,
+                                              cert,
+                                              'remote hostname "%s" does not '
+                                              'match certificate' % hostname)
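A quick sketch of the wildcard matching that ValidateCertificateHostname performs (the cert dict mirrors the shape returned by ssl.getpeercert(); the values are made up):

    cert = {'subjectAltName': (('DNS', '*.storage.googleapis.com'),)}
    ValidateCertificateHostname(cert, 'bucket.storage.googleapis.com')  # True
    # '*' is translated to [^.]*, so a wildcard never crosses a dot:
    ValidateCertificateHostname(cert, 'a.b.storage.googleapis.com')  # False

diff --git a/desktop/core/ext-py/boto-2.38.0/boto/iam/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/iam/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3df44f29536494b995c8f62e4e05c6c02fac65da
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/iam/__init__.py
@@ -0,0 +1,86 @@
+# Copyright (c) 2010-2011 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010-2011, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.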
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +# this is here for backward compatibility +# originally, the IAMConnection class was defined here +from boto.iam.connection import IAMConnection +from boto.regioninfo import RegionInfo, get_regions + + +class IAMRegionInfo(RegionInfo): + + def connect(self, **kw_params): + """ + Connect to this Region's endpoint. Returns an connection + object pointing to the endpoint associated with this region. + You may pass any of the arguments accepted by the connection + class's constructor as keyword arguments and they will be + passed along to the connection object. + + :rtype: Connection object + :return: The connection to this regions endpoint + """ + if self.connection_cls: + return self.connection_cls(host=self.endpoint, **kw_params) + + +def regions(): + """ + Get all available regions for the IAM service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` instances + """ + regions = get_regions( + 'iam', + region_cls=IAMRegionInfo, + connection_cls=IAMConnection + ) + + # For historical reasons, we had a "universal" endpoint as well. + regions.append( + IAMRegionInfo( + name='universal', + endpoint='iam.amazonaws.com', + connection_cls=IAMConnection + ) + ) + + return regions + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.iam.connection.IAMConnection`. + + :type: str + :param region_name: The name of the region to connect to. + + :rtype: :class:`boto.iam.connection.IAMConnection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/iam/connection.py b/desktop/core/ext-py/boto-2.38.0/boto/iam/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..392d3f355cdf6d277ab89d909d15510a2a86f65e --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/iam/connection.py @@ -0,0 +1,1642 @@ +# Copyright (c) 2010-2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010-2011, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import boto +import boto.jsonresponse +from boto.compat import json, six +from boto.resultset import ResultSet +from boto.iam.summarymap import SummaryMap +from boto.connection import AWSQueryConnection + +DEFAULT_POLICY_DOCUMENTS = { + 'default': { + 'Statement': [ + { + 'Principal': { + 'Service': ['ec2.amazonaws.com'] + }, + 'Effect': 'Allow', + 'Action': ['sts:AssumeRole'] + } + ] + }, + 'amazonaws.com.cn': { + 'Statement': [ + { + 'Principal': { + 'Service': ['ec2.amazonaws.com.cn'] + }, + 'Effect': 'Allow', + 'Action': ['sts:AssumeRole'] + } + ] + }, +} +# For backward-compatibility, we'll preserve this here. +ASSUME_ROLE_POLICY_DOCUMENT = json.dumps(DEFAULT_POLICY_DOCUMENTS['default']) + + +class IAMConnection(AWSQueryConnection): + + APIVersion = '2010-05-08' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, host='iam.amazonaws.com', + debug=0, https_connection_factory=None, path='/', + security_token=None, validate_certs=True, profile_name=None): + super(IAMConnection, self).__init__(aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, + proxy_port, proxy_user, proxy_pass, + host, debug, https_connection_factory, + path, security_token, + validate_certs=validate_certs, + profile_name=profile_name) + + def _required_auth_capability(self): + return ['hmac-v4'] + + def get_response(self, action, params, path='/', parent=None, + verb='POST', list_marker='Set'): + """ + Utility method to handle calls to IAM and parsing of responses. + """ + if not parent: + parent = self + response = self.make_request(action, params, path, verb) + body = response.read() + boto.log.debug(body) + if response.status == 200: + if body: + e = boto.jsonresponse.Element(list_marker=list_marker, + pythonize_name=True) + h = boto.jsonresponse.XmlHandler(e, parent) + h.parse(body) + return e + else: + # Support empty responses, e.g. deleting a SAML provider + # according to the official documentation. + return {} + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) + + # + # Group methods + # + + def get_all_groups(self, path_prefix='/', marker=None, max_items=None): + """ + List the groups that have the specified path prefix. + + :type path_prefix: string + :param path_prefix: If provided, only groups whose paths match + the provided prefix will be returned. + + :type marker: string + :param marker: Use this only when paginating results and only + in follow-up request after you've received a response + where the results are truncated. Set this to the value of + the Marker element in the response you just received. + + :type max_items: int + :param max_items: Use this only when paginating results to indicate + the maximum number of groups you want in the response. 
+ """ + params = {} + if path_prefix: + params['PathPrefix'] = path_prefix + if marker: + params['Marker'] = marker + if max_items: + params['MaxItems'] = max_items + return self.get_response('ListGroups', params, + list_marker='Groups') + + def get_group(self, group_name, marker=None, max_items=None): + """ + Return a list of users that are in the specified group. + + :type group_name: string + :param group_name: The name of the group whose information should + be returned. + :type marker: string + :param marker: Use this only when paginating results and only + in follow-up request after you've received a response + where the results are truncated. Set this to the value of + the Marker element in the response you just received. + + :type max_items: int + :param max_items: Use this only when paginating results to indicate + the maximum number of groups you want in the response. + """ + params = {'GroupName': group_name} + if marker: + params['Marker'] = marker + if max_items: + params['MaxItems'] = max_items + return self.get_response('GetGroup', params, list_marker='Users') + + def create_group(self, group_name, path='/'): + """ + Create a group. + + :type group_name: string + :param group_name: The name of the new group + + :type path: string + :param path: The path to the group (Optional). Defaults to /. + + """ + params = {'GroupName': group_name, + 'Path': path} + return self.get_response('CreateGroup', params) + + def delete_group(self, group_name): + """ + Delete a group. The group must not contain any Users or + have any attached policies + + :type group_name: string + :param group_name: The name of the group to delete. + + """ + params = {'GroupName': group_name} + return self.get_response('DeleteGroup', params) + + def update_group(self, group_name, new_group_name=None, new_path=None): + """ + Updates name and/or path of the specified group. + + :type group_name: string + :param group_name: The name of the new group + + :type new_group_name: string + :param new_group_name: If provided, the name of the group will be + changed to this name. + + :type new_path: string + :param new_path: If provided, the path of the group will be + changed to this path. + + """ + params = {'GroupName': group_name} + if new_group_name: + params['NewGroupName'] = new_group_name + if new_path: + params['NewPath'] = new_path + return self.get_response('UpdateGroup', params) + + def add_user_to_group(self, group_name, user_name): + """ + Add a user to a group + + :type group_name: string + :param group_name: The name of the group + + :type user_name: string + :param user_name: The to be added to the group. + + """ + params = {'GroupName': group_name, + 'UserName': user_name} + return self.get_response('AddUserToGroup', params) + + def remove_user_from_group(self, group_name, user_name): + """ + Remove a user from a group. + + :type group_name: string + :param group_name: The name of the group + + :type user_name: string + :param user_name: The user to remove from the group. + + """ + params = {'GroupName': group_name, + 'UserName': user_name} + return self.get_response('RemoveUserFromGroup', params) + + def put_group_policy(self, group_name, policy_name, policy_json): + """ + Adds or updates the specified policy document for the specified group. + + :type group_name: string + :param group_name: The name of the group the policy is associated with. + + :type policy_name: string + :param policy_name: The policy document to get. + + :type policy_json: string + :param policy_json: The policy document. 
+ + """ + params = {'GroupName': group_name, + 'PolicyName': policy_name, + 'PolicyDocument': policy_json} + return self.get_response('PutGroupPolicy', params, verb='POST') + + def get_all_group_policies(self, group_name, marker=None, max_items=None): + """ + List the names of the policies associated with the specified group. + + :type group_name: string + :param group_name: The name of the group the policy is associated with. + + :type marker: string + :param marker: Use this only when paginating results and only + in follow-up request after you've received a response + where the results are truncated. Set this to the value of + the Marker element in the response you just received. + + :type max_items: int + :param max_items: Use this only when paginating results to indicate + the maximum number of groups you want in the response. + """ + params = {'GroupName': group_name} + if marker: + params['Marker'] = marker + if max_items: + params['MaxItems'] = max_items + return self.get_response('ListGroupPolicies', params, + list_marker='PolicyNames') + + def get_group_policy(self, group_name, policy_name): + """ + Retrieves the specified policy document for the specified group. + + :type group_name: string + :param group_name: The name of the group the policy is associated with. + + :type policy_name: string + :param policy_name: The policy document to get. + + """ + params = {'GroupName': group_name, + 'PolicyName': policy_name} + return self.get_response('GetGroupPolicy', params, verb='POST') + + def delete_group_policy(self, group_name, policy_name): + """ + Deletes the specified policy document for the specified group. + + :type group_name: string + :param group_name: The name of the group the policy is associated with. + + :type policy_name: string + :param policy_name: The policy document to delete. + + """ + params = {'GroupName': group_name, + 'PolicyName': policy_name} + return self.get_response('DeleteGroupPolicy', params, verb='POST') + + def get_all_users(self, path_prefix='/', marker=None, max_items=None): + """ + List the users that have the specified path prefix. + + :type path_prefix: string + :param path_prefix: If provided, only users whose paths match + the provided prefix will be returned. + + :type marker: string + :param marker: Use this only when paginating results and only + in follow-up request after you've received a response + where the results are truncated. Set this to the value of + the Marker element in the response you just received. + + :type max_items: int + :param max_items: Use this only when paginating results to indicate + the maximum number of groups you want in the response. + """ + params = {'PathPrefix': path_prefix} + if marker: + params['Marker'] = marker + if max_items: + params['MaxItems'] = max_items + return self.get_response('ListUsers', params, list_marker='Users') + + # + # User methods + # + + def create_user(self, user_name, path='/'): + """ + Create a user. + + :type user_name: string + :param user_name: The name of the new user + + :type path: string + :param path: The path in which the user will be created. + Defaults to /. + + """ + params = {'UserName': user_name, + 'Path': path} + return self.get_response('CreateUser', params) + + def delete_user(self, user_name): + """ + Delete a user including the user's path, GUID and ARN. + + If the user_name is not specified, the user_name is determined + implicitly based on the AWS Access Key ID used to sign the request. 
+ + :type user_name: string + :param user_name: The name of the user to delete. + + """ + params = {'UserName': user_name} + return self.get_response('DeleteUser', params) + + def get_user(self, user_name=None): + """ + Retrieve information about the specified user. + + If the user_name is not specified, the user_name is determined + implicitly based on the AWS Access Key ID used to sign the request. + + :type user_name: string + :param user_name: The name of the user to retrieve. + If not specified, defaults to user making request. + """ + params = {} + if user_name: + params['UserName'] = user_name + return self.get_response('GetUser', params) + + def update_user(self, user_name, new_user_name=None, new_path=None): + """ + Updates name and/or path of the specified user. + + :type user_name: string + :param user_name: The name of the user + + :type new_user_name: string + :param new_user_name: If provided, the username of the user will be + changed to this username. + + :type new_path: string + :param new_path: If provided, the path of the user will be + changed to this path. + + """ + params = {'UserName': user_name} + if new_user_name: + params['NewUserName'] = new_user_name + if new_path: + params['NewPath'] = new_path + return self.get_response('UpdateUser', params) + + def get_all_user_policies(self, user_name, marker=None, max_items=None): + """ + List the names of the policies associated with the specified user. + + :type user_name: string + :param user_name: The name of the user the policy is associated with. + + :type marker: string + :param marker: Use this only when paginating results and only + in follow-up request after you've received a response + where the results are truncated. Set this to the value of + the Marker element in the response you just received. + + :type max_items: int + :param max_items: Use this only when paginating results to indicate + the maximum number of groups you want in the response. + """ + params = {'UserName': user_name} + if marker: + params['Marker'] = marker + if max_items: + params['MaxItems'] = max_items + return self.get_response('ListUserPolicies', params, + list_marker='PolicyNames') + + def put_user_policy(self, user_name, policy_name, policy_json): + """ + Adds or updates the specified policy document for the specified user. + + :type user_name: string + :param user_name: The name of the user the policy is associated with. + + :type policy_name: string + :param policy_name: The policy document to get. + + :type policy_json: string + :param policy_json: The policy document. + + """ + params = {'UserName': user_name, + 'PolicyName': policy_name, + 'PolicyDocument': policy_json} + return self.get_response('PutUserPolicy', params, verb='POST') + + def get_user_policy(self, user_name, policy_name): + """ + Retrieves the specified policy document for the specified user. + + :type user_name: string + :param user_name: The name of the user the policy is associated with. + + :type policy_name: string + :param policy_name: The policy document to get. + + """ + params = {'UserName': user_name, + 'PolicyName': policy_name} + return self.get_response('GetUserPolicy', params, verb='POST') + + def delete_user_policy(self, user_name, policy_name): + """ + Deletes the specified policy document for the specified user. + + :type user_name: string + :param user_name: The name of the user the policy is associated with. + + :type policy_name: string + :param policy_name: The policy document to delete. 
+ + """ + params = {'UserName': user_name, + 'PolicyName': policy_name} + return self.get_response('DeleteUserPolicy', params, verb='POST') + + def get_groups_for_user(self, user_name, marker=None, max_items=None): + """ + List the groups that a specified user belongs to. + + :type user_name: string + :param user_name: The name of the user to list groups for. + + :type marker: string + :param marker: Use this only when paginating results and only + in follow-up request after you've received a response + where the results are truncated. Set this to the value of + the Marker element in the response you just received. + + :type max_items: int + :param max_items: Use this only when paginating results to indicate + the maximum number of groups you want in the response. + """ + params = {'UserName': user_name} + if marker: + params['Marker'] = marker + if max_items: + params['MaxItems'] = max_items + return self.get_response('ListGroupsForUser', params, + list_marker='Groups') + + # + # Access Keys + # + + def get_all_access_keys(self, user_name, marker=None, max_items=None): + """ + Get all access keys associated with an account. + + :type user_name: string + :param user_name: The username of the user + + :type marker: string + :param marker: Use this only when paginating results and only + in follow-up request after you've received a response + where the results are truncated. Set this to the value of + the Marker element in the response you just received. + + :type max_items: int + :param max_items: Use this only when paginating results to indicate + the maximum number of groups you want in the response. + """ + params = {'UserName': user_name} + if marker: + params['Marker'] = marker + if max_items: + params['MaxItems'] = max_items + return self.get_response('ListAccessKeys', params, + list_marker='AccessKeyMetadata') + + def create_access_key(self, user_name=None): + """ + Create a new AWS Secret Access Key and corresponding AWS Access Key ID + for the specified user. The default status for new keys is Active + + If the user_name is not specified, the user_name is determined + implicitly based on the AWS Access Key ID used to sign the request. + + :type user_name: string + :param user_name: The username of the user + + """ + params = {'UserName': user_name} + return self.get_response('CreateAccessKey', params) + + def update_access_key(self, access_key_id, status, user_name=None): + """ + Changes the status of the specified access key from Active to Inactive + or vice versa. This action can be used to disable a user's key as + part of a key rotation workflow. + + If the user_name is not specified, the user_name is determined + implicitly based on the AWS Access Key ID used to sign the request. + + :type access_key_id: string + :param access_key_id: The ID of the access key. + + :type status: string + :param status: Either Active or Inactive. + + :type user_name: string + :param user_name: The username of user (optional). + + """ + params = {'AccessKeyId': access_key_id, + 'Status': status} + if user_name: + params['UserName'] = user_name + return self.get_response('UpdateAccessKey', params) + + def delete_access_key(self, access_key_id, user_name=None): + """ + Delete an access key associated with a user. + + If the user_name is not specified, it is determined implicitly based + on the AWS Access Key ID used to sign the request. + + :type access_key_id: string + :param access_key_id: The ID of the access key to be deleted. 
+ + :type user_name: string + :param user_name: The username of the user + + """ + params = {'AccessKeyId': access_key_id} + if user_name: + params['UserName'] = user_name + return self.get_response('DeleteAccessKey', params) + + # + # Signing Certificates + # + + def get_all_signing_certs(self, marker=None, max_items=None, + user_name=None): + """ + Get all signing certificates associated with an account. + + If the user_name is not specified, it is determined implicitly based + on the AWS Access Key ID used to sign the request. + + :type marker: string + :param marker: Use this only when paginating results and only + in follow-up request after you've received a response + where the results are truncated. Set this to the value of + the Marker element in the response you just received. + + :type max_items: int + :param max_items: Use this only when paginating results to indicate + the maximum number of groups you want in the response. + + :type user_name: string + :param user_name: The username of the user + + """ + params = {} + if marker: + params['Marker'] = marker + if max_items: + params['MaxItems'] = max_items + if user_name: + params['UserName'] = user_name + return self.get_response('ListSigningCertificates', + params, list_marker='Certificates') + + def update_signing_cert(self, cert_id, status, user_name=None): + """ + Change the status of the specified signing certificate from + Active to Inactive or vice versa. + + If the user_name is not specified, it is determined implicitly based + on the AWS Access Key ID used to sign the request. + + :type cert_id: string + :param cert_id: The ID of the signing certificate + + :type status: string + :param status: Either Active or Inactive. + + :type user_name: string + :param user_name: The username of the user + """ + params = {'CertificateId': cert_id, + 'Status': status} + if user_name: + params['UserName'] = user_name + return self.get_response('UpdateSigningCertificate', params) + + def upload_signing_cert(self, cert_body, user_name=None): + """ + Uploads an X.509 signing certificate and associates it with + the specified user. + + If the user_name is not specified, it is determined implicitly based + on the AWS Access Key ID used to sign the request. + + :type cert_body: string + :param cert_body: The body of the signing certificate. + + :type user_name: string + :param user_name: The username of the user + + """ + params = {'CertificateBody': cert_body} + if user_name: + params['UserName'] = user_name + return self.get_response('UploadSigningCertificate', params, + verb='POST') + + def delete_signing_cert(self, cert_id, user_name=None): + """ + Delete a signing certificate associated with a user. + + If the user_name is not specified, it is determined implicitly based + on the AWS Access Key ID used to sign the request. + + :type user_name: string + :param user_name: The username of the user + + :type cert_id: string + :param cert_id: The ID of the certificate. + + """ + params = {'CertificateId': cert_id} + if user_name: + params['UserName'] = user_name + return self.get_response('DeleteSigningCertificate', params) + + # + # Server Certificates + # + + def list_server_certs(self, path_prefix='/', + marker=None, max_items=None): + """ + Lists the server certificates that have the specified path prefix. + If none exist, the action returns an empty list. + + :type path_prefix: string + :param path_prefix: If provided, only certificates whose paths match + the provided prefix will be returned. 
+ + :type marker: string + :param marker: Use this only when paginating results and only + in follow-up request after you've received a response + where the results are truncated. Set this to the value of + the Marker element in the response you just received. + + :type max_items: int + :param max_items: Use this only when paginating results to indicate + the maximum number of groups you want in the response. + + """ + params = {} + if path_prefix: + params['PathPrefix'] = path_prefix + if marker: + params['Marker'] = marker + if max_items: + params['MaxItems'] = max_items + return self.get_response('ListServerCertificates', + params, + list_marker='ServerCertificateMetadataList') + + # Preserves backwards compatibility. + # TODO: Look into deprecating this eventually? + get_all_server_certs = list_server_certs + + def update_server_cert(self, cert_name, new_cert_name=None, + new_path=None): + """ + Updates the name and/or the path of the specified server certificate. + + :type cert_name: string + :param cert_name: The name of the server certificate that you want + to update. + + :type new_cert_name: string + :param new_cert_name: The new name for the server certificate. + Include this only if you are updating the + server certificate's name. + + :type new_path: string + :param new_path: If provided, the path of the certificate will be + changed to this path. + """ + params = {'ServerCertificateName': cert_name} + if new_cert_name: + params['NewServerCertificateName'] = new_cert_name + if new_path: + params['NewPath'] = new_path + return self.get_response('UpdateServerCertificate', params) + + def upload_server_cert(self, cert_name, cert_body, private_key, + cert_chain=None, path=None): + """ + Uploads a server certificate entity for the AWS Account. + The server certificate entity includes a public key certificate, + a private key, and an optional certificate chain, which should + all be PEM-encoded. + + :type cert_name: string + :param cert_name: The name for the server certificate. Do not + include the path in this value. + + :type cert_body: string + :param cert_body: The contents of the public key certificate + in PEM-encoded format. + + :type private_key: string + :param private_key: The contents of the private key in + PEM-encoded format. + + :type cert_chain: string + :param cert_chain: The contents of the certificate chain. This + is typically a concatenation of the PEM-encoded + public key certificates of the chain. + + :type path: string + :param path: The path for the server certificate. + """ + params = {'ServerCertificateName': cert_name, + 'CertificateBody': cert_body, + 'PrivateKey': private_key} + if cert_chain: + params['CertificateChain'] = cert_chain + if path: + params['Path'] = path + return self.get_response('UploadServerCertificate', params, + verb='POST') + + def get_server_certificate(self, cert_name): + """ + Retrieves information about the specified server certificate. + + :type cert_name: string + :param cert_name: The name of the server certificate you want + to retrieve information about. + + """ + params = {'ServerCertificateName': cert_name} + return self.get_response('GetServerCertificate', params) + + def delete_server_cert(self, cert_name): + """ + Delete the specified server certificate. + + :type cert_name: string + :param cert_name: The name of the server certificate you want + to delete. 
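+
+        For example, a server certificate lifecycle, assuming ``cxn`` is
+        an ``IAMConnection`` and the PEM contents have already been read
+        from disk into strings (all names below are illustrative)::
+
+            cxn.upload_server_cert('example-cert', cert_body, private_key)
+            cert = cxn.get_server_certificate('example-cert')
+            cxn.delete_server_cert('example-cert')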
+
+        """
+        params = {'ServerCertificateName': cert_name}
+        return self.get_response('DeleteServerCertificate', params)
+
+    #
+    # MFA Devices
+    #
+
+    def get_all_mfa_devices(self, user_name, marker=None, max_items=None):
+        """
+        Get all MFA devices associated with an account.
+
+        :type user_name: string
+        :param user_name: The username of the user
+
+        :type marker: string
+        :param marker: Use this only when paginating results and only
+            in a follow-up request after you've received a response
+            where the results are truncated. Set this to the value of
+            the Marker element in the response you just received.
+
+        :type max_items: int
+        :param max_items: Use this only when paginating results to indicate
+            the maximum number of MFA devices you want in the response.
+
+        """
+        params = {'UserName': user_name}
+        if marker:
+            params['Marker'] = marker
+        if max_items:
+            params['MaxItems'] = max_items
+        return self.get_response('ListMFADevices',
+                                 params, list_marker='MFADevices')
+
+    def enable_mfa_device(self, user_name, serial_number,
+                          auth_code_1, auth_code_2):
+        """
+        Enables the specified MFA device and associates it with the
+        specified user.
+
+        :type user_name: string
+        :param user_name: The username of the user
+
+        :type serial_number: string
+        :param serial_number: The serial number which uniquely identifies
+            the MFA device.
+
+        :type auth_code_1: string
+        :param auth_code_1: An authentication code emitted by the device.
+
+        :type auth_code_2: string
+        :param auth_code_2: A subsequent authentication code emitted
+            by the device.
+
+        """
+        params = {'UserName': user_name,
+                  'SerialNumber': serial_number,
+                  'AuthenticationCode1': auth_code_1,
+                  'AuthenticationCode2': auth_code_2}
+        return self.get_response('EnableMFADevice', params)
+
+    def deactivate_mfa_device(self, user_name, serial_number):
+        """
+        Deactivates the specified MFA device and removes it from
+        association with the user.
+
+        :type user_name: string
+        :param user_name: The username of the user
+
+        :type serial_number: string
+        :param serial_number: The serial number which uniquely identifies
+            the MFA device.
+
+        """
+        params = {'UserName': user_name,
+                  'SerialNumber': serial_number}
+        return self.get_response('DeactivateMFADevice', params)
+
+    def resync_mfa_device(self, user_name, serial_number,
+                          auth_code_1, auth_code_2):
+        """
+        Synchronizes the specified MFA device with the AWS servers.
+
+        :type user_name: string
+        :param user_name: The username of the user
+
+        :type serial_number: string
+        :param serial_number: The serial number which uniquely identifies
+            the MFA device.
+
+        :type auth_code_1: string
+        :param auth_code_1: An authentication code emitted by the device.
+
+        :type auth_code_2: string
+        :param auth_code_2: A subsequent authentication code emitted
+            by the device.
+
+        """
+        params = {'UserName': user_name,
+                  'SerialNumber': serial_number,
+                  'AuthenticationCode1': auth_code_1,
+                  'AuthenticationCode2': auth_code_2}
+        return self.get_response('ResyncMFADevice', params)
+
+    #
+    # Login Profiles
+    #
+
+    def get_login_profiles(self, user_name):
+        """
+        Retrieves the login profile for the specified user.
+
+        :type user_name: string
+        :param user_name: The username of the user
+
+        """
+        params = {'UserName': user_name}
+        return self.get_response('GetLoginProfile', params)
+
+    def create_login_profile(self, user_name, password):
+        """
+        Creates a login profile for the specified user, giving the user
+        the ability to access AWS services and the AWS Management Console.
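+
+        For example, assuming ``cxn`` is an ``IAMConnection`` and the
+        user already exists (the name and password are illustrative)::
+
+            cxn.create_login_profile('example-user', 'an-initial-password')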
+
+        :type user_name: string
+        :param user_name: The name of the user
+
+        :type password: string
+        :param password: The new password for the user
+
+        """
+        params = {'UserName': user_name,
+                  'Password': password}
+        return self.get_response('CreateLoginProfile', params)
+
+    def delete_login_profile(self, user_name):
+        """
+        Deletes the login profile associated with the specified user.
+
+        :type user_name: string
+        :param user_name: The name of the user whose login profile
+            should be deleted.
+
+        """
+        params = {'UserName': user_name}
+        return self.get_response('DeleteLoginProfile', params)
+
+    def update_login_profile(self, user_name, password):
+        """
+        Resets the password associated with the user's login profile.
+
+        :type user_name: string
+        :param user_name: The name of the user
+
+        :type password: string
+        :param password: The new password for the user
+
+        """
+        params = {'UserName': user_name,
+                  'Password': password}
+        return self.get_response('UpdateLoginProfile', params)
+
+    def create_account_alias(self, alias):
+        """
+        Creates a new alias for the AWS account.
+
+        For more information on account id aliases, please see
+        http://goo.gl/ToB7G
+
+        :type alias: string
+        :param alias: The alias to attach to the account.
+        """
+        params = {'AccountAlias': alias}
+        return self.get_response('CreateAccountAlias', params)
+
+    def delete_account_alias(self, alias):
+        """
+        Deletes an alias for the AWS account.
+
+        For more information on account id aliases, please see
+        http://goo.gl/ToB7G
+
+        :type alias: string
+        :param alias: The alias to remove from the account.
+        """
+        params = {'AccountAlias': alias}
+        return self.get_response('DeleteAccountAlias', params)
+
+    def get_account_alias(self):
+        """
+        Get the alias for the current account.
+
+        This is referred to in the docs as list_account_aliases,
+        but it seems you can only have one account alias currently.
+
+        For more information on account id aliases, please see
+        http://goo.gl/ToB7G
+        """
+        return self.get_response('ListAccountAliases', {},
+                                 list_marker='AccountAliases')
+
+    def get_signin_url(self, service='ec2'):
+        """
+        Get the URL where IAM users can use their login profile to sign in
+        to this account's console.
+
+        :type service: string
+        :param service: Default service to go to in the console.
+        """
+        alias = self.get_account_alias()
+
+        if not alias:
+            raise Exception('No alias associated with this account. Please use iam.create_account_alias() first.')
+
+        resp = alias.get('list_account_aliases_response', {})
+        result = resp.get('list_account_aliases_result', {})
+        aliases = result.get('account_aliases', [])
+
+        if not len(aliases):
+            raise Exception('No alias associated with this account. Please use iam.create_account_alias() first.')
+
+        # We'll just use the first one we find.
+        alias = aliases[0]
+
+        if self.host == 'iam.us-gov.amazonaws.com':
+            return "https://%s.signin.amazonaws-us-gov.com/console/%s" % (
+                alias,
+                service
+            )
+        elif self.host.endswith('amazonaws.com.cn'):
+            return "https://%s.signin.amazonaws.cn/console/%s" % (
+                alias,
+                service
+            )
+        else:
+            return "https://%s.signin.aws.amazon.com/console/%s" % (
+                alias,
+                service
+            )
+
+    def get_account_summary(self):
+        """
+        Get information about IAM entity usage and IAM quotas for the
+        AWS account. The summary is returned as a SummaryMap, a dict
+        keyed by summary entry name.
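+
+        For example (assuming ``cxn`` is an ``IAMConnection``; the keys
+        shown are a small sample of what the service returns)::
+
+            summary = cxn.get_account_summary()
+            print(summary.get('Users'), summary.get('Groups'))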
+
+        """
+        return self.get_object('GetAccountSummary', {}, SummaryMap)
+
+    #
+    # IAM Roles
+    #
+
+    def add_role_to_instance_profile(self, instance_profile_name, role_name):
+        """
+        Adds the specified role to the specified instance profile.
+
+        :type instance_profile_name: string
+        :param instance_profile_name: Name of the instance profile to update.
+
+        :type role_name: string
+        :param role_name: Name of the role to add.
+        """
+        return self.get_response('AddRoleToInstanceProfile',
+                                 {'InstanceProfileName': instance_profile_name,
+                                  'RoleName': role_name})
+
+    def create_instance_profile(self, instance_profile_name, path=None):
+        """
+        Creates a new instance profile.
+
+        :type instance_profile_name: string
+        :param instance_profile_name: Name of the instance profile to create.
+
+        :type path: string
+        :param path: The path to the instance profile.
+        """
+        params = {'InstanceProfileName': instance_profile_name}
+        if path is not None:
+            params['Path'] = path
+        return self.get_response('CreateInstanceProfile', params)
+
+    def _build_policy(self, assume_role_policy_document=None):
+        if assume_role_policy_document is not None:
+            if isinstance(assume_role_policy_document, six.string_types):
+                # Historically, they had to pass a string. If it's a string,
+                # assume the user has already handled it.
+                return assume_role_policy_document
+        else:
+            for tld, policy in DEFAULT_POLICY_DOCUMENTS.items():
+                if tld == 'default':
+                    # Skip the default. We'll fall back to it if we don't find
+                    # anything.
+                    continue
+
+                if self.host and self.host.endswith(tld):
+                    assume_role_policy_document = policy
+                    break
+
+            if not assume_role_policy_document:
+                assume_role_policy_document = DEFAULT_POLICY_DOCUMENTS['default']
+
+        # Dump the policy (either user-supplied ``dict`` or one of the defaults)
+        return json.dumps(assume_role_policy_document)
+
+    def create_role(self, role_name, assume_role_policy_document=None, path=None):
+        """
+        Creates a new role for your AWS account.
+
+        The policy grants permission to an EC2 instance to assume the role.
+        The policy is URL-encoded according to RFC 3986. Currently, only EC2
+        instances can assume roles.
+
+        :type role_name: string
+        :param role_name: Name of the role to create.
+
+        :type assume_role_policy_document: ``string`` or ``dict``
+        :param assume_role_policy_document: The policy that grants an entity
+            permission to assume the role.
+
+        :type path: string
+        :param path: The path to the role.
+        """
+        params = {
+            'RoleName': role_name,
+            'AssumeRolePolicyDocument': self._build_policy(
+                assume_role_policy_document
+            ),
+        }
+        if path is not None:
+            params['Path'] = path
+        return self.get_response('CreateRole', params)
+
+    def delete_instance_profile(self, instance_profile_name):
+        """
+        Deletes the specified instance profile. The instance profile must not
+        have an associated role.
+
+        :type instance_profile_name: string
+        :param instance_profile_name: Name of the instance profile to delete.
+        """
+        return self.get_response(
+            'DeleteInstanceProfile',
+            {'InstanceProfileName': instance_profile_name})
+
+    def delete_role(self, role_name):
+        """
+        Deletes the specified role. The role must not have any policies
+        attached.
+
+        :type role_name: string
+        :param role_name: Name of the role to delete.
+        """
+        return self.get_response('DeleteRole', {'RoleName': role_name})
+
+    def delete_role_policy(self, role_name, policy_name):
+        """
+        Deletes the specified policy associated with the specified role.
+ + :type role_name: string + :param role_name: Name of the role associated with the policy. + + :type policy_name: string + :param policy_name: Name of the policy to delete. + """ + return self.get_response( + 'DeleteRolePolicy', + {'RoleName': role_name, 'PolicyName': policy_name}) + + def get_instance_profile(self, instance_profile_name): + """ + Retrieves information about the specified instance profile, including + the instance profile's path, GUID, ARN, and role. + + :type instance_profile_name: string + :param instance_profile_name: Name of the instance profile to get + information about. + """ + return self.get_response('GetInstanceProfile', + {'InstanceProfileName': instance_profile_name}) + + def get_role(self, role_name): + """ + Retrieves information about the specified role, including the role's + path, GUID, ARN, and the policy granting permission to EC2 to assume + the role. + + :type role_name: string + :param role_name: Name of the role associated with the policy. + """ + return self.get_response('GetRole', {'RoleName': role_name}) + + def get_role_policy(self, role_name, policy_name): + """ + Retrieves the specified policy document for the specified role. + + :type role_name: string + :param role_name: Name of the role associated with the policy. + + :type policy_name: string + :param policy_name: Name of the policy to get. + """ + return self.get_response('GetRolePolicy', + {'RoleName': role_name, + 'PolicyName': policy_name}) + + def list_instance_profiles(self, path_prefix=None, marker=None, + max_items=None): + """ + Lists the instance profiles that have the specified path prefix. If + there are none, the action returns an empty list. + + :type path_prefix: string + :param path_prefix: The path prefix for filtering the results. For + example: /application_abc/component_xyz/, which would get all + instance profiles whose path starts with + /application_abc/component_xyz/. + + :type marker: string + :param marker: Use this parameter only when paginating results, and + only in a subsequent request after you've received a response + where the results are truncated. Set it to the value of the + Marker element in the response you just received. + + :type max_items: int + :param max_items: Use this parameter only when paginating results to + indicate the maximum number of user names you want in the response. + """ + params = {} + if path_prefix is not None: + params['PathPrefix'] = path_prefix + if marker is not None: + params['Marker'] = marker + if max_items is not None: + params['MaxItems'] = max_items + + return self.get_response('ListInstanceProfiles', params, + list_marker='InstanceProfiles') + + def list_instance_profiles_for_role(self, role_name, marker=None, + max_items=None): + """ + Lists the instance profiles that have the specified associated role. If + there are none, the action returns an empty list. + + :type role_name: string + :param role_name: The name of the role to list instance profiles for. + + :type marker: string + :param marker: Use this parameter only when paginating results, and + only in a subsequent request after you've received a response + where the results are truncated. Set it to the value of the + Marker element in the response you just received. + + :type max_items: int + :param max_items: Use this parameter only when paginating results to + indicate the maximum number of user names you want in the response. 
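+
+        For example, the usual wiring of a role to an instance profile,
+        assuming ``cxn`` is an ``IAMConnection`` (names are illustrative)::
+
+            cxn.create_role('example-role')
+            cxn.create_instance_profile('example-profile')
+            cxn.add_role_to_instance_profile('example-profile', 'example-role')
+            cxn.list_instance_profiles_for_role('example-role')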
+ """ + params = {'RoleName': role_name} + if marker is not None: + params['Marker'] = marker + if max_items is not None: + params['MaxItems'] = max_items + return self.get_response('ListInstanceProfilesForRole', params, + list_marker='InstanceProfiles') + + def list_role_policies(self, role_name, marker=None, max_items=None): + """ + Lists the names of the policies associated with the specified role. If + there are none, the action returns an empty list. + + :type role_name: string + :param role_name: The name of the role to list policies for. + + :type marker: string + :param marker: Use this parameter only when paginating results, and + only in a subsequent request after you've received a response + where the results are truncated. Set it to the value of the + marker element in the response you just received. + + :type max_items: int + :param max_items: Use this parameter only when paginating results to + indicate the maximum number of user names you want in the response. + """ + params = {'RoleName': role_name} + if marker is not None: + params['Marker'] = marker + if max_items is not None: + params['MaxItems'] = max_items + return self.get_response('ListRolePolicies', params, + list_marker='PolicyNames') + + def list_roles(self, path_prefix=None, marker=None, max_items=None): + """ + Lists the roles that have the specified path prefix. If there are none, + the action returns an empty list. + + :type path_prefix: string + :param path_prefix: The path prefix for filtering the results. + + :type marker: string + :param marker: Use this parameter only when paginating results, and + only in a subsequent request after you've received a response + where the results are truncated. Set it to the value of the + marker element in the response you just received. + + :type max_items: int + :param max_items: Use this parameter only when paginating results to + indicate the maximum number of user names you want in the response. + """ + params = {} + if path_prefix is not None: + params['PathPrefix'] = path_prefix + if marker is not None: + params['Marker'] = marker + if max_items is not None: + params['MaxItems'] = max_items + return self.get_response('ListRoles', params, list_marker='Roles') + + def put_role_policy(self, role_name, policy_name, policy_document): + """ + Adds (or updates) a policy document associated with the specified role. + + :type role_name: string + :param role_name: Name of the role to associate the policy with. + + :type policy_name: string + :param policy_name: Name of the policy document. + + :type policy_document: string + :param policy_document: The policy document. + """ + return self.get_response('PutRolePolicy', + {'RoleName': role_name, + 'PolicyName': policy_name, + 'PolicyDocument': policy_document}) + + def remove_role_from_instance_profile(self, instance_profile_name, + role_name): + """ + Removes the specified role from the specified instance profile. + + :type instance_profile_name: string + :param instance_profile_name: Name of the instance profile to update. + + :type role_name: string + :param role_name: Name of the role to remove. + """ + return self.get_response('RemoveRoleFromInstanceProfile', + {'InstanceProfileName': instance_profile_name, + 'RoleName': role_name}) + + def update_assume_role_policy(self, role_name, policy_document): + """ + Updates the policy that grants an entity permission to assume a role. + Currently, only an Amazon EC2 instance can assume a role. + + :type role_name: string + :param role_name: Name of the role to update. 
+
+        :type policy_document: string
+        :param policy_document: The policy that grants an entity permission to
+            assume the role.
+        """
+        return self.get_response('UpdateAssumeRolePolicy',
+                                 {'RoleName': role_name,
+                                  'PolicyDocument': policy_document})
+
+    def create_saml_provider(self, saml_metadata_document, name):
+        """
+        Creates an IAM entity to describe an identity provider (IdP)
+        that supports SAML 2.0.
+
+        The SAML provider that you create with this operation can be
+        used as a principal in a role's trust policy to establish a
+        trust relationship between AWS and a SAML identity provider.
+        You can create an IAM role that supports Web-based single
+        sign-on (SSO) to the AWS Management Console or one that
+        supports API access to AWS.
+
+        When you create the SAML provider, you upload a SAML
+        metadata document that you get from your IdP and that includes
+        the issuer's name, expiration information, and keys that can
+        be used to validate the SAML authentication response
+        (assertions) that are received from the IdP. You must generate
+        the metadata document using the identity management software
+        that is used as your organization's IdP.
+
+        This operation requires `Signature Version 4`_.
+
+        For more information, see `Giving Console Access Using SAML`_
+        and `Creating Temporary Security Credentials for SAML
+        Federation`_ in the Using Temporary Credentials guide.
+
+        :type saml_metadata_document: string
+        :param saml_metadata_document: An XML document generated by an identity
+            provider (IdP) that supports SAML 2.0. The document includes the
+            issuer's name, expiration information, and keys that can be used to
+            validate the SAML authentication response (assertions) that are
+            received from the IdP. You must generate the metadata document
+            using the identity management software that is used as your
+            organization's IdP.
+            For more information, see `Creating Temporary Security Credentials
+            for SAML Federation`_ in the Using Temporary Security Credentials
+            guide.
+
+        :type name: string
+        :param name: The name of the provider to create.
+
+        """
+        params = {
+            'SAMLMetadataDocument': saml_metadata_document,
+            'Name': name,
+        }
+        return self.get_response('CreateSAMLProvider', params)
+
+    def list_saml_providers(self):
+        """
+        Lists the SAML providers in the account.
+
+        This operation requires `Signature Version 4`_.
+        """
+        return self.get_response('ListSAMLProviders', {}, list_marker='SAMLProviderList')
+
+    def get_saml_provider(self, saml_provider_arn):
+        """
+        Returns the SAML provider metadata document that was uploaded
+        when the provider was created or updated.
+
+        This operation requires `Signature Version 4`_.
+
+        :type saml_provider_arn: string
+        :param saml_provider_arn: The Amazon Resource Name (ARN) of the SAML
+            provider to get information about.
+
+        """
+        params = {'SAMLProviderArn': saml_provider_arn}
+        return self.get_response('GetSAMLProvider', params)
+
+    def update_saml_provider(self, saml_provider_arn, saml_metadata_document):
+        """
+        Updates the metadata document for an existing SAML provider.
+
+        This operation requires `Signature Version 4`_.
+
+        :type saml_provider_arn: string
+        :param saml_provider_arn: The Amazon Resource Name (ARN) of the SAML
+            provider to update.
+
+        :type saml_metadata_document: string
+        :param saml_metadata_document: An XML document generated by an identity
+            provider (IdP) that supports SAML 2.0.
The document includes the + issuer's name, expiration information, and keys that can be used to + validate the SAML authentication response (assertions) that are + received from the IdP. You must generate the metadata document + using the identity management software that is used as your + organization's IdP. + + """ + params = { + 'SAMLMetadataDocument': saml_metadata_document, + 'SAMLProviderArn': saml_provider_arn, + } + return self.get_response('UpdateSAMLProvider', params) + + def delete_saml_provider(self, saml_provider_arn): + """ + Deletes a SAML provider. + + Deleting the provider does not update any roles that reference + the SAML provider as a principal in their trust policies. Any + attempt to assume a role that references a SAML provider that + has been deleted will fail. + This operation requires `Signature Version 4`_. + + :type saml_provider_arn: string + :param saml_provider_arn: The Amazon Resource Name (ARN) of the SAML + provider to delete. + + """ + params = {'SAMLProviderArn': saml_provider_arn} + return self.get_response('DeleteSAMLProvider', params) + + # + # IAM Reports + # + + def generate_credential_report(self): + """ + Generates a credential report for an account + + A new credential report can only be generated every 4 hours. If one + hasn't been generated in the last 4 hours then get_credential_report + will error when called + """ + params = {} + return self.get_response('GenerateCredentialReport', params) + + def get_credential_report(self): + """ + Retrieves a credential report for an account + + A report must have been generated in the last 4 hours to succeed. + The report is returned as a base64 encoded blob within the response. + """ + params = {} + return self.get_response('GetCredentialReport', params) + + def create_virtual_mfa_device(self, path, device_name): + """ + Creates a new virtual MFA device for the AWS account. + + After creating the virtual MFA, use enable-mfa-device to + attach the MFA device to an IAM user. + + :type path: string + :param path: The path for the virtual MFA device. + + :type device_name: string + :param device_name: The name of the virtual MFA device. + Used with path to uniquely identify a virtual MFA device. + + """ + params = { + 'Path': path, + 'VirtualMFADeviceName': device_name + } + return self.get_response('CreateVirtualMFADevice', params) + + # + # IAM password policy + # + + def get_account_password_policy(self): + """ + Returns the password policy for the AWS account. + """ + params = {} + return self.get_response('GetAccountPasswordPolicy', params) + + def delete_account_password_policy(self): + """ + Delete the password policy currently set for the AWS account. + """ + params = {} + return self.get_response('DeleteAccountPasswordPolicy', params) + + def update_account_password_policy(self, allow_users_to_change_password=None, + hard_expiry=None, max_password_age=None , + minimum_password_length=None , + password_reuse_prevention=None, + require_lowercase_characters=None, + require_numbers=None, require_symbols=None , + require_uppercase_characters=None): + """ + Update the password policy for the AWS account. + + Notes: unset parameters will be reset to Amazon default settings! + Most of the password policy settings are enforced the next time your users + change their passwords. 
When you set minimum length and character type + requirements, they are enforced the next time your users change their + passwords - users are not forced to change their existing passwords, even + if the pre-existing passwords do not adhere to the updated password + policy. When you set a password expiration period, the expiration period + is enforced immediately. + + :type allow_users_to_change_password: bool + :param allow_users_to_change_password: Allows all IAM users in your account + to use the AWS Management Console to change their own passwords. + + :type hard_expiry: bool + :param hard_expiry: Prevents IAM users from setting a new password after + their password has expired. + + :type max_password_age: int + :param max_password_age: The number of days that an IAM user password is valid. + + :type minimum_password_length: int + :param minimum_password_length: The minimum number of characters allowed in + an IAM user password. + + :type password_reuse_prevention: int + :param password_reuse_prevention: Specifies the number of previous passwords + that IAM users are prevented from reusing. + + :type require_lowercase_characters: bool + :param require_lowercase_characters: Specifies whether IAM user passwords + must contain at least one lowercase character from the ISO basic Latin + alphabet (``a`` to ``z``). + + :type require_numbers: bool + :param require_numbers: Specifies whether IAM user passwords must contain at + least one numeric character (``0`` to ``9``). + + :type require_symbols: bool + :param require_symbols: Specifies whether IAM user passwords must contain at + least one of the following non-alphanumeric characters: + ``! @ # $ % ^ & * ( ) _ + - = [ ] { } | '`` + + :type require_uppercase_characters: bool + :param require_uppercase_characters: Specifies whether IAM user passwords + must contain at least one uppercase character from the ISO basic Latin + alphabet (``A`` to ``Z``). 
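+
+        For example, assuming ``cxn`` is an ``IAMConnection`` (remember
+        that any parameter left unset reverts to its AWS default)::
+
+            cxn.update_account_password_policy(minimum_password_length=12,
+                                               require_numbers=True,
+                                               require_symbols=True)
+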
+        """
+        # Each boolean parameter is checked on its own; parameters left
+        # unset (None) are omitted so they revert to the AWS defaults.
+        params = {}
+        if isinstance(allow_users_to_change_password, bool):
+            params['AllowUsersToChangePassword'] = str(allow_users_to_change_password).lower()
+        if isinstance(hard_expiry, bool):
+            params['HardExpiry'] = str(hard_expiry).lower()
+        if max_password_age is not None:
+            params['MaxPasswordAge'] = max_password_age
+        if minimum_password_length is not None:
+            params['MinimumPasswordLength'] = minimum_password_length
+        if password_reuse_prevention is not None:
+            params['PasswordReusePrevention'] = password_reuse_prevention
+        if isinstance(require_lowercase_characters, bool):
+            params['RequireLowercaseCharacters'] = str(require_lowercase_characters).lower()
+        if isinstance(require_numbers, bool):
+            params['RequireNumbers'] = str(require_numbers).lower()
+        if isinstance(require_symbols, bool):
+            params['RequireSymbols'] = str(require_symbols).lower()
+        if isinstance(require_uppercase_characters, bool):
+            params['RequireUppercaseCharacters'] = str(require_uppercase_characters).lower()
+        return self.get_response('UpdateAccountPasswordPolicy', params)
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/iam/summarymap.py b/desktop/core/ext-py/boto-2.38.0/boto/iam/summarymap.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7976bb7e1e26d72681a79deb4ec6505a4e59d7b
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/iam/summarymap.py
@@ -0,0 +1,42 @@
+# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
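+#
+# Rough usage sketch (for orientation only): IAMConnection passes this
+# class to get_object(), which fills it from the GetAccountSummary
+# response, so callers just treat the result as a dict, e.g.:
+#
+#     import boto
+#     cxn = boto.connect_iam()
+#     summary = cxn.get_account_summary()
+#     users = summary.get('Users')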
+ + +class SummaryMap(dict): + + def __init__(self, parent=None): + self.parent = parent + dict.__init__(self) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'key': + self._name = value + elif name == 'value': + try: + self[self._name] = int(value) + except ValueError: + self[self._name] = value + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/jsonresponse.py b/desktop/core/ext-py/boto-2.38.0/boto/jsonresponse.py new file mode 100644 index 0000000000000000000000000000000000000000..f872b42990dad36efad943990fe37b70ed8f8ca9 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/jsonresponse.py @@ -0,0 +1,168 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
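+#
+# Rough usage sketch (for orientation only; ``Element`` and
+# ``XmlHandler`` are defined below). Feeding a response body through
+# the handler populates the root Element like a nested dict:
+#
+#     root = Element()
+#     handler = XmlHandler(root, None)
+#     handler.parse('<Response><Name>example</Name></Response>')
+#     root.Name    # -> 'example'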
+ +import xml.sax +from boto import utils + + +class XmlHandler(xml.sax.ContentHandler): + + def __init__(self, root_node, connection): + self.connection = connection + self.nodes = [('root', root_node)] + self.current_text = '' + + def startElement(self, name, attrs): + self.current_text = '' + t = self.nodes[-1][1].startElement(name, attrs, self.connection) + if t is not None: + if isinstance(t, tuple): + self.nodes.append(t) + else: + self.nodes.append((name, t)) + + def endElement(self, name): + self.nodes[-1][1].endElement(name, self.current_text, self.connection) + if self.nodes[-1][0] == name: + self.nodes.pop() + self.current_text = '' + + def characters(self, content): + self.current_text += content + + def parse(self, s): + if not isinstance(s, bytes): + s = s.encode('utf-8') + xml.sax.parseString(s, self) + + +class Element(dict): + + def __init__(self, connection=None, element_name=None, + stack=None, parent=None, list_marker=('Set',), + item_marker=('member', 'item'), + pythonize_name=False): + dict.__init__(self) + self.connection = connection + self.element_name = element_name + self.list_marker = utils.mklist(list_marker) + self.item_marker = utils.mklist(item_marker) + if stack is None: + self.stack = [] + else: + self.stack = stack + self.pythonize_name = pythonize_name + self.parent = parent + + def __getattr__(self, key): + if key in self: + return self[key] + for k in self: + e = self[k] + if isinstance(e, Element): + try: + return getattr(e, key) + except AttributeError: + pass + raise AttributeError + + def get_name(self, name): + if self.pythonize_name: + name = utils.pythonize_name(name) + return name + + def startElement(self, name, attrs, connection): + self.stack.append(name) + for lm in self.list_marker: + if name.endswith(lm): + l = ListElement(self.connection, name, self.list_marker, + self.item_marker, self.pythonize_name) + self[self.get_name(name)] = l + return l + if len(self.stack) > 0: + element_name = self.stack[-1] + e = Element(self.connection, element_name, self.stack, self, + self.list_marker, self.item_marker, + self.pythonize_name) + self[self.get_name(element_name)] = e + return (element_name, e) + else: + return None + + def endElement(self, name, value, connection): + if len(self.stack) > 0: + self.stack.pop() + value = value.strip() + if value: + if isinstance(self.parent, Element): + self.parent[self.get_name(name)] = value + elif isinstance(self.parent, ListElement): + self.parent.append(value) + + +class ListElement(list): + + def __init__(self, connection=None, element_name=None, + list_marker=['Set'], item_marker=('member', 'item'), + pythonize_name=False): + list.__init__(self) + self.connection = connection + self.element_name = element_name + self.list_marker = list_marker + self.item_marker = item_marker + self.pythonize_name = pythonize_name + + def get_name(self, name): + if self.pythonize_name: + name = utils.pythonize_name(name) + return name + + def startElement(self, name, attrs, connection): + for lm in self.list_marker: + if name.endswith(lm): + l = ListElement(self.connection, name, + self.list_marker, self.item_marker, + self.pythonize_name) + setattr(self, self.get_name(name), l) + return l + if name in self.item_marker: + e = Element(self.connection, name, parent=self, + list_marker=self.list_marker, + item_marker=self.item_marker, + pythonize_name=self.pythonize_name) + self.append(e) + return e + else: + return None + + def endElement(self, name, value, connection): + if name == self.element_name: + if len(self) > 0: + 
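+                # Prune child Elements that ended up empty: their
+                # scalar content was promoted onto this list or the
+                # enclosing structures as it was parsed.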
empty = [] + for e in self: + if isinstance(e, Element): + if len(e) == 0: + empty.append(e) + for e in empty: + self.remove(e) + else: + setattr(self, self.get_name(name), value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/kinesis/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/kinesis/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5fc33c742a170fea702af9cdaf1e1c0a355bb335 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/kinesis/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the Amazon Kinesis service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.kinesis.layer1 import KinesisConnection + return get_regions('kinesis', connection_cls=KinesisConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/kinesis/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/kinesis/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..708f46369a473cf4714a2f9e00e335c9a2f456f9 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/kinesis/exceptions.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import BotoServerError + + +class ProvisionedThroughputExceededException(BotoServerError): + pass + + +class LimitExceededException(BotoServerError): + pass + + +class ExpiredIteratorException(BotoServerError): + pass + + +class ResourceInUseException(BotoServerError): + pass + + +class ResourceNotFoundException(BotoServerError): + pass + + +class InvalidArgumentException(BotoServerError): + pass + + +class SubscriptionRequiredException(BotoServerError): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/kinesis/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/kinesis/layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..f1910ff4e9e6c20a161f5cefd2c2170b778a254b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/kinesis/layer1.py @@ -0,0 +1,875 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import base64 +import boto + +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.kinesis import exceptions +from boto.compat import json +from boto.compat import six + + +class KinesisConnection(AWSQueryConnection): + """ + Amazon Kinesis Service API Reference + Amazon Kinesis is a managed service that scales elastically for + real time processing of streaming big data. 
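+
+    A minimal connection sketch (the region is illustrative;
+    credentials are resolved through the usual boto configuration
+    chain)::
+
+        from boto.kinesis import connect_to_region
+
+        cxn = connect_to_region('us-east-1')
+        cxn.create_stream('example-stream', shard_count=1)
+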
+    """
+    APIVersion = "2013-12-02"
+    DefaultRegionName = "us-east-1"
+    DefaultRegionEndpoint = "kinesis.us-east-1.amazonaws.com"
+    ServiceName = "Kinesis"
+    TargetPrefix = "Kinesis_20131202"
+    ResponseError = JSONResponseError
+
+    _faults = {
+        "ProvisionedThroughputExceededException": exceptions.ProvisionedThroughputExceededException,
+        "LimitExceededException": exceptions.LimitExceededException,
+        "ExpiredIteratorException": exceptions.ExpiredIteratorException,
+        "ResourceInUseException": exceptions.ResourceInUseException,
+        "ResourceNotFoundException": exceptions.ResourceNotFoundException,
+        "InvalidArgumentException": exceptions.InvalidArgumentException,
+        "SubscriptionRequiredException": exceptions.SubscriptionRequiredException
+    }
+
+    def __init__(self, **kwargs):
+        region = kwargs.pop('region', None)
+        if not region:
+            region = RegionInfo(self, self.DefaultRegionName,
+                                self.DefaultRegionEndpoint)
+        if 'host' not in kwargs:
+            kwargs['host'] = region.endpoint
+        super(KinesisConnection, self).__init__(**kwargs)
+        self.region = region
+
+    def _required_auth_capability(self):
+        return ['hmac-v4']
+
+    def add_tags_to_stream(self, stream_name, tags):
+        """
+        Adds or updates tags for the specified Amazon Kinesis stream.
+        Each stream can have up to 10 tags.
+
+        If tags have already been assigned to the stream,
+        `AddTagsToStream` overwrites any existing tags that correspond
+        to the specified tag keys.
+
+        :type stream_name: string
+        :param stream_name: The name of the stream.
+
+        :type tags: map
+        :param tags: The set of key-value pairs to use to create the tags.
+
+        """
+        params = {'StreamName': stream_name, 'Tags': tags, }
+        return self.make_request(action='AddTagsToStream',
+                                 body=json.dumps(params))
+
+    def create_stream(self, stream_name, shard_count):
+        """
+        Creates an Amazon Kinesis stream. A stream captures and
+        transports data records that are continuously emitted from
+        different data sources or producers. Scale-out within an
+        Amazon Kinesis stream is explicitly supported by means of
+        shards, which are uniquely identified groups of data records
+        in an Amazon Kinesis stream.
+
+        You specify and control the number of shards that a stream is
+        composed of. Each open shard can support up to 5 read
+        transactions per second, up to a maximum total of 2 MB of data
+        read per second. Each shard can support up to 1000 records
+        written per second, up to a maximum total of 1 MB data written
+        per second. You can add shards to a stream if the amount of
+        data input increases and you can remove shards if the amount
+        of data input decreases.
+
+        The stream name identifies the stream. The name is scoped to
+        the AWS account used by the application. It is also scoped by
+        region. That is, two streams in two different accounts can
+        have the same name, and two streams in the same account, but
+        in two different regions, can have the same name.
+
+        `CreateStream` is an asynchronous operation. Upon receiving a
+        `CreateStream` request, Amazon Kinesis immediately returns and
+        sets the stream status to `CREATING`. After the stream is
+        created, Amazon Kinesis sets the stream status to `ACTIVE`.
+        You should perform read and write operations only on an
+        `ACTIVE` stream.
+
+        You receive a `LimitExceededException` when making a
+        `CreateStream` request if you try to do one of the following:
+
+        + Have more than five streams in the `CREATING` state at any
+          point in time.
+        + Create more shards than are authorized for your account.
+ + + The default limit for an AWS account is 10 shards per stream. + If you need to create a stream with more than 10 shards, + `contact AWS Support`_ to increase the limit on your account. + + You can use `DescribeStream` to check the stream status, which + is returned in `StreamStatus`. + + `CreateStream` has a limit of 5 transactions per second per + account. + + :type stream_name: string + :param stream_name: A name to identify the stream. The stream name is + scoped to the AWS account used by the application that creates the + stream. It is also scoped by region. That is, two streams in two + different AWS accounts can have the same name, and two streams in + the same AWS account, but in two different regions, can have the + same name. + + :type shard_count: integer + :param shard_count: The number of shards that the stream will use. The + throughput of the stream is a function of the number of shards; + more shards are required for greater provisioned throughput. + **Note:** The default limit for an AWS account is 10 shards per stream. + If you need to create a stream with more than 10 shards, `contact + AWS Support`_ to increase the limit on your account. + + """ + params = { + 'StreamName': stream_name, + 'ShardCount': shard_count, + } + return self.make_request(action='CreateStream', + body=json.dumps(params)) + + def delete_stream(self, stream_name): + """ + Deletes a stream and all its shards and data. You must shut + down any applications that are operating on the stream before + you delete the stream. If an application attempts to operate + on a deleted stream, it will receive the exception + `ResourceNotFoundException`. + + If the stream is in the `ACTIVE` state, you can delete it. + After a `DeleteStream` request, the specified stream is in the + `DELETING` state until Amazon Kinesis completes the deletion. + + **Note:** Amazon Kinesis might continue to accept data read + and write operations, such as PutRecord, PutRecords, and + GetRecords, on a stream in the `DELETING` state until the + stream deletion is complete. + + When you delete a stream, any shards in that stream are also + deleted, and any tags are dissociated from the stream. + + You can use the DescribeStream operation to check the state of + the stream, which is returned in `StreamStatus`. + + `DeleteStream` has a limit of 5 transactions per second per + account. + + :type stream_name: string + :param stream_name: The name of the stream to delete. + + """ + params = {'StreamName': stream_name, } + return self.make_request(action='DeleteStream', + body=json.dumps(params)) + + def describe_stream(self, stream_name, limit=None, + exclusive_start_shard_id=None): + """ + Describes the specified stream. + + The information about the stream includes its current status, + its Amazon Resource Name (ARN), and an array of shard objects. + For each shard object, there is information about the hash key + and sequence number ranges that the shard spans, and the IDs + of any earlier shards that played in a role in creating the + shard. A sequence number is the identifier associated with + every record ingested in the Amazon Kinesis stream. The + sequence number is assigned when a record is put into the + stream. + + You can limit the number of returned shards using the `Limit` + parameter. The number of shards in a stream may be too large + to return from a single call to `DescribeStream`. You can + detect this by using the `HasMoreShards` flag in the returned + output. 
`HasMoreShards` is set to `True` when there is more + data available. + + `DescribeStream` is a paginated operation. If there are more + shards available, you can request them using the shard ID of + the last shard returned. Specify this ID in the + `ExclusiveStartShardId` parameter in a subsequent request to + `DescribeStream`. + + `DescribeStream` has a limit of 10 transactions per second per + account. + + :type stream_name: string + :param stream_name: The name of the stream to describe. + + :type limit: integer + :param limit: The maximum number of shards to return. + + :type exclusive_start_shard_id: string + :param exclusive_start_shard_id: The shard ID of the shard to start + with. + + """ + params = {'StreamName': stream_name, } + if limit is not None: + params['Limit'] = limit + if exclusive_start_shard_id is not None: + params['ExclusiveStartShardId'] = exclusive_start_shard_id + return self.make_request(action='DescribeStream', + body=json.dumps(params)) + + def get_records(self, shard_iterator, limit=None, b64_decode=True): + """ + Gets data records from a shard. + + Specify a shard iterator using the `ShardIterator` parameter. + The shard iterator specifies the position in the shard from + which you want to start reading data records sequentially. If + there are no records available in the portion of the shard + that the iterator points to, `GetRecords` returns an empty + list. Note that it might take multiple calls to get to a + portion of the shard that contains records. + + You can scale by provisioning multiple shards. Your + application should have one thread per shard, each reading + continuously from its stream. To read from a stream + continually, call `GetRecords` in a loop. Use GetShardIterator + to get the shard iterator to specify in the first `GetRecords` + call. `GetRecords` returns a new shard iterator in + `NextShardIterator`. Specify the shard iterator returned in + `NextShardIterator` in subsequent calls to `GetRecords`. Note + that if the shard has been closed, the shard iterator can't + return more data and `GetRecords` returns `null` in + `NextShardIterator`. You can terminate the loop when the shard + is closed, or when the shard iterator reaches the record with + the sequence number or other attribute that marks it as the + last record to process. + + Each data record can be up to 50 KB in size, and each shard + can read up to 2 MB per second. You can ensure that your calls + don't exceed the maximum supported size or throughput by using + the `Limit` parameter to specify the maximum number of records + that `GetRecords` can return. Consider your average record + size when determining this limit. For example, if your average + record size is 40 KB, you can limit the data returned to about + 1 MB per call by specifying 25 as the limit. + + The size of the data returned by `GetRecords` will vary + depending on the utilization of the shard. The maximum size of + data that `GetRecords` can return is 10 MB. If a call returns + 10 MB of data, subsequent calls made within the next 5 seconds + throw `ProvisionedThroughputExceededException`. If there is + insufficient provisioned throughput on the shard, subsequent + calls made within the next 1 second throw + `ProvisionedThroughputExceededException`. Note that + `GetRecords` won't return any data when it throws an + exception. For this reason, we recommend that you wait one + second between calls to `GetRecords`; however, it's possible + that the application will get exceptions for longer than 1 + second. 
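+
+        As an illustration only, a minimal read loop over a single
+        shard might look like the following sketch (``conn`` is an
+        existing `KinesisConnection`; the stream name, shard ID, and
+        ``process`` handler are hypothetical)::
+
+            import time
+
+            it = conn.get_shard_iterator('my-stream',
+                                         'shardId-000000000000',
+                                         'TRIM_HORIZON')['ShardIterator']
+            while it is not None:
+                result = conn.get_records(it, limit=100)
+                for record in result['Records']:
+                    process(record['Data'])
+                # NextShardIterator is None once the shard is closed.
+                it = result['NextShardIterator']
+                time.sleep(1)  # stay under the per-shard read limits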
+
+        To detect whether the application is falling behind in
+        processing, add a timestamp to your records and note how long
+        it takes to process them. You can also monitor how much data
+        is in a stream using the CloudWatch metrics for write
+        operations ( `PutRecord` and `PutRecords`). For more
+        information, see `Monitoring Amazon Kinesis with Amazon
+        CloudWatch`_ in the Amazon Kinesis Developer Guide .
+
+        :type shard_iterator: string
+        :param shard_iterator: The position in the shard from which you want to
+            start sequentially reading data records. A shard iterator specifies
+            this position using the sequence number of a data record in the
+            shard.
+
+        :type limit: integer
+        :param limit: The maximum number of records to return. Specify a value
+            of up to 10,000. If you specify a value that is greater than
+            10,000, `GetRecords` throws `InvalidArgumentException`.
+
+        :type b64_decode: boolean
+        :param b64_decode: Decode the Base64-encoded ``Data`` field of records.
+
+        """
+        params = {'ShardIterator': shard_iterator, }
+        if limit is not None:
+            params['Limit'] = limit
+
+        response = self.make_request(action='GetRecords',
+                                     body=json.dumps(params))
+
+        # Base64 decode the data
+        if b64_decode:
+            for record in response.get('Records', []):
+                record['Data'] = base64.b64decode(
+                    record['Data'].encode('utf-8')).decode('utf-8')
+
+        return response
+
+    def get_shard_iterator(self, stream_name, shard_id, shard_iterator_type,
+                           starting_sequence_number=None):
+        """
+        Gets a shard iterator. A shard iterator expires five minutes
+        after it is returned to the requester.
+
+        A shard iterator specifies the position in the shard from
+        which to start reading data records sequentially. A shard
+        iterator specifies this position using the sequence number of
+        a data record in a shard. A sequence number is the identifier
+        associated with every record ingested in the Amazon Kinesis
+        stream. The sequence number is assigned when a record is put
+        into the stream.
+
+        You must specify the shard iterator type. For example, you can
+        set the `ShardIteratorType` parameter to read exactly from the
+        position denoted by a specific sequence number by using the
+        `AT_SEQUENCE_NUMBER` shard iterator type, or right after the
+        sequence number by using the `AFTER_SEQUENCE_NUMBER` shard
+        iterator type, using sequence numbers returned by earlier
+        calls to PutRecord, PutRecords, GetRecords, or DescribeStream.
+        You can specify the shard iterator type `TRIM_HORIZON` in the
+        request to cause `ShardIterator` to point to the last
+        untrimmed record in the shard in the system, which is the
+        oldest data record in the shard. Or you can point to just
+        after the most recent record in the shard, by using the shard
+        iterator type `LATEST`, so that you always read the most
+        recent data in the shard.
+
+        When you repeatedly read from an Amazon Kinesis stream, use a
+        GetShardIterator request to get the first shard iterator to
+        use in your first `GetRecords` request and then use the shard
+        iterator returned by the `GetRecords` request in
+        `NextShardIterator` for subsequent reads. A new shard iterator
+        is returned by every `GetRecords` request in
+        `NextShardIterator`, which you use in the `ShardIterator`
+        parameter of the next `GetRecords` request.
+
+        If a `GetShardIterator` request is made too often, you receive
+        a `ProvisionedThroughputExceededException`. For more
+        information about throughput limits, see GetRecords.
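+
+        For example, a sketch of two common starting points (``conn``
+        is an existing `KinesisConnection`; the stream name, shard ID,
+        and ``seq`` value are hypothetical)::
+
+            # Start from the oldest record still in the shard.
+            conn.get_shard_iterator('my-stream', 'shardId-000000000000',
+                                    'TRIM_HORIZON')
+            # Resume just after a sequence number from an earlier call.
+            conn.get_shard_iterator('my-stream', 'shardId-000000000000',
+                                    'AFTER_SEQUENCE_NUMBER',
+                                    starting_sequence_number=seq)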
+
+        If the shard is closed, the iterator can't return more data,
+        and `GetShardIterator` returns `null` for its `ShardIterator`.
+        A shard can be closed using SplitShard or MergeShards.
+
+        `GetShardIterator` has a limit of 5 transactions per second
+        per account per open shard.
+
+        :type stream_name: string
+        :param stream_name: The name of the stream.
+
+        :type shard_id: string
+        :param shard_id: The shard ID of the shard to get the iterator for.
+
+        :type shard_iterator_type: string
+        :param shard_iterator_type:
+            Determines how the shard iterator is used to start reading data records
+            from the shard.
+
+            The following are the valid shard iterator types:
+
+
+            + AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted
+              by a specific sequence number.
+            + AFTER_SEQUENCE_NUMBER - Start reading right after the position
+              denoted by a specific sequence number.
+            + TRIM_HORIZON - Start reading at the last untrimmed record in the
+              shard in the system, which is the oldest data record in the shard.
+            + LATEST - Start reading just after the most recent record in the
+              shard, so that you always read the most recent data in the shard.
+
+        :type starting_sequence_number: string
+        :param starting_sequence_number: The sequence number of the data record
+            in the shard from which to start reading.
+
+        """
+        params = {
+            'StreamName': stream_name,
+            'ShardId': shard_id,
+            'ShardIteratorType': shard_iterator_type,
+        }
+        if starting_sequence_number is not None:
+            params['StartingSequenceNumber'] = starting_sequence_number
+        return self.make_request(action='GetShardIterator',
+                                 body=json.dumps(params))
+
+    def list_streams(self, limit=None, exclusive_start_stream_name=None):
+        """
+        Lists your streams.
+
+        The number of streams may be too large to return from a single
+        call to `ListStreams`. You can limit the number of returned
+        streams using the `Limit` parameter. If you do not specify a
+        value for the `Limit` parameter, Amazon Kinesis uses the
+        default limit, which is currently 10.
+
+        You can detect if there are more streams available to list by
+        using the `HasMoreStreams` flag from the returned output. If
+        there are more streams available, you can request more streams
+        by using the name of the last stream returned by the
+        `ListStreams` request in the `ExclusiveStartStreamName`
+        parameter in a subsequent request to `ListStreams`. The group
+        of stream names returned by the subsequent request is then
+        added to the list. You can continue this process until all the
+        stream names have been collected in the list.
+
+        `ListStreams` has a limit of 5 transactions per second per
+        account.
+
+        :type limit: integer
+        :param limit: The maximum number of streams to list.
+
+        :type exclusive_start_stream_name: string
+        :param exclusive_start_stream_name: The name of the stream to start the
+            list with.
+
+        """
+        params = {}
+        if limit is not None:
+            params['Limit'] = limit
+        if exclusive_start_stream_name is not None:
+            params['ExclusiveStartStreamName'] = exclusive_start_stream_name
+        return self.make_request(action='ListStreams',
+                                 body=json.dumps(params))
+
+    def list_tags_for_stream(self, stream_name, exclusive_start_tag_key=None,
+                             limit=None):
+        """
+        Lists the tags for the specified Amazon Kinesis stream.
+
+        :type stream_name: string
+        :param stream_name: The name of the stream.
+
+        :type exclusive_start_tag_key: string
+        :param exclusive_start_tag_key: The key to use as the starting point
+            for the list of tags. If this parameter is set, `ListTagsForStream`
+            gets all tags that occur after `ExclusiveStartTagKey`.
+
+        :type limit: integer
+        :param limit: The number of tags to return. If this number is less than
+            the total number of tags associated with the stream, `HasMoreTags`
+            is set to `True`. To list additional tags, set
+            `ExclusiveStartTagKey` to the last key in the response.
+
+        """
+        params = {'StreamName': stream_name, }
+        if exclusive_start_tag_key is not None:
+            params['ExclusiveStartTagKey'] = exclusive_start_tag_key
+        if limit is not None:
+            params['Limit'] = limit
+        return self.make_request(action='ListTagsForStream',
+                                 body=json.dumps(params))
+
+    def merge_shards(self, stream_name, shard_to_merge,
+                     adjacent_shard_to_merge):
+        """
+        Merges two adjacent shards in a stream and combines them into
+        a single shard to reduce the stream's capacity to ingest and
+        transport data. Two shards are considered adjacent if the
+        union of the hash key ranges for the two shards forms a
+        contiguous set with no gaps. For example, if you have two
+        shards, one with a hash key range of 276...381 and the other
+        with a hash key range of 382...454, then you could merge these
+        two shards into a single shard that would have a hash key
+        range of 276...454. After the merge, the single child shard
+        receives data for all hash key values covered by the two
+        parent shards.
+
+        `MergeShards` is called when there is a need to reduce the
+        overall capacity of a stream because of excess capacity that
+        is not being used. You must specify the shard to be merged and
+        the adjacent shard for a stream. For more information about
+        merging shards, see `Merge Two Shards`_ in the Amazon Kinesis
+        Developer Guide .
+
+        If the stream is in the `ACTIVE` state, you can call
+        `MergeShards`. If a stream is in the `CREATING`, `UPDATING`,
+        or `DELETING` state, `MergeShards` returns a
+        `ResourceInUseException`. If the specified stream does not
+        exist, `MergeShards` returns a `ResourceNotFoundException`.
+
+        You can use DescribeStream to check the state of the stream,
+        which is returned in `StreamStatus`.
+
+        `MergeShards` is an asynchronous operation. Upon receiving a
+        `MergeShards` request, Amazon Kinesis immediately returns a
+        response and sets the `StreamStatus` to `UPDATING`. After the
+        operation is completed, Amazon Kinesis sets the `StreamStatus`
+        to `ACTIVE`. Read and write operations continue to work while
+        the stream is in the `UPDATING` state.
+
+        You use DescribeStream to determine the shard IDs that are
+        specified in the `MergeShards` request.
+
+        If you try to operate on too many streams in parallel using
+        CreateStream, DeleteStream, `MergeShards` or SplitShard, you
+        will receive a `LimitExceededException`.
+
+        `MergeShards` has a limit of 5 transactions per second per
+        account.
+
+        :type stream_name: string
+        :param stream_name: The name of the stream for the merge.
+
+        :type shard_to_merge: string
+        :param shard_to_merge: The shard ID of the shard to combine with the
+            adjacent shard for the merge.
+
+        :type adjacent_shard_to_merge: string
+        :param adjacent_shard_to_merge: The shard ID of the adjacent shard for
+            the merge.
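+
+        As a sketch, the shard IDs can be discovered with
+        `DescribeStream` before merging (``conn`` is an existing
+        `KinesisConnection`; the stream name is hypothetical)::
+
+            desc = conn.describe_stream('my-stream')
+            shards = desc['StreamDescription']['Shards']
+            # The two shards must have contiguous hash key ranges.
+            conn.merge_shards('my-stream',
+                              shards[0]['ShardId'],
+                              shards[1]['ShardId'])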
+ + """ + params = { + 'StreamName': stream_name, + 'ShardToMerge': shard_to_merge, + 'AdjacentShardToMerge': adjacent_shard_to_merge, + } + return self.make_request(action='MergeShards', + body=json.dumps(params)) + + def put_record(self, stream_name, data, partition_key, + explicit_hash_key=None, + sequence_number_for_ordering=None, + exclusive_minimum_sequence_number=None, + b64_encode=True): + """ + This operation puts a data record into an Amazon Kinesis + stream from a producer. This operation must be called to send + data from the producer into the Amazon Kinesis stream for + real-time ingestion and subsequent processing. The `PutRecord` + operation requires the name of the stream that captures, + stores, and transports the data; a partition key; and the data + blob itself. The data blob could be a segment from a log file, + geographic/location data, website clickstream data, or any + other data type. + + The partition key is used to distribute data across shards. + Amazon Kinesis segregates the data records that belong to a + data stream into multiple shards, using the partition key + associated with each data record to determine which shard a + given data record belongs to. + + Partition keys are Unicode strings, with a maximum length + limit of 256 bytes. An MD5 hash function is used to map + partition keys to 128-bit integer values and to map associated + data records to shards using the hash key ranges of the + shards. You can override hashing the partition key to + determine the shard by explicitly specifying a hash value + using the `ExplicitHashKey` parameter. For more information, + see the `Amazon Kinesis Developer Guide`_. + + `PutRecord` returns the shard ID of where the data record was + placed and the sequence number that was assigned to the data + record. + + Sequence numbers generally increase over time. To guarantee + strictly increasing ordering, use the + `SequenceNumberForOrdering` parameter. For more information, + see the `Amazon Kinesis Developer Guide`_. + + If a `PutRecord` request cannot be processed because of + insufficient provisioned throughput on the shard involved in + the request, `PutRecord` throws + `ProvisionedThroughputExceededException`. + + Data records are accessible for only 24 hours from the time + that they are added to an Amazon Kinesis stream. + + :type stream_name: string + :param stream_name: The name of the stream to put the data record into. + + :type data: blob + :param data: The data blob to put into the record, which is + Base64-encoded when the blob is serialized. + The maximum size of the data blob (the payload after + Base64-decoding) is 50 kilobytes (KB) + Set `b64_encode` to disable automatic Base64 encoding. + + :type partition_key: string + :param partition_key: Determines which shard in the stream the data + record is assigned to. Partition keys are Unicode strings with a + maximum length limit of 256 bytes. Amazon Kinesis uses the + partition key as input to a hash function that maps the partition + key and associated data to a specific shard. Specifically, an MD5 + hash function is used to map partition keys to 128-bit integer + values and to map associated data records to shards. As a result of + this hashing mechanism, all data records with the same partition + key will map to the same shard within the stream. + + :type explicit_hash_key: string + :param explicit_hash_key: The hash value used to explicitly determine + the shard the data record is assigned to by overriding the + partition key hash. 
+ + :type sequence_number_for_ordering: string + :param sequence_number_for_ordering: Guarantees strictly increasing + sequence numbers, for puts from the same client and to the same + partition key. Usage: set the `SequenceNumberForOrdering` of record + n to the sequence number of record n-1 (as returned in the + PutRecordResult when putting record n-1 ). If this parameter is not + set, records will be coarsely ordered based on arrival time. + + :type b64_encode: boolean + :param b64_encode: Whether to Base64 encode `data`. Can be set to + ``False`` if `data` is already encoded to prevent double encoding. + + """ + params = { + 'StreamName': stream_name, + 'Data': data, + 'PartitionKey': partition_key, + } + if explicit_hash_key is not None: + params['ExplicitHashKey'] = explicit_hash_key + if sequence_number_for_ordering is not None: + params['SequenceNumberForOrdering'] = sequence_number_for_ordering + if b64_encode: + if not isinstance(params['Data'], six.binary_type): + params['Data'] = params['Data'].encode('utf-8') + params['Data'] = base64.b64encode(params['Data']).decode('utf-8') + return self.make_request(action='PutRecord', + body=json.dumps(params)) + + def put_records(self, records, stream_name, b64_encode=True): + """ + Puts (writes) multiple data records from a producer into an + Amazon Kinesis stream in a single call (also referred to as a + `PutRecords` request). Use this operation to send data from a + data producer into the Amazon Kinesis stream for real-time + ingestion and processing. Each shard can support up to 1000 + records written per second, up to a maximum total of 1 MB data + written per second. + + You must specify the name of the stream that captures, stores, + and transports the data; and an array of request `Records`, + with each record in the array requiring a partition key and + data blob. + + The data blob can be any type of data; for example, a segment + from a log file, geographic/location data, website clickstream + data, and so on. + + The partition key is used by Amazon Kinesis as input to a hash + function that maps the partition key and associated data to a + specific shard. An MD5 hash function is used to map partition + keys to 128-bit integer values and to map associated data + records to shards. As a result of this hashing mechanism, all + data records with the same partition key map to the same shard + within the stream. For more information, see `Partition Key`_ + in the Amazon Kinesis Developer Guide . + + Each record in the `Records` array may include an optional + parameter, `ExplicitHashKey`, which overrides the partition + key to shard mapping. This parameter allows a data producer to + determine explicitly the shard where the record is stored. For + more information, see `Adding Multiple Records with + PutRecords`_ in the Amazon Kinesis Developer Guide . + + The `PutRecords` response includes an array of response + `Records`. Each record in the response array directly + correlates with a record in the request array using natural + ordering, from the top to the bottom of the request and + response. The response `Records` array always includes the + same number of records as the request array. + + The response `Records` array includes both successfully and + unsuccessfully processed records. Amazon Kinesis attempts to + process all records in each `PutRecords` request. A single + record failure does not stop the processing of subsequent + records. + + A successfully-processed record includes `ShardId` and + `SequenceNumber` values. 
The `ShardId` parameter identifies
+        the shard in the stream where the record is stored. The
+        `SequenceNumber` parameter is an identifier assigned to the
+        put record, unique to all records in the stream.
+
+        An unsuccessfully-processed record includes `ErrorCode` and
+        `ErrorMessage` values. `ErrorCode` reflects the type of error
+        and can be one of the following values:
+        `ProvisionedThroughputExceededException` or `InternalFailure`.
+        `ErrorMessage` provides more detailed information about the
+        `ProvisionedThroughputExceededException` exception including
+        the account ID, stream name, and shard ID of the record that
+        was throttled.
+
+        Data records are accessible for only 24 hours from the time
+        that they are added to an Amazon Kinesis stream.
+
+        :type records: list
+        :param records: The records associated with the request.
+
+        :type stream_name: string
+        :param stream_name: The stream name associated with the request.
+
+        :type b64_encode: boolean
+        :param b64_encode: Whether to Base64 encode `data`. Can be set to
+            ``False`` if `data` is already encoded to prevent double encoding.
+
+        """
+        params = {'Records': records, 'StreamName': stream_name, }
+        if b64_encode:
+            for i in range(len(params['Records'])):
+                data = params['Records'][i]['Data']
+                if not isinstance(data, six.binary_type):
+                    data = data.encode('utf-8')
+                params['Records'][i]['Data'] = base64.b64encode(
+                    data).decode('utf-8')
+        return self.make_request(action='PutRecords',
+                                 body=json.dumps(params))
+
+    def remove_tags_from_stream(self, stream_name, tag_keys):
+        """
+        Deletes tags from the specified Amazon Kinesis stream.
+
+        If you specify a tag that does not exist, it is ignored.
+
+        :type stream_name: string
+        :param stream_name: The name of the stream.
+
+        :type tag_keys: list
+        :param tag_keys: A list of tag keys. Each corresponding tag is removed
+            from the stream.
+
+        """
+        params = {'StreamName': stream_name, 'TagKeys': tag_keys, }
+        return self.make_request(action='RemoveTagsFromStream',
+                                 body=json.dumps(params))
+
+    def split_shard(self, stream_name, shard_to_split, new_starting_hash_key):
+        """
+        Splits a shard into two new shards in the stream, to increase
+        the stream's capacity to ingest and transport data.
+        `SplitShard` is called when there is a need to increase the
+        overall capacity of a stream because of an expected increase
+        in the volume of data records being ingested.
+
+        You can also use `SplitShard` when a shard appears to be
+        approaching its maximum utilization, for example, when the set
+        of producers sending data into the specific shard is suddenly
+        sending more than previously anticipated. You can also call
+        `SplitShard` to increase stream capacity, so that more Amazon
+        Kinesis applications can simultaneously read data from the
+        stream for real-time processing.
+
+        You must specify the shard to be split and the new hash key,
+        which is the position in the shard where the shard gets split
+        in two. In many cases, the new hash key might simply be the
+        average of the beginning and ending hash key, but it can be
+        any hash key value in the range being mapped into the shard.
+        For more information about splitting shards, see `Split a
+        Shard`_ in the Amazon Kinesis Developer Guide .
+
+        You can use DescribeStream to determine the shard ID and hash
+        key values for the `ShardToSplit` and `NewStartingHashKey`
+        parameters that are specified in the `SplitShard` request.
+
+        `SplitShard` is an asynchronous operation. Upon receiving a
+        `SplitShard` request, Amazon Kinesis immediately returns a
+        response and sets the stream status to `UPDATING`. After the
+        operation is completed, Amazon Kinesis sets the stream status
+        to `ACTIVE`. Read and write operations continue to work while
+        the stream is in the `UPDATING` state.
+
+        You can use `DescribeStream` to check the status of the
+        stream, which is returned in `StreamStatus`. If the stream is
+        in the `ACTIVE` state, you can call `SplitShard`. If a stream
+        is in the `CREATING`, `UPDATING`, or `DELETING` state,
+        `DescribeStream` returns a `ResourceInUseException`.
+
+        If the specified stream does not exist, `DescribeStream`
+        returns a `ResourceNotFoundException`. If you try to create
+        more shards than are authorized for your account, you receive
+        a `LimitExceededException`.
+
+        The default limit for an AWS account is 10 shards per stream.
+        If you need to create a stream with more than 10 shards,
+        `contact AWS Support`_ to increase the limit on your account.
+
+        If you try to operate on too many streams in parallel using
+        CreateStream, DeleteStream, MergeShards or SplitShard, you
+        receive a `LimitExceededException`.
+
+        `SplitShard` has a limit of 5 transactions per second per
+        account.
+
+        :type stream_name: string
+        :param stream_name: The name of the stream for the shard split.
+
+        :type shard_to_split: string
+        :param shard_to_split: The shard ID of the shard to split.
+
+        :type new_starting_hash_key: string
+        :param new_starting_hash_key: A hash key value for the starting hash
+            key of one of the child shards created by the split. The hash key
+            range for a given shard constitutes a set of ordered contiguous
+            positive integers. The value for `NewStartingHashKey` must be in
+            the range of hash keys being mapped into the shard. The
+            `NewStartingHashKey` hash key value and all higher hash key values
+            in the hash key range are distributed to one of the child shards.
+            All the lower hash key values in the range are distributed to the
+            other child shard.
+
+        """
+        params = {
+            'StreamName': stream_name,
+            'ShardToSplit': shard_to_split,
+            'NewStartingHashKey': new_starting_hash_key,
+        }
+        return self.make_request(action='SplitShard',
+                                 body=json.dumps(params))
+
+    def make_request(self, action, body):
+        headers = {
+            'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
+            'Host': self.region.endpoint,
+            'Content-Type': 'application/x-amz-json-1.1',
+            'Content-Length': str(len(body)),
+        }
+        http_request = self.build_base_http_request(
+            method='POST', path='/', auth_path='/', params={},
+            headers=headers, data=body)
+        response = self._mexe(http_request, sender=None,
+                              override_num_retries=10)
+        response_body = response.read().decode('utf-8')
+        boto.log.debug(response.getheaders())
+        boto.log.debug(response_body)
+        if response.status == 200:
+            if response_body:
+                return json.loads(response_body)
+        else:
+            json_body = json.loads(response_body)
+            fault_name = json_body.get('__type', None)
+            exception_class = self._faults.get(fault_name, self.ResponseError)
+            raise exception_class(response.status, response.reason,
+                                  body=json_body)
+
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/kms/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/kms/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f6ded152820de239cb17228b5ec307ad5c67e3b3
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/kms/__init__.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the AWS Key Management Service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.kms.layer1 import KMSConnection + return get_regions('kms', connection_cls=KMSConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/kms/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/kms/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..8b422560a4b31c109ec52916d6c21b0587143f82 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/kms/exceptions.py @@ -0,0 +1,72 @@ +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# +from boto.exception import BotoServerError + + +class InvalidGrantTokenException(BotoServerError): + pass + + +class DisabledException(BotoServerError): + pass + + +class LimitExceededException(BotoServerError): + pass + + +class DependencyTimeoutException(BotoServerError): + pass + + +class InvalidMarkerException(BotoServerError): + pass + + +class AlreadyExistsException(BotoServerError): + pass + + +class InvalidCiphertextException(BotoServerError): + pass + + +class KeyUnavailableException(BotoServerError): + pass + + +class InvalidAliasNameException(BotoServerError): + pass + + +class UnsupportedOperationException(BotoServerError): + pass + + +class InvalidArnException(BotoServerError): + pass + + +class KMSInternalException(BotoServerError): + pass + + +class InvalidKeyUsageException(BotoServerError): + pass + + +class MalformedPolicyDocumentException(BotoServerError): + pass + + +class NotFoundException(BotoServerError): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/kms/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/kms/layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..f44cd048f7b1c5b78e17a6cb395ae5859374625d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/kms/layer1.py @@ -0,0 +1,821 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.kms import exceptions +from boto.compat import six +import base64 + + +class KMSConnection(AWSQueryConnection): + """ + AWS Key Management Service + AWS Key Management Service (KMS) is an encryption and key + management web service. This guide describes the KMS actions that + you can call programmatically. For general information about KMS, + see (need an address here). For the KMS developer guide, see (need + address here). + + AWS provides SDKs that consist of libraries and sample code for + various programming languages and platforms (Java, Ruby, .Net, + iOS, Android, etc.). The SDKs provide a convenient way to create + programmatic access to KMS and AWS. For example, the SDKs take + care of tasks such as signing requests (see below), managing + errors, and retrying requests automatically. For more information + about the AWS SDKs, including how to download and install them, + see `Tools for Amazon Web Services`_. 
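+
+    As a usage sketch (the region name and key description are
+    hypothetical)::
+
+        import boto.kms
+
+        conn = boto.kms.connect_to_region('us-east-1')
+        key = conn.create_key(description='example key')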
+
+    We recommend that you use the AWS SDKs to make programmatic API
+    calls to KMS. However, you can also use the KMS Query API to make
+    direct calls to the KMS web service.
+
+    **Signing Requests**
+
+    Requests must be signed by using an access key ID and a secret
+    access key. We strongly recommend that you do not use your AWS
+    account access key ID and secret key for everyday work with KMS.
+    Instead, use the access key ID and secret access key for an IAM
+    user, or you can use the AWS Security Token Service to generate
+    temporary security credentials that you can use to sign requests.
+
+    All KMS operations require `Signature Version 4`_.
+
+    **Recording API Requests**
+
+    KMS supports AWS CloudTrail, a service that records AWS API calls
+    and related events for your AWS account and delivers them to an
+    Amazon S3 bucket that you specify. By using the information
+    collected by CloudTrail, you can determine what requests were made
+    to KMS, who made the request, when it was made, and so on. To
+    learn more about CloudTrail, including how to turn it on and find
+    your log files, see the `AWS CloudTrail User Guide`_.
+
+    **Additional Resources**
+
+    For more information about credentials and request signing, see
+    the following:
+
+
+    + `AWS Security Credentials`_. This topic provides general
+      information about the types of credentials used for accessing AWS.
+    + `AWS Security Token Service`_. This guide describes how to
+      create and use temporary security credentials.
+    + `Signing AWS API Requests`_. This set of topics walks you
+      through the process of signing a request using an access key ID
+      and a secret access key.
+    """
+    APIVersion = "2014-11-01"
+    DefaultRegionName = "us-east-1"
+    DefaultRegionEndpoint = "kms.us-east-1.amazonaws.com"
+    ServiceName = "KMS"
+    TargetPrefix = "TrentService"
+    ResponseError = JSONResponseError
+
+    _faults = {
+        "InvalidGrantTokenException": exceptions.InvalidGrantTokenException,
+        "DisabledException": exceptions.DisabledException,
+        "LimitExceededException": exceptions.LimitExceededException,
+        "DependencyTimeoutException": exceptions.DependencyTimeoutException,
+        "InvalidMarkerException": exceptions.InvalidMarkerException,
+        "AlreadyExistsException": exceptions.AlreadyExistsException,
+        "InvalidCiphertextException": exceptions.InvalidCiphertextException,
+        "KeyUnavailableException": exceptions.KeyUnavailableException,
+        "InvalidAliasNameException": exceptions.InvalidAliasNameException,
+        "UnsupportedOperationException": exceptions.UnsupportedOperationException,
+        "InvalidArnException": exceptions.InvalidArnException,
+        "KMSInternalException": exceptions.KMSInternalException,
+        "InvalidKeyUsageException": exceptions.InvalidKeyUsageException,
+        "MalformedPolicyDocumentException": exceptions.MalformedPolicyDocumentException,
+        "NotFoundException": exceptions.NotFoundException,
+    }
+
+    def __init__(self, **kwargs):
+        region = kwargs.pop('region', None)
+        if not region:
+            region = RegionInfo(self, self.DefaultRegionName,
+                                self.DefaultRegionEndpoint)
+
+        if 'host' not in kwargs or kwargs['host'] is None:
+            kwargs['host'] = region.endpoint
+
+        super(KMSConnection, self).__init__(**kwargs)
+        self.region = region
+
+    def _required_auth_capability(self):
+        return ['hmac-v4']
+
+    def create_alias(self, alias_name, target_key_id):
+        """
+        Creates a display name for a customer master key. An alias can
+        be used to identify a key and should be unique. The console
+        enforces a one-to-one mapping between the alias and a key.
An + alias name can contain only alphanumeric characters, forward + slashes (/), underscores (_), and dashes (-). An alias must + start with the word "alias" followed by a forward slash + (alias/). An alias that begins with "aws" after the forward + slash (alias/aws...) is reserved by Amazon Web Services (AWS). + + :type alias_name: string + :param alias_name: String that contains the display name. Aliases that + begin with AWS are reserved. + + :type target_key_id: string + :param target_key_id: An identifier of the key for which you are + creating the alias. This value cannot be another alias. + + """ + params = { + 'AliasName': alias_name, + 'TargetKeyId': target_key_id, + } + return self.make_request(action='CreateAlias', + body=json.dumps(params)) + + def create_grant(self, key_id, grantee_principal, + retiring_principal=None, operations=None, + constraints=None, grant_tokens=None): + """ + Adds a grant to a key to specify who can access the key and + under what conditions. Grants are alternate permission + mechanisms to key policies. If absent, access to the key is + evaluated based on IAM policies attached to the user. By + default, grants do not expire. Grants can be listed, retired, + or revoked as indicated by the following APIs. Typically, when + you are finished using a grant, you retire it. When you want + to end a grant immediately, revoke it. For more information + about grants, see `Grants`_. + + #. ListGrants + #. RetireGrant + #. RevokeGrant + + :type key_id: string + :param key_id: A unique key identifier for a customer master key. This + value can be a globally unique identifier, an ARN, or an alias. + + :type grantee_principal: string + :param grantee_principal: Principal given permission by the grant to + use the key identified by the `keyId` parameter. + + :type retiring_principal: string + :param retiring_principal: Principal given permission to retire the + grant. For more information, see RetireGrant. + + :type operations: list + :param operations: List of operations permitted by the grant. This can + be any combination of one or more of the following values: + + #. Decrypt + #. Encrypt + #. GenerateDataKey + #. GenerateDataKeyWithoutPlaintext + #. ReEncryptFrom + #. ReEncryptTo + #. CreateGrant + + :type constraints: dict + :param constraints: Specifies the conditions under which the actions + specified by the `Operations` parameter are allowed. + + :type grant_tokens: list + :param grant_tokens: List of grant tokens. + + """ + params = { + 'KeyId': key_id, + 'GranteePrincipal': grantee_principal, + } + if retiring_principal is not None: + params['RetiringPrincipal'] = retiring_principal + if operations is not None: + params['Operations'] = operations + if constraints is not None: + params['Constraints'] = constraints + if grant_tokens is not None: + params['GrantTokens'] = grant_tokens + return self.make_request(action='CreateGrant', + body=json.dumps(params)) + + def create_key(self, policy=None, description=None, key_usage=None): + """ + Creates a customer master key. Customer master keys can be + used to encrypt small amounts of data (less than 4K) directly, + but they are most commonly used to encrypt or envelope data + keys that are then used to encrypt customer data. For more + information about data keys, see GenerateDataKey and + GenerateDataKeyWithoutPlaintext. + + :type policy: string + :param policy: Policy to be attached to the key. This is required and + delegates back to the account. The key is the root of trust. 
+ + :type description: string + :param description: Description of the key. We recommend that you + choose a description that helps your customer decide whether the + key is appropriate for a task. + + :type key_usage: string + :param key_usage: Specifies the intended use of the key. Currently this + defaults to ENCRYPT/DECRYPT, and only symmetric encryption and + decryption are supported. + + """ + params = {} + if policy is not None: + params['Policy'] = policy + if description is not None: + params['Description'] = description + if key_usage is not None: + params['KeyUsage'] = key_usage + return self.make_request(action='CreateKey', + body=json.dumps(params)) + + def decrypt(self, ciphertext_blob, encryption_context=None, + grant_tokens=None): + """ + Decrypts ciphertext. Ciphertext is plaintext that has been + previously encrypted by using the Encrypt function. + + :type ciphertext_blob: blob + :param ciphertext_blob: Ciphertext including metadata. + + :type encryption_context: map + :param encryption_context: The encryption context. If this was + specified in the Encrypt function, it must be specified here or the + decryption operation will fail. For more information, see + `Encryption Context`_. + + :type grant_tokens: list + :param grant_tokens: A list of grant tokens that represent grants which + can be used to provide long term permissions to perform decryption. + + """ + if not isinstance(ciphertext_blob, six.binary_type): + raise TypeError( + "Value of argument ``ciphertext_blob`` " + "must be of type %s." % six.binary_type) + ciphertext_blob = base64.b64encode(ciphertext_blob) + params = {'CiphertextBlob': ciphertext_blob, } + if encryption_context is not None: + params['EncryptionContext'] = encryption_context + if grant_tokens is not None: + params['GrantTokens'] = grant_tokens + response = self.make_request(action='Decrypt', + body=json.dumps(params)) + if response.get('Plaintext') is not None: + response['Plaintext'] = base64.b64decode( + response['Plaintext'].encode('utf-8')) + return response + + def delete_alias(self, alias_name): + """ + Deletes the specified alias. + + :type alias_name: string + :param alias_name: The alias to be deleted. + + """ + params = {'AliasName': alias_name, } + return self.make_request(action='DeleteAlias', + body=json.dumps(params)) + + def describe_key(self, key_id): + """ + Provides detailed information about the specified customer + master key. + + :type key_id: string + :param key_id: Unique identifier of the customer master key to be + described. This can be an ARN, an alias, or a globally unique + identifier. + + """ + params = {'KeyId': key_id, } + return self.make_request(action='DescribeKey', + body=json.dumps(params)) + + def disable_key(self, key_id): + """ + Marks a key as disabled, thereby preventing its use. + + :type key_id: string + :param key_id: Unique identifier of the customer master key to be + disabled. This can be an ARN, an alias, or a globally unique + identifier. + + """ + params = {'KeyId': key_id, } + return self.make_request(action='DisableKey', + body=json.dumps(params)) + + def disable_key_rotation(self, key_id): + """ + Disables rotation of the specified key. + + :type key_id: string + :param key_id: Unique identifier of the customer master key for which + rotation is to be disabled. This can be an ARN, an alias, or a + globally unique identifier. 
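+
+        A sketch of toggling and checking rotation (``conn`` is an
+        existing `KMSConnection`; the key identifier is hypothetical)::
+
+            conn.disable_key_rotation('12345678-1234-1234-1234-123456789012')
+            status = conn.get_key_rotation_status(
+                '12345678-1234-1234-1234-123456789012')
+            # The parsed response carries a boolean 'KeyRotationEnabled'.
+            assert status['KeyRotationEnabled'] is False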
+ + """ + params = {'KeyId': key_id, } + return self.make_request(action='DisableKeyRotation', + body=json.dumps(params)) + + def enable_key(self, key_id): + """ + Marks a key as enabled, thereby permitting its use. You can + have up to 25 enabled keys at one time. + + :type key_id: string + :param key_id: Unique identifier of the customer master key to be + enabled. This can be an ARN, an alias, or a globally unique + identifier. + + """ + params = {'KeyId': key_id, } + return self.make_request(action='EnableKey', + body=json.dumps(params)) + + def enable_key_rotation(self, key_id): + """ + Enables rotation of the specified customer master key. + + :type key_id: string + :param key_id: Unique identifier of the customer master key for which + rotation is to be enabled. This can be an ARN, an alias, or a + globally unique identifier. + + """ + params = {'KeyId': key_id, } + return self.make_request(action='EnableKeyRotation', + body=json.dumps(params)) + + def encrypt(self, key_id, plaintext, encryption_context=None, + grant_tokens=None): + """ + Encrypts plaintext into ciphertext by using a customer master + key. + + :type key_id: string + :param key_id: Unique identifier of the customer master. This can be an + ARN, an alias, or the Key ID. + + :type plaintext: blob + :param plaintext: Data to be encrypted. + + :type encryption_context: map + :param encryption_context: Name:value pair that specifies the + encryption context to be used for authenticated encryption. For + more information, see `Authenticated Encryption`_. + + :type grant_tokens: list + :param grant_tokens: A list of grant tokens that represent grants which + can be used to provide long term permissions to perform encryption. + + """ + if not isinstance(plaintext, six.binary_type): + raise TypeError( + "Value of argument ``plaintext`` " + "must be of type %s." % six.binary_type) + plaintext = base64.b64encode(plaintext) + params = {'KeyId': key_id, 'Plaintext': plaintext, } + if encryption_context is not None: + params['EncryptionContext'] = encryption_context + if grant_tokens is not None: + params['GrantTokens'] = grant_tokens + response = self.make_request(action='Encrypt', + body=json.dumps(params)) + if response.get('CiphertextBlob') is not None: + response['CiphertextBlob'] = base64.b64decode( + response['CiphertextBlob'].encode('utf-8')) + return response + + def generate_data_key(self, key_id, encryption_context=None, + number_of_bytes=None, key_spec=None, + grant_tokens=None): + """ + Generates a secure data key. Data keys are used to encrypt and + decrypt data. They are wrapped by customer master keys. + + :type key_id: string + :param key_id: Unique identifier of the key. This can be an ARN, an + alias, or a globally unique identifier. + + :type encryption_context: map + :param encryption_context: Name/value pair that contains additional + data to be authenticated during the encryption and decryption + processes that use the key. This value is logged by AWS CloudTrail + to provide context around the data encrypted by the key. + + :type number_of_bytes: integer + :param number_of_bytes: Integer that contains the number of bytes to + generate. Common values are 128, 256, 512, 1024 and so on. 1024 is + the current limit. + + :type key_spec: string + :param key_spec: Value that identifies the encryption algorithm and key + size to generate a data key for. Currently this can be AES_128 or + AES_256. 
+ + :type grant_tokens: list + :param grant_tokens: A list of grant tokens that represent grants which + can be used to provide long term permissions to generate a key. + + """ + params = {'KeyId': key_id, } + if encryption_context is not None: + params['EncryptionContext'] = encryption_context + if number_of_bytes is not None: + params['NumberOfBytes'] = number_of_bytes + if key_spec is not None: + params['KeySpec'] = key_spec + if grant_tokens is not None: + params['GrantTokens'] = grant_tokens + response = self.make_request(action='GenerateDataKey', + body=json.dumps(params)) + if response.get('CiphertextBlob') is not None: + response['CiphertextBlob'] = base64.b64decode( + response['CiphertextBlob'].encode('utf-8')) + if response.get('Plaintext') is not None: + response['Plaintext'] = base64.b64decode( + response['Plaintext'].encode('utf-8')) + return response + + def generate_data_key_without_plaintext(self, key_id, + encryption_context=None, + key_spec=None, + number_of_bytes=None, + grant_tokens=None): + """ + Returns a key wrapped by a customer master key without the + plaintext copy of that key. To retrieve the plaintext, see + GenerateDataKey. + + :type key_id: string + :param key_id: Unique identifier of the key. This can be an ARN, an + alias, or a globally unique identifier. + + :type encryption_context: map + :param encryption_context: Name:value pair that contains additional + data to be authenticated during the encryption and decryption + processes. + + :type key_spec: string + :param key_spec: Value that identifies the encryption algorithm and key + size. Currently this can be AES_128 or AES_256. + + :type number_of_bytes: integer + :param number_of_bytes: Integer that contains the number of bytes to + generate. Common values are 128, 256, 512, 1024 and so on. + + :type grant_tokens: list + :param grant_tokens: A list of grant tokens that represent grants which + can be used to provide long term permissions to generate a key. + + """ + params = {'KeyId': key_id, } + if encryption_context is not None: + params['EncryptionContext'] = encryption_context + if key_spec is not None: + params['KeySpec'] = key_spec + if number_of_bytes is not None: + params['NumberOfBytes'] = number_of_bytes + if grant_tokens is not None: + params['GrantTokens'] = grant_tokens + response = self.make_request(action='GenerateDataKeyWithoutPlaintext', + body=json.dumps(params)) + if response.get('CiphertextBlob') is not None: + response['CiphertextBlob'] = base64.b64decode( + response['CiphertextBlob'].encode('utf-8')) + return response + + def generate_random(self, number_of_bytes=None): + """ + Generates an unpredictable byte string. + + :type number_of_bytes: integer + :param number_of_bytes: Integer that contains the number of bytes to + generate. Common values are 128, 256, 512, 1024 and so on. The + current limit is 1024 bytes. + + """ + params = {} + if number_of_bytes is not None: + params['NumberOfBytes'] = number_of_bytes + response = self.make_request(action='GenerateRandom', + body=json.dumps(params)) + if response.get('Plaintext') is not None: + response['Plaintext'] = base64.b64decode( + response['Plaintext'].encode('utf-8')) + return response + + def get_key_policy(self, key_id, policy_name): + """ + Retrieves a policy attached to the specified key. + + :type key_id: string + :param key_id: Unique identifier of the key. This can be an ARN, an + alias, or a globally unique identifier. + + :type policy_name: string + :param policy_name: String that contains the name of the policy. 
+ Currently, this must be "default". Policy names can be discovered + by calling ListKeyPolicies. + + """ + params = {'KeyId': key_id, 'PolicyName': policy_name, } + return self.make_request(action='GetKeyPolicy', + body=json.dumps(params)) + + def get_key_rotation_status(self, key_id): + """ + Retrieves a Boolean value that indicates whether key rotation + is enabled for the specified key. + + :type key_id: string + :param key_id: Unique identifier of the key. This can be an ARN, an + alias, or a globally unique identifier. + + """ + params = {'KeyId': key_id, } + return self.make_request(action='GetKeyRotationStatus', + body=json.dumps(params)) + + def list_aliases(self, limit=None, marker=None): + """ + Lists all of the key aliases in the account. + + :type limit: integer + :param limit: Specify this parameter when paginating results to + indicate the maximum number of aliases you want in each response. + If there are additional aliases beyond the maximum you specify, the + `Truncated` response element will be set to `true.` + + :type marker: string + :param marker: Use this parameter when paginating results, and only in + a subsequent request after you've received a response where the + results are truncated. Set it to the value of the `NextMarker` + element in the response you just received. + + """ + params = {} + if limit is not None: + params['Limit'] = limit + if marker is not None: + params['Marker'] = marker + return self.make_request(action='ListAliases', + body=json.dumps(params)) + + def list_grants(self, key_id, limit=None, marker=None): + """ + List the grants for a specified key. + + :type key_id: string + :param key_id: Unique identifier of the key. This can be an ARN, an + alias, or a globally unique identifier. + + :type limit: integer + :param limit: Specify this parameter only when paginating results to + indicate the maximum number of grants you want listed in the + response. If there are additional grants beyond the maximum you + specify, the `Truncated` response element will be set to `true.` + + :type marker: string + :param marker: Use this parameter only when paginating results, and + only in a subsequent request after you've received a response where + the results are truncated. Set it to the value of the `NextMarker` + in the response you just received. + + """ + params = {'KeyId': key_id, } + if limit is not None: + params['Limit'] = limit + if marker is not None: + params['Marker'] = marker + return self.make_request(action='ListGrants', + body=json.dumps(params)) + + def list_key_policies(self, key_id, limit=None, marker=None): + """ + Retrieves a list of policies attached to a key. + + :type key_id: string + :param key_id: Unique identifier of the key. This can be an ARN, an + alias, or a globally unique identifier. + + :type limit: integer + :param limit: Specify this parameter only when paginating results to + indicate the maximum number of policies you want listed in the + response. If there are additional policies beyond the maximum you + specify, the `Truncated` response element will be set to `true.` + + :type marker: string + :param marker: Use this parameter only when paginating results, and + only in a subsequent request after you've received a response where + the results are truncated. Set it to the value of the `NextMarker` + in the response you just received. 
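+
+        A pagination sketch (``conn`` is an existing `KMSConnection`;
+        the key identifier is hypothetical)::
+
+            names, marker = [], None
+            while True:
+                page = conn.list_key_policies(
+                    '12345678-1234-1234-1234-123456789012',
+                    limit=10, marker=marker)
+                names.extend(page['PolicyNames'])
+                if not page.get('Truncated'):
+                    break
+                marker = page['NextMarker']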
+ + """ + params = {'KeyId': key_id, } + if limit is not None: + params['Limit'] = limit + if marker is not None: + params['Marker'] = marker + return self.make_request(action='ListKeyPolicies', + body=json.dumps(params)) + + def list_keys(self, limit=None, marker=None): + """ + Lists the customer master keys. + + :type limit: integer + :param limit: Specify this parameter only when paginating results to + indicate the maximum number of keys you want listed in the + response. If there are additional keys beyond the maximum you + specify, the `Truncated` response element will be set to `true.` + + :type marker: string + :param marker: Use this parameter only when paginating results, and + only in a subsequent request after you've received a response where + the results are truncated. Set it to the value of the `NextMarker` + in the response you just received. + + """ + params = {} + if limit is not None: + params['Limit'] = limit + if marker is not None: + params['Marker'] = marker + return self.make_request(action='ListKeys', + body=json.dumps(params)) + + def put_key_policy(self, key_id, policy_name, policy): + """ + Attaches a policy to the specified key. + + :type key_id: string + :param key_id: Unique identifier of the key. This can be an ARN, an + alias, or a globally unique identifier. + + :type policy_name: string + :param policy_name: Name of the policy to be attached. Currently, the + only supported name is "default". + + :type policy: string + :param policy: The policy, in JSON format, to be attached to the key. + + """ + params = { + 'KeyId': key_id, + 'PolicyName': policy_name, + 'Policy': policy, + } + return self.make_request(action='PutKeyPolicy', + body=json.dumps(params)) + + def re_encrypt(self, ciphertext_blob, destination_key_id, + source_encryption_context=None, + destination_encryption_context=None, grant_tokens=None): + """ + Encrypts data on the server side with a new customer master + key without exposing the plaintext of the data on the client + side. The data is first decrypted and then encrypted. This + operation can also be used to change the encryption context of + a ciphertext. + + :type ciphertext_blob: blob + :param ciphertext_blob: Ciphertext of the data to re-encrypt. + + :type source_encryption_context: map + :param source_encryption_context: Encryption context used to encrypt + and decrypt the data specified in the `CiphertextBlob` parameter. + + :type destination_key_id: string + :param destination_key_id: Key identifier of the key used to re-encrypt + the data. + + :type destination_encryption_context: map + :param destination_encryption_context: Encryption context to be used + when the data is re-encrypted. + + :type grant_tokens: list + :param grant_tokens: Grant tokens that identify the grants that have + permissions for the encryption and decryption process. + + """ + if not isinstance(ciphertext_blob, six.binary_type): + raise TypeError( + "Value of argument ``ciphertext_blob`` " + "must be of type %s." 
% six.binary_type) + ciphertext_blob = base64.b64encode(ciphertext_blob) + params = { + 'CiphertextBlob': ciphertext_blob, + 'DestinationKeyId': destination_key_id, + } + if source_encryption_context is not None: + params['SourceEncryptionContext'] = source_encryption_context + if destination_encryption_context is not None: + params['DestinationEncryptionContext'] = destination_encryption_context + if grant_tokens is not None: + params['GrantTokens'] = grant_tokens + response = self.make_request(action='ReEncrypt', + body=json.dumps(params)) + if response.get('CiphertextBlob') is not None: + response['CiphertextBlob'] = base64.b64decode( + response['CiphertextBlob'].encode('utf-8')) + return response + + def retire_grant(self, grant_token): + """ + Retires a grant. You can retire a grant when you're done using + it to clean up. You should revoke a grant when you intend to + actively deny operations that depend on it. + + :type grant_token: string + :param grant_token: Token that identifies the grant to be retired. + + """ + params = {'GrantToken': grant_token, } + return self.make_request(action='RetireGrant', + body=json.dumps(params)) + + def revoke_grant(self, key_id, grant_id): + """ + Revokes a grant. You can revoke a grant to actively deny + operations that depend on it. + + :type key_id: string + :param key_id: Unique identifier of the key associated with the grant. + + :type grant_id: string + :param grant_id: Identifier of the grant to be revoked. + + """ + params = {'KeyId': key_id, 'GrantId': grant_id, } + return self.make_request(action='RevokeGrant', + body=json.dumps(params)) + + def update_key_description(self, key_id, description): + """ + + + :type key_id: string + :param key_id: + + :type description: string + :param description: + + """ + params = {'KeyId': key_id, 'Description': description, } + return self.make_request(action='UpdateKeyDescription', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/logs/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/logs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2ea075d1ea27b3dd5bb3f338aeea73ae00993a95 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/logs/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. 
+# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import get_regions + + +def regions(): + """ + Get all available regions for the CloudWatch Logs service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.logs.layer1 import CloudWatchLogsConnection + return get_regions('logs', connection_cls=CloudWatchLogsConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/logs/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/logs/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..49c01fa91c0683865e381aa69af8dc9ce42d7d76 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/logs/exceptions.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# +from boto.exception import BotoServerError + + +class LimitExceededException(BotoServerError): + pass + + +class DataAlreadyAcceptedException(BotoServerError): + pass + + +class ResourceInUseException(BotoServerError): + pass + + +class ServiceUnavailableException(BotoServerError): + pass + + +class InvalidParameterException(BotoServerError): + pass + + +class ResourceNotFoundException(BotoServerError): + pass + + +class ResourceAlreadyExistsException(BotoServerError): + pass + + +class OperationAbortedException(BotoServerError): + pass + + +class InvalidSequenceTokenException(BotoServerError): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/logs/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/logs/layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..26f7aff7cd23a30b32d6a7c8a7579cfce5c765ed --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/logs/layer1.py @@ -0,0 +1,576 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.logs import exceptions +from boto.compat import json + + +class CloudWatchLogsConnection(AWSQueryConnection): + """ + Amazon CloudWatch Logs Service API Reference + This is the Amazon CloudWatch Logs API Reference. Amazon + CloudWatch Logs is a managed service for real-time monitoring and + archival of application logs. This guide provides detailed + information about Amazon CloudWatch Logs actions, data types, + parameters, and errors. For detailed information about Amazon + CloudWatch Logs features and their associated API calls, go to the + `Amazon CloudWatch Logs Developer Guide`_. + + Use the following links to get started using the Amazon CloudWatch + API Reference: + + + + `Actions`_: An alphabetical list of all Amazon CloudWatch Logs + actions. + + `Data Types`_: An alphabetical list of all Amazon CloudWatch + Logs data types. + + `Common Parameters`_: Parameters that all Query actions can use. + + `Common Errors`_: Client and server errors that all actions can + return. + + `Regions and Endpoints`_: Itemized regions and endpoints for all + AWS products. + + + In addition to using the Amazon CloudWatch Logs API, you can also + use the following SDKs and third-party libraries to access Amazon + CloudWatch Logs programmatically.
+ + + + `AWS SDK for Java Documentation`_ + + `AWS SDK for .NET Documentation`_ + + `AWS SDK for PHP Documentation`_ + + `AWS SDK for Ruby Documentation`_ + + + Developers in the AWS developer community also provide their own + libraries, which you can find at the following AWS developer + centers: + + + + `AWS Java Developer Center`_ + + `AWS PHP Developer Center`_ + + `AWS Python Developer Center`_ + + `AWS Ruby Developer Center`_ + + `AWS Windows and .NET Developer Center`_ + """ + APIVersion = "2014-03-28" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "logs.us-east-1.amazonaws.com" + ServiceName = "CloudWatchLogs" + TargetPrefix = "Logs_20140328" + ResponseError = JSONResponseError + + _faults = { + "LimitExceededException": exceptions.LimitExceededException, + "DataAlreadyAcceptedException": exceptions.DataAlreadyAcceptedException, + "ResourceInUseException": exceptions.ResourceInUseException, + "ServiceUnavailableException": exceptions.ServiceUnavailableException, + "InvalidParameterException": exceptions.InvalidParameterException, + "ResourceNotFoundException": exceptions.ResourceNotFoundException, + "ResourceAlreadyExistsException": exceptions.ResourceAlreadyExistsException, + "OperationAbortedException": exceptions.OperationAbortedException, + "InvalidSequenceTokenException": exceptions.InvalidSequenceTokenException, + } + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(CloudWatchLogsConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def create_log_group(self, log_group_name): + """ + Creates a new log group with the specified name. The name of + the log group must be unique within a region for an AWS + account. You can create up to 100 log groups per account. + + You must use the following guidelines when naming a log group: + + + Log group names can be between 1 and 512 characters long. + + Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' + (hyphen), '/' (forward slash), and '.' (period). + + + + Log groups are created with a default retention of 14 days. + The retention attribute allows you to configure the number of + days you want to retain log events in the specified log group. + See the `SetRetention` operation for how to modify the + retention of your log groups. + + :type log_group_name: string + :param log_group_name: + + """ + params = {'logGroupName': log_group_name, } + return self.make_request(action='CreateLogGroup', + body=json.dumps(params)) + + def create_log_stream(self, log_group_name, log_stream_name): + """ + Creates a new log stream in the specified log group. The name + of the log stream must be unique within the log group. There + is no limit on the number of log streams that can exist in a + log group. + + You must use the following guidelines when naming a log + stream: + + + Log stream names can be between 1 and 512 characters long. + + The ':' colon character is not allowed.
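+
+        A minimal creation sketch (illustrative only; the connection
+        object and the group/stream names below are hypothetical)::
+
+            logs = CloudWatchLogsConnection()
+            logs.create_log_group('my-app')  # hypothetical group name
+            logs.create_log_stream('my-app', 'instance-1')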
+ + :type log_group_name: string + :param log_group_name: + + :type log_stream_name: string + :param log_stream_name: + + """ + params = { + 'logGroupName': log_group_name, + 'logStreamName': log_stream_name, + } + return self.make_request(action='CreateLogStream', + body=json.dumps(params)) + + def delete_log_group(self, log_group_name): + """ + Deletes the log group with the specified name. Amazon + CloudWatch Logs will delete a log group only if there are no + log streams and no metric filters associated with the log + group. If this condition is not satisfied, the request will + fail and the log group will not be deleted. + + :type log_group_name: string + :param log_group_name: + + """ + params = {'logGroupName': log_group_name, } + return self.make_request(action='DeleteLogGroup', + body=json.dumps(params)) + + def delete_log_stream(self, log_group_name, log_stream_name): + """ + Deletes a log stream and permanently deletes all the archived + log events associated with it. + + :type log_group_name: string + :param log_group_name: + + :type log_stream_name: string + :param log_stream_name: + + """ + params = { + 'logGroupName': log_group_name, + 'logStreamName': log_stream_name, + } + return self.make_request(action='DeleteLogStream', + body=json.dumps(params)) + + def delete_metric_filter(self, log_group_name, filter_name): + """ + Deletes a metric filter associated with the specified log + group. + + :type log_group_name: string + :param log_group_name: + + :type filter_name: string + :param filter_name: The name of the metric filter. + + """ + params = { + 'logGroupName': log_group_name, + 'filterName': filter_name, + } + return self.make_request(action='DeleteMetricFilter', + body=json.dumps(params)) + + def delete_retention_policy(self, log_group_name): + """ + + + :type log_group_name: string + :param log_group_name: + + """ + params = {'logGroupName': log_group_name, } + return self.make_request(action='DeleteRetentionPolicy', + body=json.dumps(params)) + + def describe_log_groups(self, log_group_name_prefix=None, + next_token=None, limit=None): + """ + Returns all the log groups that are associated with the AWS + account making the request. The list returned in the response + is ASCII-sorted by log group name. + + By default, this operation returns up to 50 log groups. If + there are more log groups to list, the response would contain + a `nextToken` value in the response body. You can also limit + the number of log groups returned in the response by + specifying the `limit` parameter in the request. + + :type log_group_name_prefix: string + :param log_group_name_prefix: + + :type next_token: string + :param next_token: A string token used for pagination that points to + the next page of results. It must be a value obtained from the + response of the previous `DescribeLogGroups` request. + + :type limit: integer + :param limit: The maximum number of items returned in the response. If + you don't specify a value, the request would return up to 50 items. + + """ + params = {} + if log_group_name_prefix is not None: + params['logGroupNamePrefix'] = log_group_name_prefix + if next_token is not None: + params['nextToken'] = next_token + if limit is not None: + params['limit'] = limit + return self.make_request(action='DescribeLogGroups', + body=json.dumps(params)) + + def describe_log_streams(self, log_group_name, + log_stream_name_prefix=None, next_token=None, + limit=None): + """ + Returns all the log streams that are associated with the + specified log group. 
The list returned in the response is + ASCII-sorted by log stream name. + + By default, this operation returns up to 50 log streams. If + there are more log streams to list, the response would contain + a `nextToken` value in the response body. You can also limit + the number of log streams returned in the response by + specifying the `limit` parameter in the request. + + :type log_group_name: string + :param log_group_name: + + :type log_stream_name_prefix: string + :param log_stream_name_prefix: + + :type next_token: string + :param next_token: A string token used for pagination that points to + the next page of results. It must be a value obtained from the + response of the previous `DescribeLogStreams` request. + + :type limit: integer + :param limit: The maximum number of items returned in the response. If + you don't specify a value, the request would return up to 50 items. + + """ + params = {'logGroupName': log_group_name, } + if log_stream_name_prefix is not None: + params['logStreamNamePrefix'] = log_stream_name_prefix + if next_token is not None: + params['nextToken'] = next_token + if limit is not None: + params['limit'] = limit + return self.make_request(action='DescribeLogStreams', + body=json.dumps(params)) + + def describe_metric_filters(self, log_group_name, + filter_name_prefix=None, next_token=None, + limit=None): + """ + Returns all the metric filters associated with the specified + log group. The list returned in the response is ASCII-sorted + by filter name. + + By default, this operation returns up to 50 metric filters. If + there are more metric filters to list, the response would + contain a `nextToken` value in the response body. You can also + limit the number of metric filters returned in the response by + specifying the `limit` parameter in the request. + + :type log_group_name: string + :param log_group_name: + + :type filter_name_prefix: string + :param filter_name_prefix: The name of the metric filter. + + :type next_token: string + :param next_token: A string token used for pagination that points to + the next page of results. It must be a value obtained from the + response of the previous `DescribeMetricFilters` request. + + :type limit: integer + :param limit: The maximum number of items returned in the response. If + you don't specify a value, the request would return up to 50 items. + + """ + params = {'logGroupName': log_group_name, } + if filter_name_prefix is not None: + params['filterNamePrefix'] = filter_name_prefix + if next_token is not None: + params['nextToken'] = next_token + if limit is not None: + params['limit'] = limit + return self.make_request(action='DescribeMetricFilters', + body=json.dumps(params)) + + def get_log_events(self, log_group_name, log_stream_name, + start_time=None, end_time=None, next_token=None, + limit=None, start_from_head=None): + """ + Retrieves log events from the specified log stream. You can + provide an optional time range to filter the results on the + event `timestamp`. + + By default, this operation returns as many log events as can + fit in a response size of 1MB, up to 10,000 log events. The + response will always include a `nextForwardToken` and a + `nextBackwardToken` in the response body. You can use any of + these tokens in subsequent `GetLogEvents` requests to paginate + through events in either forward or backward direction. You + can also limit the number of log events returned in the + response by specifying the `limit` parameter in the request.
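+
+        A forward-pagination sketch (illustrative only; the connection
+        object and the group/stream names below are hypothetical)::
+
+            logs = CloudWatchLogsConnection()
+            token = None
+            while True:
+                resp = logs.get_log_events('my-app', 'instance-1',
+                                           next_token=token,
+                                           start_from_head=True)
+                for event in resp['events']:
+                    print(event['timestamp'], event['message'])
+                if resp['nextForwardToken'] == token:
+                    break  # an unchanged token means no more events
+                token = resp['nextForwardToken']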
+ + :type log_group_name: string + :param log_group_name: + + :type log_stream_name: string + :param log_stream_name: + + :type start_time: long + :param start_time: A point in time expressed as the number of milliseconds + since Jan 1, 1970 00:00:00 UTC. + + :type end_time: long + :param end_time: A point in time expressed as the number of milliseconds + since Jan 1, 1970 00:00:00 UTC. + + :type next_token: string + :param next_token: A string token used for pagination that points to + the next page of results. It must be a value obtained from the + `nextForwardToken` or `nextBackwardToken` fields in the response of + the previous `GetLogEvents` request. + + :type limit: integer + :param limit: The maximum number of log events returned in the + response. If you don't specify a value, the request would return as + many log events as can fit in a response size of 1MB, up to 10,000 + log events. + + :type start_from_head: boolean + :param start_from_head: + + """ + params = { + 'logGroupName': log_group_name, + 'logStreamName': log_stream_name, + } + if start_time is not None: + params['startTime'] = start_time + if end_time is not None: + params['endTime'] = end_time + if next_token is not None: + params['nextToken'] = next_token + if limit is not None: + params['limit'] = limit + if start_from_head is not None: + params['startFromHead'] = start_from_head + return self.make_request(action='GetLogEvents', + body=json.dumps(params)) + + def put_log_events(self, log_group_name, log_stream_name, log_events, + sequence_token=None): + """ + Uploads a batch of log events to the specified log stream. + + Every PutLogEvents request must include the `sequenceToken` + obtained from the response of the previous request. An upload + in a newly created log stream does not require a + `sequenceToken`. + + The batch of events must satisfy the following constraints: + + + The maximum batch size is 32,768 bytes, and this size is + calculated as the sum of all event messages in UTF-8, plus 26 + bytes for each log event. + + None of the log events in the batch can be more than 2 hours + in the future. + + None of the log events in the batch can be older than 14 + days or the retention period of the log group. + + The log events in the batch must be in chronological order + by their `timestamp`. + + The maximum number of log events in a batch is 1,000. + + :type log_group_name: string + :param log_group_name: + + :type log_stream_name: string + :param log_stream_name: + + :type log_events: list + :param log_events: A list of events belonging to a log stream. + + :type sequence_token: string + :param sequence_token: A string token that must be obtained from the + response of the previous `PutLogEvents` request. + + """ + params = { + 'logGroupName': log_group_name, + 'logStreamName': log_stream_name, + 'logEvents': log_events, + } + if sequence_token is not None: + params['sequenceToken'] = sequence_token + return self.make_request(action='PutLogEvents', + body=json.dumps(params)) + + def put_metric_filter(self, log_group_name, filter_name, filter_pattern, + metric_transformations): + """ + Creates or updates a metric filter and associates it with the + specified log group. Metric filters allow you to configure + rules to extract metric data from log events ingested through + `PutLogEvents` requests. + + :type log_group_name: string + :param log_group_name: + + :type filter_name: string + :param filter_name: The name of the metric filter.
+ + :type filter_pattern: string + :param filter_pattern: + + :type metric_transformations: list + :param metric_transformations: + + """ + params = { + 'logGroupName': log_group_name, + 'filterName': filter_name, + 'filterPattern': filter_pattern, + 'metricTransformations': metric_transformations, + } + return self.make_request(action='PutMetricFilter', + body=json.dumps(params)) + + def put_retention_policy(self, log_group_name, retention_in_days): + """ + Sets the retention of the specified log group. + + :type log_group_name: string + :param log_group_name: + + :type retention_in_days: integer + :param retention_in_days: Specifies the number of days you want to + retain log events in the specified log group. Possible values are: + 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 547, 730. + + """ + params = { + 'logGroupName': log_group_name, + 'retentionInDays': retention_in_days, + } + return self.make_request(action='PutRetentionPolicy', + body=json.dumps(params)) + + def set_retention(self, log_group_name, retention_in_days): + """ + Sets the retention of the specified log group. Log groups are + created with a default retention of 14 days. The retention + attribute allows you to configure the number of days you want + to retain log events in the specified log group. + + :type log_group_name: string + :param log_group_name: + + :type retention_in_days: integer + :param retention_in_days: Specifies the number of days you want to + retain log events in the specified log group. Possible values are: + 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 547, 730. + + """ + params = { + 'logGroupName': log_group_name, + 'retentionInDays': retention_in_days, + } + return self.make_request(action='SetRetention', + body=json.dumps(params)) + + def test_metric_filter(self, filter_pattern, log_event_messages): + """ + Tests the filter pattern of a metric filter against a sample + of log event messages. You can use this operation to validate + the correctness of a metric filter pattern. + + :type filter_pattern: string + :param filter_pattern: + + :type log_event_messages: list + :param log_event_messages: + + """ + params = { + 'filterPattern': filter_pattern, + 'logEventMessages': log_event_messages, + } + return self.make_request(action='TestMetricFilter', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/machinelearning/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/machinelearning/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..34fab912509c394fe284ee9dc526ae907b80166e --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/machinelearning/__init__.py @@ -0,0 +1,42 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for Amazon Machine Learning. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.machinelearning.layer1 import MachineLearningConnection + return get_regions('machinelearning', + connection_cls=MachineLearningConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/machinelearning/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/machinelearning/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..17f396fdc920180229c7541f57841b16a514425b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/machinelearning/exceptions.py @@ -0,0 +1,51 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE.
+# + +from boto.exception import BotoServerError + + +class InternalServerException(BotoServerError): + pass + + +class LimitExceededException(BotoServerError): + pass + + +class IdempotentParameterMismatchException(BotoServerError): + pass + + +class ResourceInUseException(BotoServerError): + pass + + +class ResourceNotFoundException(BotoServerError): + pass + + +class PredictorNotMountedException(BotoServerError): + pass + + +class InvalidInputException(BotoServerError): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/machinelearning/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/machinelearning/layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..0768fb1ea9c238ff056c2fec20d513d6f80bf3de --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/machinelearning/layer1.py @@ -0,0 +1,1408 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# + +import boto +from boto.compat import json, urlsplit +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.machinelearning import exceptions + + +class MachineLearningConnection(AWSQueryConnection): + """ + Definition of the public APIs exposed by Amazon Machine Learning + """ + APIVersion = "2014-12-12" + AuthServiceName = 'machinelearning' + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "machinelearning.us-east-1.amazonaws.com" + ServiceName = "MachineLearning" + TargetPrefix = "AmazonML_20141212" + ResponseError = JSONResponseError + + _faults = { + "InternalServerException": exceptions.InternalServerException, + "LimitExceededException": exceptions.LimitExceededException, + "ResourceNotFoundException": exceptions.ResourceNotFoundException, + "IdempotentParameterMismatchException": exceptions.IdempotentParameterMismatchException, + "PredictorNotMountedException": exceptions.PredictorNotMountedException, + "InvalidInputException": exceptions.InvalidInputException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(MachineLearningConnection, self).__init__(**kwargs) + self.region = region + self.auth_region_name = self.region.name + + def _required_auth_capability(self): + return ['hmac-v4'] + + def create_batch_prediction(self, batch_prediction_id, ml_model_id, + batch_prediction_data_source_id, output_uri, + batch_prediction_name=None): + """ + Generates predictions for a group of observations. The + observations to process exist in one or more data files + referenced by a `DataSource`. This operation creates a new + `BatchPrediction`, and uses an `MLModel` and the data files + referenced by the `DataSource` as information sources. + + `CreateBatchPrediction` is an asynchronous operation. In + response to `CreateBatchPrediction`, Amazon Machine Learning + (Amazon ML) immediately returns and sets the `BatchPrediction` + status to `PENDING`. After the `BatchPrediction` completes, + Amazon ML sets the status to `COMPLETED`. + + You can poll for status updates by using the + GetBatchPrediction operation and checking the `Status` + parameter of the result. After the `COMPLETED` status appears, + the results are available in the location specified by the + `OutputUri` parameter. + + :type batch_prediction_id: string + :param batch_prediction_id: A user-supplied ID that uniquely identifies + the `BatchPrediction`. + + :type batch_prediction_name: string + :param batch_prediction_name: A user-supplied name or description of + the `BatchPrediction`. `BatchPredictionName` can only use the UTF-8 + character set. + + :type ml_model_id: string + :param ml_model_id: The ID of the `MLModel` that will generate + predictions for the group of observations. + + :type batch_prediction_data_source_id: string + :param batch_prediction_data_source_id: The ID of the `DataSource` that + points to the group of observations to predict. + + :type output_uri: string + :param output_uri: The location of an Amazon Simple Storage Service + (Amazon S3) bucket or directory to store the batch prediction + results. The following substrings are not allowed in the s3 key + portion of the "outputURI" field: ':', '//', '/./', '/../'. 
+ Amazon ML needs permissions to store and retrieve the logs on your + behalf. For information about how to set permissions, see the + `Amazon Machine Learning Developer Guide`_. + + """ + params = { + 'BatchPredictionId': batch_prediction_id, + 'MLModelId': ml_model_id, + 'BatchPredictionDataSourceId': batch_prediction_data_source_id, + 'OutputUri': output_uri, + } + if batch_prediction_name is not None: + params['BatchPredictionName'] = batch_prediction_name + return self.make_request(action='CreateBatchPrediction', + body=json.dumps(params)) + + def create_data_source_from_rds(self, data_source_id, rds_data, role_arn, + data_source_name=None, + compute_statistics=None): + """ + Creates a `DataSource` object from an `Amazon Relational + Database Service`_ (Amazon RDS). A `DataSource` references + data that can be used to perform CreateMLModel, + CreateEvaluation, or CreateBatchPrediction operations. + + `CreateDataSourceFromRDS` is an asynchronous operation. In + response to `CreateDataSourceFromRDS`, Amazon Machine Learning + (Amazon ML) immediately returns and sets the `DataSource` + status to `PENDING`. After the `DataSource` is created and + ready for use, Amazon ML sets the `Status` parameter to + `COMPLETED`. `DataSource` in `COMPLETED` or `PENDING` status + can only be used to perform CreateMLModel, CreateEvaluation, + or CreateBatchPrediction operations. + + If Amazon ML cannot accept the input source, it sets the + `Status` parameter to `FAILED` and includes an error message + in the `Message` attribute of the GetDataSource operation + response. + + :type data_source_id: string + :param data_source_id: A user-supplied ID that uniquely identifies the + `DataSource`. Typically, an Amazon Resource Name (ARN) becomes + the ID for a `DataSource`. + + :type data_source_name: string + :param data_source_name: A user-supplied name or description of the + `DataSource`. + + :type rds_data: dict + :param rds_data: + The data specification of an Amazon RDS `DataSource`: + + + + DatabaseInformation - + + + `DatabaseName` - Name of the Amazon RDS database. + + `InstanceIdentifier` - Unique identifier for the Amazon RDS + database instance. + + + DatabaseCredentials - AWS Identity and Access Management (IAM) + credentials that are used to connect to the Amazon RDS database. + + ResourceRole - Role (DataPipelineDefaultResourceRole) assumed by an + Amazon Elastic Compute Cloud (EC2) instance to carry out the copy + task from Amazon RDS to Amazon S3. For more information, see `Role + templates`_ for data pipelines. + + ServiceRole - Role (DataPipelineDefaultRole) assumed by the AWS Data + Pipeline service to monitor the progress of the copy task from + Amazon RDS to Amazon Simple Storage Service (S3). For more + information, see `Role templates`_ for data pipelines. + + SecurityInfo - Security information to use to access an Amazon RDS + instance. You need to set up appropriate ingress rules for the + security entity IDs provided to allow access to the Amazon RDS + instance. Specify a [`SubnetId`, `SecurityGroupIds`] pair for a + VPC-based Amazon RDS instance. + + SelectSqlQuery - Query that is used to retrieve the observation data + for the `Datasource`. + + S3StagingLocation - Amazon S3 location for staging RDS data. The data + retrieved from Amazon RDS using `SelectSqlQuery` is stored in this + location. + + DataSchemaUri - Amazon S3 location of the `DataSchema`. + + DataSchema - A JSON string representing the schema. This is not + required if `DataSchemaUri` is specified.
+ + DataRearrangement - A JSON string representing the splitting + requirement of a `Datasource`. Sample - ` "{\"randomSeed\":\"some- random-seed\", + \"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}"` + + :type role_arn: string + :param role_arn: The role that Amazon ML assumes on behalf of the user + to create and activate a data pipeline in the user's account and + copy data (using the `SelectSqlQuery` query) from Amazon RDS to + Amazon S3. + + :type compute_statistics: boolean + :param compute_statistics: The compute statistics for a `DataSource`. + The statistics are generated from the observation data referenced + by a `DataSource`. Amazon ML uses the statistics internally during + `MLModel` training. This parameter must be set to `True` if the + ``DataSource`` needs to be used for `MLModel` training. + + """ + params = { + 'DataSourceId': data_source_id, + 'RDSData': rds_data, + 'RoleARN': role_arn, + } + if data_source_name is not None: + params['DataSourceName'] = data_source_name + if compute_statistics is not None: + params['ComputeStatistics'] = compute_statistics + return self.make_request(action='CreateDataSourceFromRDS', + body=json.dumps(params)) + + def create_data_source_from_redshift(self, data_source_id, data_spec, + role_arn, data_source_name=None, + compute_statistics=None): + """ + Creates a `DataSource` from `Amazon Redshift`_. A `DataSource` + references data that can be used to perform either + CreateMLModel, CreateEvaluation or CreateBatchPrediction + operations. + + `CreateDataSourceFromRedshift` is an asynchronous operation. + In response to `CreateDataSourceFromRedshift`, Amazon Machine + Learning (Amazon ML) immediately returns and sets the + `DataSource` status to `PENDING`. After the `DataSource` is + created and ready for use, Amazon ML sets the `Status` + parameter to `COMPLETED`. `DataSource` in `COMPLETED` or + `PENDING` status can only be used to perform CreateMLModel, + CreateEvaluation, or CreateBatchPrediction operations. + + If Amazon ML cannot accept the input source, it sets the + `Status` parameter to `FAILED` and includes an error message + in the `Message` attribute of the GetDataSource operation + response. + + The observations should exist in the database hosted on an + Amazon Redshift cluster and should be specified by a + `SelectSqlQuery`. Amazon ML executes the `Unload`_ command in + Amazon Redshift to transfer the result set of `SelectSqlQuery` + to `S3StagingLocation`. + + After the `DataSource` is created, it's ready for use in + evaluations and batch predictions. If you plan to use the + `DataSource` to train an `MLModel`, the `DataSource` requires + another item -- a recipe. A recipe describes the observation + variables that participate in training an `MLModel`. A recipe + describes how each input variable will be used in training. + Will the variable be included or excluded from training? Will + the variable be manipulated, for example, combined with + another variable or split apart into word combinations? The + recipe provides answers to these questions. For more + information, see the Amazon Machine Learning Developer Guide. + + :type data_source_id: string + :param data_source_id: A user-supplied ID that uniquely identifies the + `DataSource`. + + :type data_source_name: string + :param data_source_name: A user-supplied name or description of the + `DataSource`.
+ + :type data_spec: dict + :param data_spec: + The data specification of an Amazon Redshift `DataSource`: + + + + DatabaseInformation - + + + `DatabaseName` - Name of the Amazon Redshift database. + + `ClusterIdentifier` - Unique ID for the Amazon Redshift cluster. + + + DatabaseCredentials - AWS Identity and Access Management (IAM) + credentials that are used to connect to the Amazon Redshift + database. + + SelectSqlQuery - Query that is used to retrieve the observation data + for the `Datasource`. + + S3StagingLocation - Amazon Simple Storage Service (Amazon S3) + location for staging Amazon Redshift data. The data retrieved from + Amazon Redshift using + `SelectSqlQuery` is stored in this location. + + DataSchemaUri - Amazon S3 location of the `DataSchema`. + + DataSchema - A JSON string representing the schema. This is not + required if `DataSchemaUri` is specified. + + DataRearrangement - A JSON string representing the splitting + requirement of a `Datasource`. Sample - ` "{\"randomSeed\":\"some- random-seed\", + \"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}"` + + :type role_arn: string + :param role_arn: A fully specified role Amazon Resource Name (ARN). + Amazon ML assumes the role on behalf of the user to create the + following: + + + + A security group to allow Amazon ML to execute the `SelectSqlQuery` + query on an Amazon Redshift cluster + + An Amazon S3 bucket policy to grant Amazon ML read/write permissions + on the `S3StagingLocation` + + :type compute_statistics: boolean + :param compute_statistics: The compute statistics for a `DataSource`. + The statistics are generated from the observation data referenced + by a `DataSource`. Amazon ML uses the statistics internally during + `MLModel` training. This parameter must be set to `True` if the + ``DataSource`` needs to be used for `MLModel` training. + + """ + params = { + 'DataSourceId': data_source_id, + 'DataSpec': data_spec, + 'RoleARN': role_arn, + } + if data_source_name is not None: + params['DataSourceName'] = data_source_name + if compute_statistics is not None: + params['ComputeStatistics'] = compute_statistics + return self.make_request(action='CreateDataSourceFromRedshift', + body=json.dumps(params)) + + def create_data_source_from_s3(self, data_source_id, data_spec, + data_source_name=None, + compute_statistics=None): + """ + Creates a `DataSource` object. A `DataSource` references data + that can be used to perform CreateMLModel, CreateEvaluation, + or CreateBatchPrediction operations. + + `CreateDataSourceFromS3` is an asynchronous operation. In + response to `CreateDataSourceFromS3`, Amazon Machine Learning + (Amazon ML) immediately returns and sets the `DataSource` + status to `PENDING`. After the `DataSource` is created and + ready for use, Amazon ML sets the `Status` parameter to + `COMPLETED`. `DataSource` in `COMPLETED` or `PENDING` status + can only be used to perform CreateMLModel, CreateEvaluation or + CreateBatchPrediction operations. + + If Amazon ML cannot accept the input source, it sets the + `Status` parameter to `FAILED` and includes an error message + in the `Message` attribute of the GetDataSource operation + response. + + The observation data used in a `DataSource` should be ready to + use; that is, it should have a consistent structure, and + missing data values should be kept to a minimum.
The + observation data must reside in one or more CSV files in an + Amazon Simple Storage Service (Amazon S3) bucket, along with a + schema that describes the data items by name and type. The + same schema must be used for all of the data files referenced + by the `DataSource`. + + After the `DataSource` has been created, it's ready to use in + evaluations and batch predictions. If you plan to use the + `DataSource` to train an `MLModel`, the `DataSource` requires + another item: a recipe. A recipe describes the observation + variables that participate in training an `MLModel`. A recipe + describes how each input variable will be used in training. + Will the variable be included or excluded from training? Will + the variable be manipulated, for example, combined with + another variable, or split apart into word combinations? The + recipe provides answers to these questions. For more + information, see the `Amazon Machine Learning Developer + Guide`_. + + :type data_source_id: string + :param data_source_id: A user-supplied identifier that uniquely + identifies the `DataSource`. + + :type data_source_name: string + :param data_source_name: A user-supplied name or description of the + `DataSource`. + + :type data_spec: dict + :param data_spec: + The data specification of a `DataSource`: + + + + DataLocationS3 - Amazon Simple Storage Service (Amazon S3) location + of the observation data. + + DataSchemaLocationS3 - Amazon S3 location of the `DataSchema`. + + DataSchema - A JSON string representing the schema. This is not + required if `DataSchemaUri` is specified. + + DataRearrangement - A JSON string representing the splitting + requirement of a `Datasource`. Sample - ` "{\"randomSeed\":\"some- + random-seed\", + \"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}"` + + :type compute_statistics: boolean + :param compute_statistics: The compute statistics for a `DataSource`. + The statistics are generated from the observation data referenced + by a `DataSource`. Amazon ML uses the statistics internally during + an `MLModel` training. This parameter must be set to `True` if the + ``DataSource `` needs to be used for `MLModel` training + + """ + params = { + 'DataSourceId': data_source_id, + 'DataSpec': data_spec, + } + if data_source_name is not None: + params['DataSourceName'] = data_source_name + if compute_statistics is not None: + params['ComputeStatistics'] = compute_statistics + return self.make_request(action='CreateDataSourceFromS3', + body=json.dumps(params)) + + def create_evaluation(self, evaluation_id, ml_model_id, + evaluation_data_source_id, evaluation_name=None): + """ + Creates a new `Evaluation` of an `MLModel`. An `MLModel` is + evaluated on a set of observations associated to a + `DataSource`. Like a `DataSource` for an `MLModel`, the + `DataSource` for an `Evaluation` contains values for the + Target Variable. The `Evaluation` compares the predicted + result for each observation to the actual outcome and provides + a summary so that you know how effective the `MLModel` + functions on the test data. Evaluation generates a relevant + performance metric such as BinaryAUC, RegressionRMSE or + MulticlassAvgFScore based on the corresponding `MLModelType`: + `BINARY`, `REGRESSION` or `MULTICLASS`. + + `CreateEvaluation` is an asynchronous operation. In response + to `CreateEvaluation`, Amazon Machine Learning (Amazon ML) + immediately returns and sets the evaluation status to + `PENDING`. 
After the `Evaluation` is created and ready for + use, Amazon ML sets the status to `COMPLETED`. + + You can use the GetEvaluation operation to check progress of + the evaluation during the creation operation. + + :type evaluation_id: string + :param evaluation_id: A user-supplied ID that uniquely identifies the + `Evaluation`. + + :type evaluation_name: string + :param evaluation_name: A user-supplied name or description of the + `Evaluation`. + + :type ml_model_id: string + :param ml_model_id: The ID of the `MLModel` to evaluate. + The schema used in creating the `MLModel` must match the schema of the + `DataSource` used in the `Evaluation`. + + :type evaluation_data_source_id: string + :param evaluation_data_source_id: The ID of the `DataSource` for the + evaluation. The schema of the `DataSource` must match the schema + used to create the `MLModel`. + + """ + params = { + 'EvaluationId': evaluation_id, + 'MLModelId': ml_model_id, + 'EvaluationDataSourceId': evaluation_data_source_id, + } + if evaluation_name is not None: + params['EvaluationName'] = evaluation_name + return self.make_request(action='CreateEvaluation', + body=json.dumps(params)) + + def create_ml_model(self, ml_model_id, ml_model_type, + training_data_source_id, ml_model_name=None, + parameters=None, recipe=None, recipe_uri=None): + """ + Creates a new `MLModel` using the data files and the recipe as + information sources. + + An `MLModel` is nearly immutable. Users can only update the + `MLModelName` and the `ScoreThreshold` in an `MLModel` without + creating a new `MLModel`. + + `CreateMLModel` is an asynchronous operation. In response to + `CreateMLModel`, Amazon Machine Learning (Amazon ML) + immediately returns and sets the `MLModel` status to + `PENDING`. After the `MLModel` is created and ready for use, + Amazon ML sets the status to `COMPLETED`. + + You can use the GetMLModel operation to check progress of the + `MLModel` during the creation operation. + + CreateMLModel requires a `DataSource` with computed + statistics, which can be created by setting + `ComputeStatistics` to `True` in CreateDataSourceFromRDS, + CreateDataSourceFromS3, or CreateDataSourceFromRedshift + operations. + + :type ml_model_id: string + :param ml_model_id: A user-supplied ID that uniquely identifies the + `MLModel`. + + :type ml_model_name: string + :param ml_model_name: A user-supplied name or description of the + `MLModel`. + + :type ml_model_type: string + :param ml_model_type: The category of supervised learning that this + `MLModel` will address. Choose from the following types: + + + Choose `REGRESSION` if the `MLModel` will be used to predict a + numeric value. + + Choose `BINARY` if the `MLModel` result has two possible values. + + Choose `MULTICLASS` if the `MLModel` result has a limited number of + values. + + + For more information, see the `Amazon Machine Learning Developer + Guide`_. + + :type parameters: map + :param parameters: + A list of the training parameters in the `MLModel`. The list is + implemented as a map of key/value pairs. + + The following is the current set of training parameters: + + + + `sgd.l1RegularizationAmount` - Coefficient regularization L1 norm. It + controls overfitting the data by penalizing large coefficients. + This tends to drive coefficients to zero, resulting in sparse + feature set. If you use this parameter, start by specifying a small + value such as 1.0E-08. The value is a double that ranges from 0 to + MAX_DOUBLE. The default is not to use L1 normalization. 
The + parameter cannot be used when `L2` is specified. Use this parameter + sparingly. + + `sgd.l2RegularizationAmount` - Coefficient regularization L2 norm. It + controls overfitting the data by penalizing large coefficients. + This tends to drive coefficients to small, nonzero values. If you + use this parameter, start by specifying a small value such as + 1.0E-08. The value is a double that ranges from 0 to MAX_DOUBLE. + The default is not to use L2 normalization. This cannot be used + when `L1` is specified. Use this parameter sparingly. + + `sgd.maxPasses` - Number of times that the training process traverses + the observations to build the `MLModel`. The value is an integer + that ranges from 1 to 10000. The default value is 10. + + `sgd.maxMLModelSizeInBytes` - Maximum allowed size of the model. + Depending on the input data, the size of the model might affect its + performance. The value is an integer that ranges from 100000 to + 2147483648. The default value is 33554432. + + :type training_data_source_id: string + :param training_data_source_id: The `DataSource` that points to the + training data. + + :type recipe: string + :param recipe: The data recipe for creating the `MLModel`. You must specify + either the recipe or its URI. If you don't specify a recipe or its + URI, Amazon ML creates a default. + + :type recipe_uri: string + :param recipe_uri: The Amazon Simple Storage Service (Amazon S3) + location and file name that contains the `MLModel` recipe. You must + specify either the recipe or its URI. If you don't specify a recipe + or its URI, Amazon ML creates a default. + + """ + params = { + 'MLModelId': ml_model_id, + 'MLModelType': ml_model_type, + 'TrainingDataSourceId': training_data_source_id, + } + if ml_model_name is not None: + params['MLModelName'] = ml_model_name + if parameters is not None: + params['Parameters'] = parameters + if recipe is not None: + params['Recipe'] = recipe + if recipe_uri is not None: + params['RecipeUri'] = recipe_uri + return self.make_request(action='CreateMLModel', + body=json.dumps(params)) + + def create_realtime_endpoint(self, ml_model_id): + """ + Creates a real-time endpoint for the `MLModel`. The endpoint + contains the URI of the `MLModel`; that is, the location to + send real-time prediction requests for the specified + `MLModel`. + + :type ml_model_id: string + :param ml_model_id: The ID assigned to the `MLModel` during creation. + + """ + params = {'MLModelId': ml_model_id, } + return self.make_request(action='CreateRealtimeEndpoint', + body=json.dumps(params)) + + def delete_batch_prediction(self, batch_prediction_id): + """ + Assigns the DELETED status to a `BatchPrediction`, rendering + it unusable. + + After using the `DeleteBatchPrediction` operation, you can use + the GetBatchPrediction operation to verify that the status of + the `BatchPrediction` changed to DELETED. + + The result of the `DeleteBatchPrediction` operation is + irreversible. + + :type batch_prediction_id: string + :param batch_prediction_id: A user-supplied ID that uniquely identifies + the `BatchPrediction`. + + """ + params = {'BatchPredictionId': batch_prediction_id, } + return self.make_request(action='DeleteBatchPrediction', + body=json.dumps(params)) + + def delete_data_source(self, data_source_id): + """ + Assigns the DELETED status to a `DataSource`, rendering it + unusable. + + After using the `DeleteDataSource` operation, you can use the + GetDataSource operation to verify that the status of the + `DataSource` changed to DELETED.
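+
+        A delete-and-verify sketch (illustrative only; the connection
+        object and the `DataSource` ID below are hypothetical)::
+
+            ml = MachineLearningConnection()
+            ml.delete_data_source('my-datasource-id')
+            # GetDataSource reports the new status after the delete.
+            status = ml.get_data_source('my-datasource-id')['Status']
+            assert status == 'DELETED'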
+ + The results of the `DeleteDataSource` operation are + irreversible. + + :type data_source_id: string + :param data_source_id: A user-supplied ID that uniquely identifies the + `DataSource`. + + """ + params = {'DataSourceId': data_source_id, } + return self.make_request(action='DeleteDataSource', + body=json.dumps(params)) + + def delete_evaluation(self, evaluation_id): + """ + Assigns the `DELETED` status to an `Evaluation`, rendering it + unusable. + + After invoking the `DeleteEvaluation` operation, you can use + the GetEvaluation operation to verify that the status of the + `Evaluation` changed to `DELETED`. + + The results of the `DeleteEvaluation` operation are + irreversible. + + :type evaluation_id: string + :param evaluation_id: A user-supplied ID that uniquely identifies the + `Evaluation` to delete. + + """ + params = {'EvaluationId': evaluation_id, } + return self.make_request(action='DeleteEvaluation', + body=json.dumps(params)) + + def delete_ml_model(self, ml_model_id): + """ + Assigns the DELETED status to an `MLModel`, rendering it + unusable. + + After using the `DeleteMLModel` operation, you can use the + GetMLModel operation to verify that the status of the + `MLModel` changed to DELETED. + + The result of the `DeleteMLModel` operation is irreversible. + + :type ml_model_id: string + :param ml_model_id: A user-supplied ID that uniquely identifies the + `MLModel`. + + """ + params = {'MLModelId': ml_model_id, } + return self.make_request(action='DeleteMLModel', + body=json.dumps(params)) + + def delete_realtime_endpoint(self, ml_model_id): + """ + Deletes a real-time endpoint of an `MLModel`. + + :type ml_model_id: string + :param ml_model_id: The ID assigned to the `MLModel` during creation. + + """ + params = {'MLModelId': ml_model_id, } + return self.make_request(action='DeleteRealtimeEndpoint', + body=json.dumps(params)) + + def describe_batch_predictions(self, filter_variable=None, eq=None, + gt=None, lt=None, ge=None, le=None, + ne=None, prefix=None, sort_order=None, + next_token=None, limit=None): + """ + Returns a list of `BatchPrediction` operations that match the + search criteria in the request. + + :type filter_variable: string + :param filter_variable: + Use one of the following variables to filter a list of + `BatchPrediction`: + + + + `CreatedAt` - Sets the search criteria to the `BatchPrediction` + creation date. + + `Status` - Sets the search criteria to the `BatchPrediction` status. + + `Name` - Sets the search criteria to the contents of the + `BatchPrediction` `Name`. + + `IAMUser` - Sets the search criteria to the user account that invoked + the `BatchPrediction` creation. + + `MLModelId` - Sets the search criteria to the `MLModel` used in the + `BatchPrediction`. + + `DataSourceId` - Sets the search criteria to the `DataSource` used in + the `BatchPrediction`. + + `DataURI` - Sets the search criteria to the data file(s) used in the + `BatchPrediction`. The URL can identify either a file or an Amazon + Simple Storage Service (Amazon S3) bucket or directory. + + :type eq: string + :param eq: The equal to operator. The `BatchPrediction` results will + have `FilterVariable` values that exactly match the value specified + with `EQ`. + + :type gt: string + :param gt: The greater than operator. The `BatchPrediction` results + will have `FilterVariable` values that are greater than the value + specified with `GT`. + + :type lt: string + :param lt: The less than operator.
The `BatchPrediction` results will + have `FilterVariable` values that are less than the value specified + with `LT`. + + :type ge: string + :param ge: The greater than or equal to operator. The `BatchPrediction` + results will have `FilterVariable` values that are greater than or + equal to the value specified with `GE`. + + :type le: string + :param le: The less than or equal to operator. The `BatchPrediction` + results will have `FilterVariable` values that are less than or + equal to the value specified with `LE`. + + :type ne: string + :param ne: The not equal to operator. The `BatchPrediction` results + will have `FilterVariable` values not equal to the value specified + with `NE`. + + :type prefix: string + :param prefix: + A string that is found at the beginning of a variable, such as `Name` + or `Id`. + + For example, a `Batch Prediction` operation could have the `Name` + `2014-09-09-HolidayGiftMailer`. To search for this + `BatchPrediction`, select `Name` for the `FilterVariable` and any + of the following strings for the `Prefix`: + + + + 2014-09 + + 2014-09-09 + + 2014-09-09-Holiday + + :type sort_order: string + :param sort_order: A two-value parameter that determines the sequence + of the resulting list of `MLModel`s. + + + `asc` - Arranges the list in ascending order (A-Z, 0-9). + + `dsc` - Arranges the list in descending order (Z-A, 9-0). + + + Results are sorted by `FilterVariable`. + + :type next_token: string + :param next_token: An ID of the page in the paginated results. + + :type limit: integer + :param limit: The number of pages of information to include in the + result. The range of acceptable values is 1 through 100. The + default value is 100. + + """ + params = {} + if filter_variable is not None: + params['FilterVariable'] = filter_variable + if eq is not None: + params['EQ'] = eq + if gt is not None: + params['GT'] = gt + if lt is not None: + params['LT'] = lt + if ge is not None: + params['GE'] = ge + if le is not None: + params['LE'] = le + if ne is not None: + params['NE'] = ne + if prefix is not None: + params['Prefix'] = prefix + if sort_order is not None: + params['SortOrder'] = sort_order + if next_token is not None: + params['NextToken'] = next_token + if limit is not None: + params['Limit'] = limit + return self.make_request(action='DescribeBatchPredictions', + body=json.dumps(params)) + + def describe_data_sources(self, filter_variable=None, eq=None, gt=None, + lt=None, ge=None, le=None, ne=None, + prefix=None, sort_order=None, next_token=None, + limit=None): + """ + Returns a list of `DataSource` that match the search criteria + in the request. + + :type filter_variable: string + :param filter_variable: + Use one of the following variables to filter a list of `DataSource`: + + + + `CreatedAt` - Sets the search criteria to `DataSource` creation + dates. + + `Status` - Sets the search criteria to `DataSource` statuses. + + `Name` - Sets the search criteria to the contents of `DataSource` ** + ** `Name`. + + `DataUri` - Sets the search criteria to the URI of data files used to + create the `DataSource`. The URI can identify either a file or an + Amazon Simple Storage Service (Amazon S3) bucket or directory. + + `IAMUser` - Sets the search criteria to the user account that invoked + the `DataSource` creation. + + :type eq: string + :param eq: The equal to operator. The `DataSource` results will have + `FilterVariable` values that exactly match the value specified with + `EQ`. + + :type gt: string + :param gt: The greater than operator. 
The `DataSource` results will
+ have `FilterVariable` values that are greater than the value
+ specified with `GT`.
+
+ :type lt: string
+ :param lt: The less than operator. The `DataSource` results will have
+ `FilterVariable` values that are less than the value specified with
+ `LT`.
+
+ :type ge: string
+ :param ge: The greater than or equal to operator. The `DataSource`
+ results will have `FilterVariable` values that are greater than or
+ equal to the value specified with `GE`.
+
+ :type le: string
+ :param le: The less than or equal to operator. The `DataSource` results
+ will have `FilterVariable` values that are less than or equal to
+ the value specified with `LE`.
+
+ :type ne: string
+ :param ne: The not equal to operator. The `DataSource` results will
+ have `FilterVariable` values not equal to the value specified with
+ `NE`.
+
+ :type prefix: string
+ :param prefix:
+ A string that is found at the beginning of a variable, such as `Name`
+ or `Id`.
+
+ For example, a `DataSource` could have the `Name`
+ `2014-09-09-HolidayGiftMailer`. To search for this `DataSource`,
+ select `Name` for the `FilterVariable` and any of the following
+ strings for the `Prefix`:
+
+
+ + 2014-09
+ + 2014-09-09
+ + 2014-09-09-Holiday
+
+ :type sort_order: string
+ :param sort_order: A two-value parameter that determines the sequence
+ of the resulting list of `DataSource`.
+
+ + `asc` - Arranges the list in ascending order (A-Z, 0-9).
+ + `dsc` - Arranges the list in descending order (Z-A, 9-0).
+
+
+ Results are sorted by `FilterVariable`.
+
+ :type next_token: string
+ :param next_token: The ID of the page in the paginated results.
+
+ :type limit: integer
+ :param limit: The maximum number of `DataSource` to include in the
+ result.
+
+ """
+ params = {}
+ if filter_variable is not None:
+ params['FilterVariable'] = filter_variable
+ if eq is not None:
+ params['EQ'] = eq
+ if gt is not None:
+ params['GT'] = gt
+ if lt is not None:
+ params['LT'] = lt
+ if ge is not None:
+ params['GE'] = ge
+ if le is not None:
+ params['LE'] = le
+ if ne is not None:
+ params['NE'] = ne
+ if prefix is not None:
+ params['Prefix'] = prefix
+ if sort_order is not None:
+ params['SortOrder'] = sort_order
+ if next_token is not None:
+ params['NextToken'] = next_token
+ if limit is not None:
+ params['Limit'] = limit
+ return self.make_request(action='DescribeDataSources',
+ body=json.dumps(params))
+
+ def describe_evaluations(self, filter_variable=None, eq=None, gt=None,
+ lt=None, ge=None, le=None, ne=None, prefix=None,
+ sort_order=None, next_token=None, limit=None):
+ """
+ Returns a list of `Evaluation` objects that match the search
+ criteria in the request.
+
+ :type filter_variable: string
+ :param filter_variable:
+ Use one of the following variables to filter a list of `Evaluation`
+ objects:
+
+
+ + `CreatedAt` - Sets the search criteria to the `Evaluation` creation
+ date.
+ + `Status` - Sets the search criteria to the `Evaluation` status.
+ + `Name` - Sets the search criteria to the contents of `Evaluation` **
+ ** `Name`.
+ + `IAMUser` - Sets the search criteria to the user account that invoked
+ an `Evaluation`.
+ + `MLModelId` - Sets the search criteria to the `MLModel` that was
+ evaluated.
+ + `DataSourceId` - Sets the search criteria to the `DataSource` used in
+ `Evaluation`.
+ + `DataUri` - Sets the search criteria to the data file(s) used in
+ `Evaluation`. The URL can identify either a file or an Amazon
+ Simple Storage Service (Amazon S3) bucket or directory.
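The describe_* methods all page the same way. Below is a sketch of filtering and paging with describe_data_sources, reusing the hypothetical `ml` connection from the earlier sketch; the 'Results' and 'NextToken' keys follow the Amazon ML JSON responses:

    token = None
    while True:
        page = ml.describe_data_sources(filter_variable='Status', eq='COMPLETED',
                                        sort_order='dsc', limit=100,
                                        next_token=token)
        for ds in page.get('Results', []):
            print(ds['DataSourceId'], ds['Name'])
        token = page.get('NextToken')
        if not token:
            break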
+ + :type eq: string + :param eq: The equal to operator. The `Evaluation` results will have + `FilterVariable` values that exactly match the value specified with + `EQ`. + + :type gt: string + :param gt: The greater than operator. The `Evaluation` results will + have `FilterVariable` values that are greater than the value + specified with `GT`. + + :type lt: string + :param lt: The less than operator. The `Evaluation` results will have + `FilterVariable` values that are less than the value specified with + `LT`. + + :type ge: string + :param ge: The greater than or equal to operator. The `Evaluation` + results will have `FilterVariable` values that are greater than or + equal to the value specified with `GE`. + + :type le: string + :param le: The less than or equal to operator. The `Evaluation` results + will have `FilterVariable` values that are less than or equal to + the value specified with `LE`. + + :type ne: string + :param ne: The not equal to operator. The `Evaluation` results will + have `FilterVariable` values not equal to the value specified with + `NE`. + + :type prefix: string + :param prefix: + A string that is found at the beginning of a variable, such as `Name` + or `Id`. + + For example, an `Evaluation` could have the `Name` + `2014-09-09-HolidayGiftMailer`. To search for this `Evaluation`, + select `Name` for the `FilterVariable` and any of the following + strings for the `Prefix`: + + + + 2014-09 + + 2014-09-09 + + 2014-09-09-Holiday + + :type sort_order: string + :param sort_order: A two-value parameter that determines the sequence + of the resulting list of `Evaluation`. + + + `asc` - Arranges the list in ascending order (A-Z, 0-9). + + `dsc` - Arranges the list in descending order (Z-A, 9-0). + + + Results are sorted by `FilterVariable`. + + :type next_token: string + :param next_token: The ID of the page in the paginated results. + + :type limit: integer + :param limit: The maximum number of `Evaluation` to include in the + result. + + """ + params = {} + if filter_variable is not None: + params['FilterVariable'] = filter_variable + if eq is not None: + params['EQ'] = eq + if gt is not None: + params['GT'] = gt + if lt is not None: + params['LT'] = lt + if ge is not None: + params['GE'] = ge + if le is not None: + params['LE'] = le + if ne is not None: + params['NE'] = ne + if prefix is not None: + params['Prefix'] = prefix + if sort_order is not None: + params['SortOrder'] = sort_order + if next_token is not None: + params['NextToken'] = next_token + if limit is not None: + params['Limit'] = limit + return self.make_request(action='DescribeEvaluations', + body=json.dumps(params)) + + def describe_ml_models(self, filter_variable=None, eq=None, gt=None, + lt=None, ge=None, le=None, ne=None, prefix=None, + sort_order=None, next_token=None, limit=None): + """ + Returns a list of `MLModel` that match the search criteria in + the request. + + :type filter_variable: string + :param filter_variable: + Use one of the following variables to filter a list of `MLModel`: + + + + `CreatedAt` - Sets the search criteria to `MLModel` creation date. + + `Status` - Sets the search criteria to `MLModel` status. + + `Name` - Sets the search criteria to the contents of `MLModel` ** ** + `Name`. + + `IAMUser` - Sets the search criteria to the user account that invoked + the `MLModel` creation. + + `TrainingDataSourceId` - Sets the search criteria to the `DataSource` + used to train one or more `MLModel`. 
+ + `RealtimeEndpointStatus` - Sets the search criteria to the `MLModel` + real-time endpoint status. + + `MLModelType` - Sets the search criteria to `MLModel` type: binary, + regression, or multi-class. + + `Algorithm` - Sets the search criteria to the algorithm that the + `MLModel` uses. + + `TrainingDataURI` - Sets the search criteria to the data file(s) used + in training a `MLModel`. The URL can identify either a file or an + Amazon Simple Storage Service (Amazon S3) bucket or directory. + + :type eq: string + :param eq: The equal to operator. The `MLModel` results will have + `FilterVariable` values that exactly match the value specified with + `EQ`. + + :type gt: string + :param gt: The greater than operator. The `MLModel` results will have + `FilterVariable` values that are greater than the value specified + with `GT`. + + :type lt: string + :param lt: The less than operator. The `MLModel` results will have + `FilterVariable` values that are less than the value specified with + `LT`. + + :type ge: string + :param ge: The greater than or equal to operator. The `MLModel` results + will have `FilterVariable` values that are greater than or equal to + the value specified with `GE`. + + :type le: string + :param le: The less than or equal to operator. The `MLModel` results + will have `FilterVariable` values that are less than or equal to + the value specified with `LE`. + + :type ne: string + :param ne: The not equal to operator. The `MLModel` results will have + `FilterVariable` values not equal to the value specified with `NE`. + + :type prefix: string + :param prefix: + A string that is found at the beginning of a variable, such as `Name` + or `Id`. + + For example, an `MLModel` could have the `Name` + `2014-09-09-HolidayGiftMailer`. To search for this `MLModel`, + select `Name` for the `FilterVariable` and any of the following + strings for the `Prefix`: + + + + 2014-09 + + 2014-09-09 + + 2014-09-09-Holiday + + :type sort_order: string + :param sort_order: A two-value parameter that determines the sequence + of the resulting list of `MLModel`. + + + `asc` - Arranges the list in ascending order (A-Z, 0-9). + + `dsc` - Arranges the list in descending order (Z-A, 9-0). + + + Results are sorted by `FilterVariable`. + + :type next_token: string + :param next_token: The ID of the page in the paginated results. + + :type limit: integer + :param limit: The number of pages of information to include in the + result. The range of acceptable values is 1 through 100. The + default value is 100. + + """ + params = {} + if filter_variable is not None: + params['FilterVariable'] = filter_variable + if eq is not None: + params['EQ'] = eq + if gt is not None: + params['GT'] = gt + if lt is not None: + params['LT'] = lt + if ge is not None: + params['GE'] = ge + if le is not None: + params['LE'] = le + if ne is not None: + params['NE'] = ne + if prefix is not None: + params['Prefix'] = prefix + if sort_order is not None: + params['SortOrder'] = sort_order + if next_token is not None: + params['NextToken'] = next_token + if limit is not None: + params['Limit'] = limit + return self.make_request(action='DescribeMLModels', + body=json.dumps(params)) + + def get_batch_prediction(self, batch_prediction_id): + """ + Returns a `BatchPrediction` that includes detailed metadata, + status, and data file information for a `Batch Prediction` + request. + + :type batch_prediction_id: string + :param batch_prediction_id: An ID assigned to the `BatchPrediction` at + creation. 
+ + """ + params = {'BatchPredictionId': batch_prediction_id, } + return self.make_request(action='GetBatchPrediction', + body=json.dumps(params)) + + def get_data_source(self, data_source_id, verbose=None): + """ + Returns a `DataSource` that includes metadata and data file + information, as well as the current status of the + `DataSource`. + + `GetDataSource` provides results in normal or verbose format. + The verbose format adds the schema description and the list of + files pointed to by the DataSource to the normal format. + + :type data_source_id: string + :param data_source_id: The ID assigned to the `DataSource` at creation. + + :type verbose: boolean + :param verbose: Specifies whether the `GetDataSource` operation should + return `DataSourceSchema`. + If true, `DataSourceSchema` is returned. + + If false, `DataSourceSchema` is not returned. + + """ + params = {'DataSourceId': data_source_id, } + if verbose is not None: + params['Verbose'] = verbose + return self.make_request(action='GetDataSource', + body=json.dumps(params)) + + def get_evaluation(self, evaluation_id): + """ + Returns an `Evaluation` that includes metadata as well as the + current status of the `Evaluation`. + + :type evaluation_id: string + :param evaluation_id: The ID of the `Evaluation` to retrieve. The + evaluation of each `MLModel` is recorded and cataloged. The ID + provides the means to access the information. + + """ + params = {'EvaluationId': evaluation_id, } + return self.make_request(action='GetEvaluation', + body=json.dumps(params)) + + def get_ml_model(self, ml_model_id, verbose=None): + """ + Returns an `MLModel` that includes detailed metadata, and data + source information as well as the current status of the + `MLModel`. + + `GetMLModel` provides results in normal or verbose format. + + :type ml_model_id: string + :param ml_model_id: The ID assigned to the `MLModel` at creation. + + :type verbose: boolean + :param verbose: Specifies whether the `GetMLModel` operation should + return `Recipe`. + If true, `Recipe` is returned. + + If false, `Recipe` is not returned. + + """ + params = {'MLModelId': ml_model_id, } + if verbose is not None: + params['Verbose'] = verbose + return self.make_request(action='GetMLModel', + body=json.dumps(params)) + + def predict(self, ml_model_id, record, predict_endpoint): + """ + Generates a prediction for the observation using the specified + `MLModel`. + + + Not all response parameters will be populated because this is + dependent on the type of requested model. + + :type ml_model_id: string + :param ml_model_id: A unique identifier of the `MLModel`. + + :type record: map + :param record: A map of variable name-value pairs that represent an + observation. + + :type predict_endpoint: string + :param predict_endpoint: The endpoint to send the predict request to. + + """ + predict_host = urlsplit(predict_endpoint).hostname + if predict_host is None: + predict_host = predict_endpoint + + params = { + 'MLModelId': ml_model_id, + 'Record': record, + 'PredictEndpoint': predict_host, + } + return self.make_request(action='Predict', + body=json.dumps(params), + host=predict_host) + + def update_batch_prediction(self, batch_prediction_id, + batch_prediction_name): + """ + Updates the `BatchPredictionName` of a `BatchPrediction`. + + You can use the GetBatchPrediction operation to view the + contents of the updated data element. + + :type batch_prediction_id: string + :param batch_prediction_id: The ID assigned to the `BatchPrediction` + during creation. 
+ + :type batch_prediction_name: string + :param batch_prediction_name: A new user-supplied name or description + of the `BatchPrediction`. + + """ + params = { + 'BatchPredictionId': batch_prediction_id, + 'BatchPredictionName': batch_prediction_name, + } + return self.make_request(action='UpdateBatchPrediction', + body=json.dumps(params)) + + def update_data_source(self, data_source_id, data_source_name): + """ + Updates the `DataSourceName` of a `DataSource`. + + You can use the GetDataSource operation to view the contents + of the updated data element. + + :type data_source_id: string + :param data_source_id: The ID assigned to the `DataSource` during + creation. + + :type data_source_name: string + :param data_source_name: A new user-supplied name or description of the + `DataSource` that will replace the current description. + + """ + params = { + 'DataSourceId': data_source_id, + 'DataSourceName': data_source_name, + } + return self.make_request(action='UpdateDataSource', + body=json.dumps(params)) + + def update_evaluation(self, evaluation_id, evaluation_name): + """ + Updates the `EvaluationName` of an `Evaluation`. + + You can use the GetEvaluation operation to view the contents + of the updated data element. + + :type evaluation_id: string + :param evaluation_id: The ID assigned to the `Evaluation` during + creation. + + :type evaluation_name: string + :param evaluation_name: A new user-supplied name or description of the + `Evaluation` that will replace the current content. + + """ + params = { + 'EvaluationId': evaluation_id, + 'EvaluationName': evaluation_name, + } + return self.make_request(action='UpdateEvaluation', + body=json.dumps(params)) + + def update_ml_model(self, ml_model_id, ml_model_name=None, + score_threshold=None): + """ + Updates the `MLModelName` and the `ScoreThreshold` of an + `MLModel`. + + You can use the GetMLModel operation to view the contents of + the updated data element. + + :type ml_model_id: string + :param ml_model_id: The ID assigned to the `MLModel` during creation. + + :type ml_model_name: string + :param ml_model_name: A user-supplied name or description of the + `MLModel`. + + :type score_threshold: float + :param score_threshold: The `ScoreThreshold` used in binary + classification `MLModel` that marks the boundary between a positive + prediction and a negative prediction. + Output values greater than or equal to the `ScoreThreshold` receive a + positive result from the `MLModel`, such as `True`. Output values + less than the `ScoreThreshold` receive a negative response from the + `MLModel`, such as `False`. 
+ + """ + params = {'MLModelId': ml_model_id, } + if ml_model_name is not None: + params['MLModelName'] = ml_model_name + if score_threshold is not None: + params['ScoreThreshold'] = score_threshold + return self.make_request(action='UpdateMLModel', + body=json.dumps(params)) + + def make_request(self, action, body, host=None): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request_kwargs = { + 'method':'POST', 'path':'/', 'auth_path':'/', 'params':{}, + 'headers': headers, 'data':body + } + if host is not None: + headers['Host'] = host + http_request_kwargs['host'] = host + http_request = self.build_base_http_request(**http_request_kwargs) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/manage/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/manage/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..49d029ba2c9da739173ebf25051e06285e681e05 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/manage/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# + + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/manage/cmdshell.py b/desktop/core/ext-py/boto-2.38.0/boto/manage/cmdshell.py new file mode 100644 index 0000000000000000000000000000000000000000..f53227763a08a4c9c1beb94e585ab04c0e4dce40 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/manage/cmdshell.py @@ -0,0 +1,407 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +""" +The cmdshell module uses the paramiko package to create SSH connections +to the servers that are represented by instance objects. The module has +functions for running commands, managing files, and opening interactive +shell sessions over those connections. +""" +from boto.mashups.interactive import interactive_shell +import boto +import os +import time +import shutil +import paramiko +import socket +import subprocess + +from boto.compat import StringIO + +class SSHClient(object): + """ + This class creates a paramiko.SSHClient() object that represents + a session with an SSH server. You can use the SSHClient object to send + commands to the remote host and manipulate files on the remote host. + + :ivar server: A Server object or FakeServer object. + :ivar host_key_file: The path to the user's .ssh key files. + :ivar uname: The username for the SSH connection. Default = 'root'. + :ivar timeout: The optional timeout variable for the TCP connection. + :ivar ssh_pwd: An optional password to use for authentication or for + unlocking the private key. + """ + def __init__(self, server, + host_key_file='~/.ssh/known_hosts', + uname='root', timeout=None, ssh_pwd=None): + self.server = server + self.host_key_file = host_key_file + self.uname = uname + self._timeout = timeout + self._pkey = paramiko.RSAKey.from_private_key_file(server.ssh_key_file, + password=ssh_pwd) + self._ssh_client = paramiko.SSHClient() + self._ssh_client.load_system_host_keys() + self._ssh_client.load_host_keys(os.path.expanduser(host_key_file)) + self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + self.connect() + + def connect(self, num_retries=5): + """ + Connect to an SSH server and authenticate with it. + + :type num_retries: int + :param num_retries: The maximum number of connection attempts. 
+ """ + retry = 0 + while retry < num_retries: + try: + self._ssh_client.connect(self.server.hostname, + username=self.uname, + pkey=self._pkey, + timeout=self._timeout) + return + except socket.error as xxx_todo_changeme: + (value, message) = xxx_todo_changeme.args + if value in (51, 61, 111): + print('SSH Connection refused, will retry in 5 seconds') + time.sleep(5) + retry += 1 + else: + raise + except paramiko.BadHostKeyException: + print("%s has an entry in ~/.ssh/known_hosts and it doesn't match" % self.server.hostname) + print('Edit that file to remove the entry and then hit return to try again') + raw_input('Hit Enter when ready') + retry += 1 + except EOFError: + print('Unexpected Error from SSH Connection, retry in 5 seconds') + time.sleep(5) + retry += 1 + print('Could not establish SSH connection') + + def open_sftp(self): + """ + Open an SFTP session on the SSH server. + + :rtype: :class:`paramiko.sftp_client.SFTPClient` + :return: An SFTP client object. + """ + return self._ssh_client.open_sftp() + + def get_file(self, src, dst): + """ + Open an SFTP session on the remote host, and copy a file from + the remote host to the specified path on the local host. + + :type src: string + :param src: The path to the target file on the remote host. + + :type dst: string + :param dst: The path on your local host where you want to + store the file. + """ + sftp_client = self.open_sftp() + sftp_client.get(src, dst) + + def put_file(self, src, dst): + """ + Open an SFTP session on the remote host, and copy a file from + the local host to the specified path on the remote host. + + :type src: string + :param src: The path to the target file on your local host. + + :type dst: string + :param dst: The path on the remote host where you want to store + the file. + """ + sftp_client = self.open_sftp() + sftp_client.put(src, dst) + + def open(self, filename, mode='r', bufsize=-1): + """ + Open an SFTP session to the remote host, and open a file on + that host. + + :type filename: string + :param filename: The path to the file on the remote host. + + :type mode: string + :param mode: The file interaction mode. + + :type bufsize: integer + :param bufsize: The file buffer size. + + :rtype: :class:`paramiko.sftp_file.SFTPFile` + :return: A paramiko proxy object for a file on the remote server. + """ + sftp_client = self.open_sftp() + return sftp_client.open(filename, mode, bufsize) + + def listdir(self, path): + """ + List all of the files and subdirectories at the specified path + on the remote host. + + :type path: string + :param path: The base path from which to obtain the list. + + :rtype: list + :return: A list of files and subdirectories at the specified path. + """ + sftp_client = self.open_sftp() + return sftp_client.listdir(path) + + def isdir(self, path): + """ + Check the specified path on the remote host to determine if + it is a directory. + + :type path: string + :param path: The path to the directory that you want to check. + + :rtype: integer + :return: If the path is a directory, the function returns 1. + If the path is a file or an invalid path, the function + returns 0. + """ + status = self.run('[ -d %s ] || echo "FALSE"' % path) + if status[1].startswith('FALSE'): + return 0 + return 1 + + def exists(self, path): + """ + Check the remote host for the specified path, or a file + at the specified path. This function returns 1 if the + path or the file exist on the remote host, and returns 0 if + the path or the file does not exist on the remote host. 
+
+ :type path: string
+ :param path: The path to the directory or file that you want to check.
+
+ :rtype: integer
+ :return: If the path or the file exists, the function returns 1.
+ If the path or the file does not exist on the remote host,
+ the function returns 0.
+ """
+
+ status = self.run('[ -a %s ] || echo "FALSE"' % path)
+ if status[1].startswith('FALSE'):
+ return 0
+ return 1
+
+ def shell(self):
+ """
+ Start an interactive shell session with the remote host.
+ """
+ channel = self._ssh_client.invoke_shell()
+ interactive_shell(channel)
+
+ def run(self, command):
+ """
+ Run a command on the remote host.
+
+ :type command: string
+ :param command: The command that you want to send to the remote host.
+
+ :rtype: tuple
+ :return: This function returns a tuple that contains an integer status,
+ the stdout from the command, and the stderr from the command.
+
+ """
+ boto.log.debug('running:%s on %s' % (command, self.server.instance_id))
+ status = 0
+ std_out = ''
+ std_err = ''
+ try:
+ t = self._ssh_client.exec_command(command)
+ # read and close the channels inside the try block so a failed
+ # exec_command cannot leave t unbound
+ std_out = t[1].read()
+ std_err = t[2].read()
+ t[0].close()
+ t[1].close()
+ t[2].close()
+ except paramiko.SSHException:
+ status = 1
+ boto.log.debug('stdout: %s' % std_out)
+ boto.log.debug('stderr: %s' % std_err)
+ return (status, std_out, std_err)
+
+ def run_pty(self, command):
+ """
+ Request a pseudo-terminal from a server, and execute a command on that
+ server.
+
+ :type command: string
+ :param command: The command that you want to run on the remote host.
+
+ :rtype: :class:`paramiko.channel.Channel`
+ :return: An open channel object.
+ """
+ boto.log.debug('running:%s on %s' % (command, self.server.instance_id))
+ channel = self._ssh_client.get_transport().open_session()
+ channel.get_pty()
+ channel.exec_command(command)
+ return channel
+
+ def close(self):
+ """
+ Close an SSH session and any open channels that are tied to it.
+ """
+ transport = self._ssh_client.get_transport()
+ transport.close()
+ self.server.reset_cmdshell()
+
+class LocalClient(object):
+ """
+ :ivar server: A Server object or FakeServer object.
+ :ivar host_key_file: The path to the user's .ssh key files.
+ :ivar uname: The username for the SSH connection. Default = 'root'.
+ """
+ def __init__(self, server, host_key_file=None, uname='root'):
+ self.server = server
+ self.host_key_file = host_key_file
+ self.uname = uname
+
+ def get_file(self, src, dst):
+ """
+ Copy a file from one directory to another.
+ """
+ shutil.copyfile(src, dst)
+
+ def put_file(self, src, dst):
+ """
+ Copy a file from one directory to another.
+ """
+ shutil.copyfile(src, dst)
+
+ def listdir(self, path):
+ """
+ List all of the files and subdirectories at the specified path.
+
+ :rtype: list
+ :return: Return a list containing the names of the entries
+ in the directory given by path.
+ """
+ return os.listdir(path)
+
+ def isdir(self, path):
+ """
+ Check the specified path to determine if it is a directory.
+
+ :rtype: boolean
+ :return: Returns True if the path is an existing directory.
+ """
+ return os.path.isdir(path)
+
+ def exists(self, path):
+ """
+ Check for the specified path, or check a file at the specified path.
+
+ :rtype: boolean
+ :return: If the path or the file exist, the function returns True.
+ """
+ return os.path.exists(path)
+
+ def shell(self):
+ raise NotImplementedError('shell not supported with LocalClient')
+
+ def run(self, command):
+ """
+ Open a subprocess and run a command on the local host.
+
+ :type command: string
+ :param command: The command line to run on the local host.
+
+ :rtype: tuple
+ :return: This function returns a tuple that contains an integer status
+ and a string with the combined stdout and stderr output.
+ """
+ boto.log.info('running:%s' % command)
+ log_fp = StringIO()
+ process = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ while process.poll() is None:
+ time.sleep(1)
+ t = process.communicate()
+ log_fp.write(t[0])
+ log_fp.write(t[1])
+ boto.log.info('output: %s' % log_fp.getvalue())
+ return (process.returncode, log_fp.getvalue())
+
+ def close(self):
+ pass
+
+class FakeServer(object):
+ """
+ This object has a subset of the variables that are normally in a
+ :class:`boto.manage.server.Server` object. You can use this FakeServer
+ object to create a :class:`boto.manage.cmdshell.SSHClient` object if you
+ don't have a real Server object.
+
+ :ivar instance: A boto Instance object.
+ :ivar ssh_key_file: The path to the SSH key file.
+ """
+ def __init__(self, instance, ssh_key_file):
+ self.instance = instance
+ self.ssh_key_file = ssh_key_file
+ self.hostname = instance.dns_name
+ self.instance_id = self.instance.id
+
+def start(server):
+ """
+ Connect to the specified server.
+
+ :return: If the server is local, the function returns a
+ :class:`boto.manage.cmdshell.LocalClient` object.
+ If the server is remote, the function returns a
+ :class:`boto.manage.cmdshell.SSHClient` object.
+ """
+ instance_id = boto.config.get('Instance', 'instance-id', None)
+ if instance_id == server.instance_id:
+ return LocalClient(server)
+ else:
+ return SSHClient(server)
+
+def sshclient_from_instance(instance, ssh_key_file,
+ host_key_file='~/.ssh/known_hosts',
+ user_name='root', ssh_pwd=None):
+ """
+ Create and return an SSHClient object given an
+ instance object.
+
+ :type instance: :class:`boto.ec2.instance.Instance` object
+ :param instance: The instance object.
+
+ :type ssh_key_file: string
+ :param ssh_key_file: A path to the private key file that is
+ used to log into the instance.
+
+ :type host_key_file: string
+ :param host_key_file: A path to the known_hosts file used
+ by the SSH client.
+ Defaults to ~/.ssh/known_hosts
+
+ :type user_name: string
+ :param user_name: The username to use when logging into
+ the instance. Defaults to root.
+
+ :type ssh_pwd: string
+ :param ssh_pwd: The passphrase, if any, associated with the
+ private key.
+ """
+ s = FakeServer(instance, ssh_key_file)
+ # pass ssh_pwd by keyword so it is not mistaken for the timeout argument
+ return SSHClient(s, host_key_file, user_name, ssh_pwd=ssh_pwd)
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/manage/propget.py b/desktop/core/ext-py/boto-2.38.0/boto/manage/propget.py
new file mode 100644
index 0000000000000000000000000000000000000000..d034127d8b8cb1c2c6211f2f2865c7fc9eb3c400
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/manage/propget.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+def get(prop, choices=None):
+ prompt = prop.verbose_name
+ if not prompt:
+ prompt = prop.name
+ if choices:
+ if callable(choices):
+ choices = choices()
+ else:
+ choices = prop.get_choices()
+ valid = False
+ while not valid:
+ if choices:
+ min = 1
+ max = len(choices)
+ for i in range(min, max+1):
+ value = choices[i-1]
+ if isinstance(value, tuple):
+ value = value[0]
+ print('[%d] %s' % (i, value))
+ value = raw_input('%s [%d-%d]: ' % (prompt, min, max))
+ try:
+ int_value = int(value)
+ value = choices[int_value-1]
+ if isinstance(value, tuple):
+ value = value[1]
+ valid = True
+ except ValueError:
+ print('%s is not a valid choice' % value)
+ except IndexError:
+ print('%s is not within the range [%d-%d]' % (value, min, max))
+ else:
+ value = raw_input('%s: ' % prompt)
+ try:
+ value = prop.validate(value)
+ if prop.empty(value) and prop.required:
+ print('A value is required')
+ else:
+ valid = True
+ except:
+ print('Invalid value: %s' % value)
+ return value
+
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/manage/server.py b/desktop/core/ext-py/boto-2.38.0/boto/manage/server.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9224ab8a8bf7e153bd34b41b5fa83c8d881b503
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/manage/server.py
@@ -0,0 +1,556 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010 Chris Moyer http://coredumped.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
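To illustrate the prompt helper above: propget.get() renders a numbered menu for a property's choices and returns the selection. The sketch below mirrors how server.py (the next file) calls it, using a property class from boto.sdb.db.property; it expects an interactive Python 2 session, since the helper reads input with raw_input:

    from boto.sdb.db.property import StringProperty
    from boto.manage import propget

    prop = StringProperty(name='instance_type', verbose_name='Instance Type',
                          choices=['m1.small', 'm1.large', 'm1.xlarge'])
    instance_type = propget.get(prop)  # prints the menu, returns the chosen string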
+""" +High-level abstraction of an EC2 server +""" + +import boto.ec2 +from boto.mashups.iobject import IObject +from boto.pyami.config import BotoConfigPath, Config +from boto.sdb.db.model import Model +from boto.sdb.db.property import StringProperty, IntegerProperty, BooleanProperty, CalculatedProperty +from boto.manage import propget +from boto.ec2.zone import Zone +from boto.ec2.keypair import KeyPair +import os, time +from contextlib import closing +from boto.exception import EC2ResponseError +from boto.compat import six, StringIO + +InstanceTypes = ['m1.small', 'm1.large', 'm1.xlarge', + 'c1.medium', 'c1.xlarge', + 'm2.2xlarge', 'm2.4xlarge'] + +class Bundler(object): + + def __init__(self, server, uname='root'): + from boto.manage.cmdshell import SSHClient + self.server = server + self.uname = uname + self.ssh_client = SSHClient(server, uname=uname) + + def copy_x509(self, key_file, cert_file): + print('\tcopying cert and pk over to /mnt directory on server') + self.ssh_client.open_sftp() + path, name = os.path.split(key_file) + self.remote_key_file = '/mnt/%s' % name + self.ssh_client.put_file(key_file, self.remote_key_file) + path, name = os.path.split(cert_file) + self.remote_cert_file = '/mnt/%s' % name + self.ssh_client.put_file(cert_file, self.remote_cert_file) + print('...complete!') + + def bundle_image(self, prefix, size, ssh_key): + command = "" + if self.uname != 'root': + command = "sudo " + command += 'ec2-bundle-vol ' + command += '-c %s -k %s ' % (self.remote_cert_file, self.remote_key_file) + command += '-u %s ' % self.server._reservation.owner_id + command += '-p %s ' % prefix + command += '-s %d ' % size + command += '-d /mnt ' + if self.server.instance_type == 'm1.small' or self.server.instance_type == 'c1.medium': + command += '-r i386' + else: + command += '-r x86_64' + return command + + def upload_bundle(self, bucket, prefix, ssh_key): + command = "" + if self.uname != 'root': + command = "sudo " + command += 'ec2-upload-bundle ' + command += '-m /mnt/%s.manifest.xml ' % prefix + command += '-b %s ' % bucket + command += '-a %s ' % self.server.ec2.aws_access_key_id + command += '-s %s ' % self.server.ec2.aws_secret_access_key + return command + + def bundle(self, bucket=None, prefix=None, key_file=None, cert_file=None, + size=None, ssh_key=None, fp=None, clear_history=True): + iobject = IObject() + if not bucket: + bucket = iobject.get_string('Name of S3 bucket') + if not prefix: + prefix = iobject.get_string('Prefix for AMI file') + if not key_file: + key_file = iobject.get_filename('Path to RSA private key file') + if not cert_file: + cert_file = iobject.get_filename('Path to RSA public cert file') + if not size: + size = iobject.get_int('Size (in MB) of bundled image') + if not ssh_key: + ssh_key = self.server.get_ssh_key_file() + self.copy_x509(key_file, cert_file) + if not fp: + fp = StringIO() + fp.write('sudo mv %s /mnt/boto.cfg; ' % BotoConfigPath) + fp.write('mv ~/.ssh/authorized_keys /mnt/authorized_keys; ') + if clear_history: + fp.write('history -c; ') + fp.write(self.bundle_image(prefix, size, ssh_key)) + fp.write('; ') + fp.write(self.upload_bundle(bucket, prefix, ssh_key)) + fp.write('; ') + fp.write('sudo mv /mnt/boto.cfg %s; ' % BotoConfigPath) + fp.write('mv /mnt/authorized_keys ~/.ssh/authorized_keys') + command = fp.getvalue() + print('running the following command on the remote server:') + print(command) + t = self.ssh_client.run(command) + print('\t%s' % t[0]) + print('\t%s' % t[1]) + print('...complete!') + print('registering image...') 
+ self.image_id = self.server.ec2.register_image(name=prefix, image_location='%s/%s.manifest.xml' % (bucket, prefix)) + return self.image_id + +class CommandLineGetter(object): + + def get_ami_list(self): + my_amis = [] + for ami in self.ec2.get_all_images(): + # hack alert, need a better way to do this! + if ami.location.find('pyami') >= 0: + my_amis.append((ami.location, ami)) + return my_amis + + def get_region(self, params): + region = params.get('region', None) + if isinstance(region, basestring): + region = boto.ec2.get_region(region) + params['region'] = region + if not region: + prop = self.cls.find_property('region_name') + params['region'] = propget.get(prop, choices=boto.ec2.regions) + self.ec2 = params['region'].connect() + + def get_name(self, params): + if not params.get('name', None): + prop = self.cls.find_property('name') + params['name'] = propget.get(prop) + + def get_description(self, params): + if not params.get('description', None): + prop = self.cls.find_property('description') + params['description'] = propget.get(prop) + + def get_instance_type(self, params): + if not params.get('instance_type', None): + prop = StringProperty(name='instance_type', verbose_name='Instance Type', + choices=InstanceTypes) + params['instance_type'] = propget.get(prop) + + def get_quantity(self, params): + if not params.get('quantity', None): + prop = IntegerProperty(name='quantity', verbose_name='Number of Instances') + params['quantity'] = propget.get(prop) + + def get_zone(self, params): + if not params.get('zone', None): + prop = StringProperty(name='zone', verbose_name='EC2 Availability Zone', + choices=self.ec2.get_all_zones) + params['zone'] = propget.get(prop) + + def get_ami_id(self, params): + valid = False + while not valid: + ami = params.get('ami', None) + if not ami: + prop = StringProperty(name='ami', verbose_name='AMI') + ami = propget.get(prop) + try: + rs = self.ec2.get_all_images([ami]) + if len(rs) == 1: + valid = True + params['ami'] = rs[0] + except EC2ResponseError: + pass + + def get_group(self, params): + group = params.get('group', None) + if isinstance(group, basestring): + group_list = self.ec2.get_all_security_groups() + for g in group_list: + if g.name == group: + group = g + params['group'] = g + if not group: + prop = StringProperty(name='group', verbose_name='EC2 Security Group', + choices=self.ec2.get_all_security_groups) + params['group'] = propget.get(prop) + + def get_key(self, params): + keypair = params.get('keypair', None) + if isinstance(keypair, basestring): + key_list = self.ec2.get_all_key_pairs() + for k in key_list: + if k.name == keypair: + keypair = k.name + params['keypair'] = k.name + if not keypair: + prop = StringProperty(name='keypair', verbose_name='EC2 KeyPair', + choices=self.ec2.get_all_key_pairs) + params['keypair'] = propget.get(prop).name + + def get(self, cls, params): + self.cls = cls + self.get_region(params) + self.ec2 = params['region'].connect() + self.get_name(params) + self.get_description(params) + self.get_instance_type(params) + self.get_zone(params) + self.get_quantity(params) + self.get_ami_id(params) + self.get_group(params) + self.get_key(params) + +class Server(Model): + + # + # The properties of this object consists of real properties for data that + # is not already stored in EC2 somewhere (e.g. name, description) plus + # calculated properties for all of the properties that are already in + # EC2 (e.g. hostname, security groups, etc.) 
+ # + name = StringProperty(unique=True, verbose_name="Name") + description = StringProperty(verbose_name="Description") + region_name = StringProperty(verbose_name="EC2 Region Name") + instance_id = StringProperty(verbose_name="EC2 Instance ID") + elastic_ip = StringProperty(verbose_name="EC2 Elastic IP Address") + production = BooleanProperty(verbose_name="Is This Server Production", default=False) + ami_id = CalculatedProperty(verbose_name="AMI ID", calculated_type=str, use_method=True) + zone = CalculatedProperty(verbose_name="Availability Zone Name", calculated_type=str, use_method=True) + hostname = CalculatedProperty(verbose_name="Public DNS Name", calculated_type=str, use_method=True) + private_hostname = CalculatedProperty(verbose_name="Private DNS Name", calculated_type=str, use_method=True) + groups = CalculatedProperty(verbose_name="Security Groups", calculated_type=list, use_method=True) + security_group = CalculatedProperty(verbose_name="Primary Security Group Name", calculated_type=str, use_method=True) + key_name = CalculatedProperty(verbose_name="Key Name", calculated_type=str, use_method=True) + instance_type = CalculatedProperty(verbose_name="Instance Type", calculated_type=str, use_method=True) + status = CalculatedProperty(verbose_name="Current Status", calculated_type=str, use_method=True) + launch_time = CalculatedProperty(verbose_name="Server Launch Time", calculated_type=str, use_method=True) + console_output = CalculatedProperty(verbose_name="Console Output", calculated_type=open, use_method=True) + + packages = [] + plugins = [] + + @classmethod + def add_credentials(cls, cfg, aws_access_key_id, aws_secret_access_key): + if not cfg.has_section('Credentials'): + cfg.add_section('Credentials') + cfg.set('Credentials', 'aws_access_key_id', aws_access_key_id) + cfg.set('Credentials', 'aws_secret_access_key', aws_secret_access_key) + if not cfg.has_section('DB_Server'): + cfg.add_section('DB_Server') + cfg.set('DB_Server', 'db_type', 'SimpleDB') + cfg.set('DB_Server', 'db_name', cls._manager.domain.name) + + @classmethod + def create(cls, config_file=None, logical_volume = None, cfg = None, **params): + """ + Create a new instance based on the specified configuration file or the specified + configuration and the passed in parameters. + + If the config_file argument is not None, the configuration is read from there. + Otherwise, the cfg argument is used. + + The config file may include other config files with a #import reference. The included + config files must reside in the same directory as the specified file. + + The logical_volume argument, if supplied, will be used to get the current physical + volume ID and use that as an override of the value specified in the config file. This + may be useful for debugging purposes when you want to debug with a production config + file but a test Volume. + + The dictionary argument may be used to override any EC2 configuration values in the + config file. 
+ """ + if config_file: + cfg = Config(path=config_file) + if cfg.has_section('EC2'): + # include any EC2 configuration values that aren't specified in params: + for option in cfg.options('EC2'): + if option not in params: + params[option] = cfg.get('EC2', option) + getter = CommandLineGetter() + getter.get(cls, params) + region = params.get('region') + ec2 = region.connect() + cls.add_credentials(cfg, ec2.aws_access_key_id, ec2.aws_secret_access_key) + ami = params.get('ami') + kp = params.get('keypair') + group = params.get('group') + zone = params.get('zone') + # deal with possibly passed in logical volume: + if logical_volume != None: + cfg.set('EBS', 'logical_volume_name', logical_volume.name) + cfg_fp = StringIO() + cfg.write(cfg_fp) + # deal with the possibility that zone and/or keypair are strings read from the config file: + if isinstance(zone, Zone): + zone = zone.name + if isinstance(kp, KeyPair): + kp = kp.name + reservation = ami.run(min_count=1, + max_count=params.get('quantity', 1), + key_name=kp, + security_groups=[group], + instance_type=params.get('instance_type'), + placement = zone, + user_data = cfg_fp.getvalue()) + l = [] + i = 0 + elastic_ip = params.get('elastic_ip') + instances = reservation.instances + if elastic_ip is not None and instances.__len__() > 0: + instance = instances[0] + print('Waiting for instance to start so we can set its elastic IP address...') + # Sometimes we get a message from ec2 that says that the instance does not exist. + # Hopefully the following delay will giv eec2 enough time to get to a stable state: + time.sleep(5) + while instance.update() != 'running': + time.sleep(1) + instance.use_ip(elastic_ip) + print('set the elastic IP of the first instance to %s' % elastic_ip) + for instance in instances: + s = cls() + s.ec2 = ec2 + s.name = params.get('name') + '' if i==0 else str(i) + s.description = params.get('description') + s.region_name = region.name + s.instance_id = instance.id + if elastic_ip and i == 0: + s.elastic_ip = elastic_ip + s.put() + l.append(s) + i += 1 + return l + + @classmethod + def create_from_instance_id(cls, instance_id, name, description=''): + regions = boto.ec2.regions() + for region in regions: + ec2 = region.connect() + try: + rs = ec2.get_all_reservations([instance_id]) + except: + rs = [] + if len(rs) == 1: + s = cls() + s.ec2 = ec2 + s.name = name + s.description = description + s.region_name = region.name + s.instance_id = instance_id + s._reservation = rs[0] + for instance in s._reservation.instances: + if instance.id == instance_id: + s._instance = instance + s.put() + return s + return None + + @classmethod + def create_from_current_instances(cls): + servers = [] + regions = boto.ec2.regions() + for region in regions: + ec2 = region.connect() + rs = ec2.get_all_reservations() + for reservation in rs: + for instance in reservation.instances: + try: + next(Server.find(instance_id=instance.id)) + boto.log.info('Server for %s already exists' % instance.id) + except StopIteration: + s = cls() + s.ec2 = ec2 + s.name = instance.id + s.region_name = region.name + s.instance_id = instance.id + s._reservation = reservation + s.put() + servers.append(s) + return servers + + def __init__(self, id=None, **kw): + super(Server, self).__init__(id, **kw) + self.ssh_key_file = None + self.ec2 = None + self._cmdshell = None + self._reservation = None + self._instance = None + self._setup_ec2() + + def _setup_ec2(self): + if self.ec2 and self._instance and self._reservation: + return + if self.id: + if self.region_name: + 
for region in boto.ec2.regions(): + if region.name == self.region_name: + self.ec2 = region.connect() + if self.instance_id and not self._instance: + try: + rs = self.ec2.get_all_reservations([self.instance_id]) + if len(rs) >= 1: + for instance in rs[0].instances: + if instance.id == self.instance_id: + self._reservation = rs[0] + self._instance = instance + except EC2ResponseError: + pass + + def _status(self): + status = '' + if self._instance: + self._instance.update() + status = self._instance.state + return status + + def _hostname(self): + hostname = '' + if self._instance: + hostname = self._instance.public_dns_name + return hostname + + def _private_hostname(self): + hostname = '' + if self._instance: + hostname = self._instance.private_dns_name + return hostname + + def _instance_type(self): + it = '' + if self._instance: + it = self._instance.instance_type + return it + + def _launch_time(self): + lt = '' + if self._instance: + lt = self._instance.launch_time + return lt + + def _console_output(self): + co = '' + if self._instance: + co = self._instance.get_console_output() + return co + + def _groups(self): + gn = [] + if self._reservation: + gn = self._reservation.groups + return gn + + def _security_group(self): + groups = self._groups() + if len(groups) >= 1: + return groups[0].id + return "" + + def _zone(self): + zone = None + if self._instance: + zone = self._instance.placement + return zone + + def _key_name(self): + kn = None + if self._instance: + kn = self._instance.key_name + return kn + + def put(self): + super(Server, self).put() + self._setup_ec2() + + def delete(self): + if self.production: + raise ValueError("Can't delete a production server") + #self.stop() + super(Server, self).delete() + + def stop(self): + if self.production: + raise ValueError("Can't delete a production server") + if self._instance: + self._instance.stop() + + def terminate(self): + if self.production: + raise ValueError("Can't delete a production server") + if self._instance: + self._instance.terminate() + + def reboot(self): + if self._instance: + self._instance.reboot() + + def wait(self): + while self.status != 'running': + time.sleep(5) + + def get_ssh_key_file(self): + if not self.ssh_key_file: + ssh_dir = os.path.expanduser('~/.ssh') + if os.path.isdir(ssh_dir): + ssh_file = os.path.join(ssh_dir, '%s.pem' % self.key_name) + if os.path.isfile(ssh_file): + self.ssh_key_file = ssh_file + if not self.ssh_key_file: + iobject = IObject() + self.ssh_key_file = iobject.get_filename('Path to OpenSSH Key file') + return self.ssh_key_file + + def get_cmdshell(self): + if not self._cmdshell: + from boto.manage import cmdshell + self.get_ssh_key_file() + self._cmdshell = cmdshell.start(self) + return self._cmdshell + + def reset_cmdshell(self): + self._cmdshell = None + + def run(self, command): + with closing(self.get_cmdshell()) as cmd: + status = cmd.run(command) + return status + + def get_bundler(self, uname='root'): + self.get_ssh_key_file() + return Bundler(self, uname) + + def get_ssh_client(self, uname='root', ssh_pwd=None): + from boto.manage.cmdshell import SSHClient + self.get_ssh_key_file() + return SSHClient(self, uname=uname, ssh_pwd=ssh_pwd) + + def install(self, pkg): + return self.run('apt-get -y install %s' % pkg) + + + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/manage/task.py b/desktop/core/ext-py/boto-2.38.0/boto/manage/task.py new file mode 100644 index 0000000000000000000000000000000000000000..c6663b9f0eb278fe515fe9541061e4feef6ce8ec --- /dev/null +++ 
b/desktop/core/ext-py/boto-2.38.0/boto/manage/task.py @@ -0,0 +1,176 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.sdb.db.property import StringProperty, DateTimeProperty, IntegerProperty +from boto.sdb.db.model import Model +import datetime, subprocess, time +from boto.compat import StringIO + +def check_hour(val): + if val == '*': + return + if int(val) < 0 or int(val) > 23: + raise ValueError + +class Task(Model): + + """ + A scheduled, repeating task that can be executed by any participating servers. + The scheduling is similar to cron jobs. Each task has an hour attribute. + The allowable values for hour are [0-23|*]. + + To keep the operation reasonably efficient and not cause excessive polling, + the minimum granularity of a Task is hourly. Some examples: + + hour='*' - the task would be executed each hour + hour='3' - the task would be executed at 3AM GMT each day. + + """ + name = StringProperty() + hour = StringProperty(required=True, validator=check_hour, default='*') + command = StringProperty(required=True) + last_executed = DateTimeProperty() + last_status = IntegerProperty() + last_output = StringProperty() + message_id = StringProperty() + + @classmethod + def start_all(cls, queue_name): + for task in cls.all(): + task.start(queue_name) + + def __init__(self, id=None, **kw): + super(Task, self).__init__(id, **kw) + self.hourly = self.hour == '*' + self.daily = self.hour != '*' + self.now = datetime.datetime.utcnow() + + def check(self): + """ + Determine how long until the next scheduled time for a Task. + Returns the number of seconds until the next scheduled time or zero + if the task needs to be run immediately. + If it's an hourly task and it's never been run, run it now. + If it's a daily task and it's never been run and the hour is right, run it now. 
+ """ + boto.log.info('checking Task[%s]-now=%s, last=%s' % (self.name, self.now, self.last_executed)) + + if self.hourly and not self.last_executed: + return 0 + + if self.daily and not self.last_executed: + if int(self.hour) == self.now.hour: + return 0 + else: + return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60 + + delta = self.now - self.last_executed + if self.hourly: + if delta.seconds >= 60*60: + return 0 + else: + return 60*60 - delta.seconds + else: + if int(self.hour) == self.now.hour: + if delta.days >= 1: + return 0 + else: + return 82800 # 23 hours, just to be safe + else: + return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60 + + def _run(self, msg, vtimeout): + boto.log.info('Task[%s] - running:%s' % (self.name, self.command)) + log_fp = StringIO() + process = subprocess.Popen(self.command, shell=True, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + nsecs = 5 + current_timeout = vtimeout + while process.poll() is None: + boto.log.info('nsecs=%s, timeout=%s' % (nsecs, current_timeout)) + if nsecs >= current_timeout: + current_timeout += vtimeout + boto.log.info('Task[%s] - setting timeout to %d seconds' % (self.name, current_timeout)) + if msg: + msg.change_visibility(current_timeout) + time.sleep(5) + nsecs += 5 + t = process.communicate() + log_fp.write(t[0]) + log_fp.write(t[1]) + boto.log.info('Task[%s] - output: %s' % (self.name, log_fp.getvalue())) + self.last_executed = self.now + self.last_status = process.returncode + self.last_output = log_fp.getvalue()[0:1023] + + def run(self, msg, vtimeout=60): + delay = self.check() + boto.log.info('Task[%s] - delay=%s seconds' % (self.name, delay)) + if delay == 0: + self._run(msg, vtimeout) + queue = msg.queue + new_msg = queue.new_message(self.id) + new_msg = queue.write(new_msg) + self.message_id = new_msg.id + self.put() + boto.log.info('Task[%s] - new message id=%s' % (self.name, new_msg.id)) + msg.delete() + boto.log.info('Task[%s] - deleted message %s' % (self.name, msg.id)) + else: + boto.log.info('new_vtimeout: %d' % delay) + msg.change_visibility(delay) + + def start(self, queue_name): + boto.log.info('Task[%s] - starting with queue: %s' % (self.name, queue_name)) + queue = boto.lookup('sqs', queue_name) + msg = queue.new_message(self.id) + msg = queue.write(msg) + self.message_id = msg.id + self.put() + boto.log.info('Task[%s] - start successful' % self.name) + +class TaskPoller(object): + + def __init__(self, queue_name): + self.sqs = boto.connect_sqs() + self.queue = self.sqs.lookup(queue_name) + + def poll(self, wait=60, vtimeout=60): + while True: + m = self.queue.read(vtimeout) + if m: + task = Task.get_by_id(m.get_body()) + if task: + if not task.message_id or m.id == task.message_id: + boto.log.info('Task[%s] - read message %s' % (task.name, m.id)) + task.run(m, vtimeout) + else: + boto.log.info('Task[%s] - found extraneous message, ignoring' % task.name) + else: + time.sleep(wait) + + + + + + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/manage/test_manage.py b/desktop/core/ext-py/boto-2.38.0/boto/manage/test_manage.py new file mode 100644 index 0000000000000000000000000000000000000000..a8c188c3192a6a9848e08a3cd968f21fabd7dfa6 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/manage/test_manage.py @@ -0,0 +1,34 @@ +from boto.manage.server import Server +from boto.manage.volume import Volume +import time + +print('--> Creating New Volume') +volume = Volume.create() +print(volume) + +print('--> Creating New 
Server') +server_list = Server.create() +server = server_list[0] +print(server) + +print('----> Waiting for Server to start up') +while server.status != 'running': + print('*') + time.sleep(10) +print('----> Server is running') + +print('--> Run "df -k" on Server') +status = server.run('df -k') +print(status[1]) + +print('--> Now run volume.make_ready to make the volume ready to use on server') +volume.make_ready(server) + +print('--> Run "df -k" on Server') +status = server.run('df -k') +print(status[1]) + +print('--> Do an "ls -al" on the new filesystem') +status = server.run('ls -al %s' % volume.mount_point) +print(status[1]) + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/manage/volume.py b/desktop/core/ext-py/boto-2.38.0/boto/manage/volume.py new file mode 100644 index 0000000000000000000000000000000000000000..410414c7b3354ea86d893cd92cfa23ca36c82310 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/manage/volume.py @@ -0,0 +1,420 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
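+"""
+Editor's usage sketch (hypothetical, not part of upstream boto): Volume is an
+SDB-backed model wrapping an EBS volume. Assuming AWS credentials are
+configured and ``server`` is a running boto.manage.server.Server, a typical
+lifecycle looks roughly like::
+
+    from boto.manage.volume import Volume
+
+    vol = Volume.create()            # prompts for region, zone, name, size, ...
+    vol.make_ready(server)           # attach, wait for device, mkfs.xfs, mount
+    vol.snapshot()                   # freeze XFS, snapshot EBS, unfreeze
+    vol.trim_snapshots(delete=True)  # prune old snapshots per the schedule
+"""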
+from __future__ import print_function + +from boto.sdb.db.model import Model +from boto.sdb.db.property import StringProperty, IntegerProperty, ListProperty, ReferenceProperty, CalculatedProperty +from boto.manage.server import Server +from boto.manage import propget +import boto.utils +import boto.ec2 +import time +import traceback +from contextlib import closing +import datetime + + +class CommandLineGetter(object): + + def get_region(self, params): + if not params.get('region', None): + prop = self.cls.find_property('region_name') + params['region'] = propget.get(prop, choices=boto.ec2.regions) + + def get_zone(self, params): + if not params.get('zone', None): + prop = StringProperty(name='zone', verbose_name='EC2 Availability Zone', + choices=self.ec2.get_all_zones) + params['zone'] = propget.get(prop) + + def get_name(self, params): + if not params.get('name', None): + prop = self.cls.find_property('name') + params['name'] = propget.get(prop) + + def get_size(self, params): + if not params.get('size', None): + prop = IntegerProperty(name='size', verbose_name='Size (GB)') + params['size'] = propget.get(prop) + + def get_mount_point(self, params): + if not params.get('mount_point', None): + prop = self.cls.find_property('mount_point') + params['mount_point'] = propget.get(prop) + + def get_device(self, params): + if not params.get('device', None): + prop = self.cls.find_property('device') + params['device'] = propget.get(prop) + + def get(self, cls, params): + self.cls = cls + self.get_region(params) + self.ec2 = params['region'].connect() + self.get_zone(params) + self.get_name(params) + self.get_size(params) + self.get_mount_point(params) + self.get_device(params) + +class Volume(Model): + + name = StringProperty(required=True, unique=True, verbose_name='Name') + region_name = StringProperty(required=True, verbose_name='EC2 Region') + zone_name = StringProperty(required=True, verbose_name='EC2 Zone') + mount_point = StringProperty(verbose_name='Mount Point') + device = StringProperty(verbose_name="Device Name", default='/dev/sdp') + volume_id = StringProperty(required=True) + past_volume_ids = ListProperty(item_type=str) + server = ReferenceProperty(Server, collection_name='volumes', + verbose_name='Server Attached To') + volume_state = CalculatedProperty(verbose_name="Volume State", + calculated_type=str, use_method=True) + attachment_state = CalculatedProperty(verbose_name="Attachment State", + calculated_type=str, use_method=True) + size = CalculatedProperty(verbose_name="Size (GB)", + calculated_type=int, use_method=True) + + @classmethod + def create(cls, **params): + getter = CommandLineGetter() + getter.get(cls, params) + region = params.get('region') + ec2 = region.connect() + zone = params.get('zone') + size = params.get('size') + ebs_volume = ec2.create_volume(size, zone.name) + v = cls() + v.ec2 = ec2 + v.volume_id = ebs_volume.id + v.name = params.get('name') + v.mount_point = params.get('mount_point') + v.device = params.get('device') + v.region_name = region.name + v.zone_name = zone.name + v.put() + return v + + @classmethod + def create_from_volume_id(cls, region_name, volume_id, name): + vol = None + ec2 = boto.ec2.connect_to_region(region_name) + rs = ec2.get_all_volumes([volume_id]) + if len(rs) == 1: + v = rs[0] + vol = cls() + vol.volume_id = v.id + vol.name = name + vol.region_name = v.region.name + vol.zone_name = v.zone + vol.put() + return vol + + def create_from_latest_snapshot(self, name, size=None): + snapshot = self.get_snapshots()[-1] + return 
self.create_from_snapshot(name, snapshot, size) + + def create_from_snapshot(self, name, snapshot, size=None): + if size < self.size: + size = self.size + ec2 = self.get_ec2_connection() + if self.zone_name is None or self.zone_name == '': + # deal with the migration case where the zone is not set in the logical volume: + current_volume = ec2.get_all_volumes([self.volume_id])[0] + self.zone_name = current_volume.zone + ebs_volume = ec2.create_volume(size, self.zone_name, snapshot) + v = Volume() + v.ec2 = self.ec2 + v.volume_id = ebs_volume.id + v.name = name + v.mount_point = self.mount_point + v.device = self.device + v.region_name = self.region_name + v.zone_name = self.zone_name + v.put() + return v + + def get_ec2_connection(self): + if self.server: + return self.server.ec2 + if not hasattr(self, 'ec2') or self.ec2 is None: + self.ec2 = boto.ec2.connect_to_region(self.region_name) + return self.ec2 + + def _volume_state(self): + ec2 = self.get_ec2_connection() + rs = ec2.get_all_volumes([self.volume_id]) + return rs[0].volume_state() + + def _attachment_state(self): + ec2 = self.get_ec2_connection() + rs = ec2.get_all_volumes([self.volume_id]) + return rs[0].attachment_state() + + def _size(self): + if not hasattr(self, '__size'): + ec2 = self.get_ec2_connection() + rs = ec2.get_all_volumes([self.volume_id]) + self.__size = rs[0].size + return self.__size + + def install_xfs(self): + if self.server: + self.server.install('xfsprogs xfsdump') + + def get_snapshots(self): + """ + Returns a list of all completed snapshots for this volume ID. + """ + ec2 = self.get_ec2_connection() + rs = ec2.get_all_snapshots() + all_vols = [self.volume_id] + self.past_volume_ids + snaps = [] + for snapshot in rs: + if snapshot.volume_id in all_vols: + if snapshot.progress == '100%': + snapshot.date = boto.utils.parse_ts(snapshot.start_time) + snapshot.keep = True + snaps.append(snapshot) + snaps.sort(cmp=lambda x, y: cmp(x.date, y.date)) + return snaps + + def attach(self, server=None): + if self.attachment_state == 'attached': + print('already attached') + return None + if server: + self.server = server + self.put() + ec2 = self.get_ec2_connection() + ec2.attach_volume(self.volume_id, self.server.instance_id, self.device) + + def detach(self, force=False): + state = self.attachment_state + if state == 'available' or state is None or state == 'detaching': + print('already detached') + return None + ec2 = self.get_ec2_connection() + ec2.detach_volume(self.volume_id, self.server.instance_id, self.device, force) + self.server = None + self.put() + + def checkfs(self, use_cmd=None): + if self.server is None: + raise ValueError('server attribute must be set to run this command') + # determine state of file system on volume, only works if attached + if use_cmd: + cmd = use_cmd + else: + cmd = self.server.get_cmdshell() + status = cmd.run('xfs_check %s' % self.device) + if not use_cmd: + cmd.close() + if status[1].startswith('bad superblock magic number 0'): + return False + return True + + def wait(self): + if self.server is None: + raise ValueError('server attribute must be set to run this command') + with closing(self.server.get_cmdshell()) as cmd: + # wait for the volume device to appear + while not cmd.exists(self.device): + boto.log.info('%s still does not exist, waiting 10 seconds' % self.device) + time.sleep(10) + + def format(self): + if self.server is None: + raise ValueError('server attribute must be set to run this command') + status = None + with
closing(self.server.get_cmdshell()) as cmd: + if not self.checkfs(cmd): + boto.log.info('make_fs...') + status = cmd.run('mkfs -t xfs %s' % self.device) + return status + + def mount(self): + if self.server is None: + raise ValueError('server attribute must be set to run this command') + boto.log.info('handle_mount_point') + with closing(self.server.get_cmdshell()) as cmd: + if not cmd.isdir(self.mount_point): + boto.log.info('making directory') + # mount directory doesn't exist so create it + cmd.run("mkdir %s" % self.mount_point) + else: + boto.log.info('directory exists already') + status = cmd.run('mount -l') + lines = status[1].split('\n') + for line in lines: + t = line.split() + if t and t[2] == self.mount_point: + # something is already mounted at the mount point + # unmount that and mount it as /tmp + if t[0] != self.device: + cmd.run('umount %s' % self.mount_point) + cmd.run('mount %s /tmp' % t[0]) + cmd.run('chmod 777 /tmp') + break + # Mount up our new EBS volume onto mount_point + cmd.run("mount %s %s" % (self.device, self.mount_point)) + cmd.run('xfs_growfs %s' % self.mount_point) + + def make_ready(self, server): + self.server = server + self.put() + self.install_xfs() + self.attach() + self.wait() + self.format() + self.mount() + + def freeze(self): + if self.server: + return self.server.run("/usr/sbin/xfs_freeze -f %s" % self.mount_point) + + def unfreeze(self): + if self.server: + return self.server.run("/usr/sbin/xfs_freeze -u %s" % self.mount_point) + + def snapshot(self): + # if this volume is attached to a server + # we need to freeze the XFS file system + try: + self.freeze() + if self.server is None: + snapshot = self.get_ec2_connection().create_snapshot(self.volume_id) + else: + snapshot = self.server.ec2.create_snapshot(self.volume_id) + boto.log.info('Snapshot of Volume %s created: %s' % (self.name, snapshot)) + except Exception: + boto.log.info('Snapshot error') + boto.log.info(traceback.format_exc()) + finally: + status = self.unfreeze() + return status + + def get_snapshot_range(self, snaps, start_date=None, end_date=None): + l = [] + for snap in snaps: + if start_date and end_date: + if snap.date >= start_date and snap.date <= end_date: + l.append(snap) + elif start_date: + if snap.date >= start_date: + l.append(snap) + elif end_date: + if snap.date <= end_date: + l.append(snap) + else: + l.append(snap) + return l + + def trim_snapshots(self, delete=False): + """ + Trim the number of snapshots for this volume. This method always + keeps the oldest snapshot. It then uses the parameters passed in + to determine how many others should be kept. + + The algorithm is to keep all snapshots from the current day. Then + it will keep the first snapshot of the day for the previous seven days. + Then, it will keep the first snapshot of the week for the previous + four weeks. After that, it will keep the first snapshot of the month + for as many months as there are.
+ + """ + snaps = self.get_snapshots() + # Always keep the oldest and the newest + if len(snaps) <= 2: + return snaps + snaps = snaps[1:-1] + now = datetime.datetime.now(snaps[0].date.tzinfo) + midnight = datetime.datetime(year=now.year, month=now.month, + day=now.day, tzinfo=now.tzinfo) + # Keep the first snapshot from each day of the previous week + one_week = datetime.timedelta(days=7, seconds=60*60) + print(midnight-one_week, midnight) + previous_week = self.get_snapshot_range(snaps, midnight-one_week, midnight) + print(previous_week) + if not previous_week: + return snaps + current_day = None + for snap in previous_week: + if current_day and current_day == snap.date.day: + snap.keep = False + else: + current_day = snap.date.day + # Get ourselves onto the next full week boundary + if previous_week: + week_boundary = previous_week[0].date + if week_boundary.weekday() != 0: + delta = datetime.timedelta(days=week_boundary.weekday()) + week_boundary = week_boundary - delta + # Keep one within this partial week + partial_week = self.get_snapshot_range(snaps, week_boundary, previous_week[0].date) + if len(partial_week) > 1: + for snap in partial_week[1:]: + snap.keep = False + # Keep the first snapshot of each week for the previous 4 weeks + for i in range(0, 4): + weeks_worth = self.get_snapshot_range(snaps, week_boundary-one_week, week_boundary) + if len(weeks_worth) > 1: + for snap in weeks_worth[1:]: + snap.keep = False + week_boundary = week_boundary - one_week + # Now look through all remaining snaps and keep one per month + remainder = self.get_snapshot_range(snaps, end_date=week_boundary) + current_month = None + for snap in remainder: + if current_month and current_month == snap.date.month: + snap.keep = False + else: + current_month = snap.date.month + if delete: + for snap in snaps: + if not snap.keep: + boto.log.info('Deleting %s(%s) for %s' % (snap, snap.date, self.name)) + snap.delete() + return snaps + + def grow(self, size): + pass + + def copy(self, snapshot): + pass + + def get_snapshot_from_date(self, date): + pass + + def delete(self, delete_ebs_volume=False): + if delete_ebs_volume: + self.detach() + ec2 = self.get_ec2_connection() + ec2.delete_volume(self.volume_id) + super(Volume, self).delete() + + def archive(self): + # snapshot volume, trim snaps, delete volume-id + pass + + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/mashups/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/mashups/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..449bd162a8ea33724103f1cba717f3255d1edea1 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/mashups/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/mashups/interactive.py b/desktop/core/ext-py/boto-2.38.0/boto/mashups/interactive.py new file mode 100644 index 0000000000000000000000000000000000000000..1eb9db47d52494d8dc81e694b546e5b637500c01 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/mashups/interactive.py @@ -0,0 +1,97 @@ +# Copyright (C) 2003-2007 Robey Pointer +# +# This file is part of paramiko. +# +# Paramiko is free software; you can redistribute it and/or modify it under the +# terms of the GNU Lesser General Public License as published by the Free +# Software Foundation; either version 2.1 of the License, or (at your option) +# any later version. +# +# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR +# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with Paramiko; if not, write to the Free Software Foundation, Inc., +# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. +from __future__ import print_function + +import socket +import sys + +# windows does not have termios... +try: + import termios + import tty + has_termios = True +except ImportError: + has_termios = False + + +def interactive_shell(chan): + if has_termios: + posix_shell(chan) + else: + windows_shell(chan) + + +def posix_shell(chan): + import select + + oldtty = termios.tcgetattr(sys.stdin) + try: + tty.setraw(sys.stdin.fileno()) + tty.setcbreak(sys.stdin.fileno()) + chan.settimeout(0.0) + + while True: + r, w, e = select.select([chan, sys.stdin], [], []) + if chan in r: + try: + x = chan.recv(1024) + if len(x) == 0: + print('\r\n*** EOF\r\n', end=' ') + break + sys.stdout.write(x) + sys.stdout.flush() + except socket.timeout: + pass + if sys.stdin in r: + x = sys.stdin.read(1) + if len(x) == 0: + break + chan.send(x) + + finally: + termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty) + + +# thanks to Mike Looijmans for this code +def windows_shell(chan): + import threading + + sys.stdout.write("Line-buffered terminal emulation.
Press F6 or ^Z to send EOF.\r\n\r\n") + + def writeall(sock): + while True: + data = sock.recv(256) + if not data: + sys.stdout.write('\r\n*** EOF ***\r\n\r\n') + sys.stdout.flush() + break + sys.stdout.write(data) + sys.stdout.flush() + + writer = threading.Thread(target=writeall, args=(chan,)) + writer.start() + + try: + while True: + d = sys.stdin.read(1) + if not d: + break + chan.send(d) + except EOFError: + # user hit ^Z or F6 + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/mashups/iobject.py b/desktop/core/ext-py/boto-2.38.0/boto/mashups/iobject.py new file mode 100644 index 0000000000000000000000000000000000000000..f6ae98a34a41326392fd6e73dc2fa2075ec902ba --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/mashups/iobject.py @@ -0,0 +1,114 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
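+"""
+Editor's usage sketch (hypothetical, not upstream documentation): IObject
+bundles the small interactive prompt helpers used across boto.mashups.
+Assuming an interactive terminal, the helpers behave roughly like::
+
+    from boto.mashups.iobject import IObject
+
+    iobj = IObject()
+    name = iobj.get_string('Server name')             # any string
+    count = iobj.get_int('How many instances')        # validated as an int
+    key_path = iobj.get_filename('Path to key file')  # must resolve to a file
+    choice = iobj.choose_from_list(['red', 'green'])  # numbered menu; a value
+                                                      # starting with '/' searches
+"""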
+import os + +def int_val_fn(v): + try: + int(v) + return True + except: + return False + +class IObject(object): + + def choose_from_list(self, item_list, search_str='', + prompt='Enter Selection'): + if not item_list: + print('No Choices Available') + return + choice = None + while not choice: + n = 1 + choices = [] + for item in item_list: + if isinstance(item, basestring): + print('[%d] %s' % (n, item)) + choices.append(item) + n += 1 + else: + obj, id, desc = item + if desc: + if desc.find(search_str) >= 0: + print('[%d] %s - %s' % (n, id, desc)) + choices.append(obj) + n += 1 + else: + if id.find(search_str) >= 0: + print('[%d] %s' % (n, id)) + choices.append(obj) + n += 1 + if choices: + val = raw_input('%s[1-%d]: ' % (prompt, len(choices))) + if val.startswith('/'): + search_str = val[1:] + else: + try: + int_val = int(val) + if int_val == 0: + return None + choice = choices[int_val-1] + except ValueError: + print('%s is not a valid choice' % val) + except IndexError: + print('%s is not within the range[1-%d]' % (val, + len(choices))) + else: + print("No objects matched your pattern") + search_str = '' + return choice + + def get_string(self, prompt, validation_fn=None): + okay = False + while not okay: + val = raw_input('%s: ' % prompt) + if validation_fn: + okay = validation_fn(val) + if not okay: + print('Invalid value: %s' % val) + else: + okay = True + return val + + def get_filename(self, prompt): + okay = False + val = '' + while not okay: + val = raw_input('%s: %s' % (prompt, val)) + val = os.path.expanduser(val) + if os.path.isfile(val): + okay = True + elif os.path.isdir(val): + path = val + val = self.choose_from_list(os.listdir(path)) + if val: + val = os.path.join(path, val) + okay = True + else: + val = '' + else: + print('Invalid value: %s' % val) + val = '' + return val + + def get_int(self, prompt): + s = self.get_string(prompt, int_val_fn) + return int(s) + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/mashups/order.py b/desktop/core/ext-py/boto-2.38.0/boto/mashups/order.py new file mode 100644 index 0000000000000000000000000000000000000000..4aaec307bd607d0d21734cfec94ae0f484ba619f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/mashups/order.py @@ -0,0 +1,211 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
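+# Editor's note: a hypothetical end-to-end sketch for this module (names and
+# values are illustrative, not upstream documentation). Fields not passed to
+# add_item() are prompted for interactively via the IObject helpers:
+#
+#     from boto.mashups.order import Order
+#
+#     order = Order()
+#     order.add_item(quantity=2)         # prompts for region, AMI, key, ...
+#     order.display()                    # print a summary of the order
+#     servers = order.place(block=True)  # run instances, persist Server rows
+#                                        # (one Server, or a ServerSet)
+#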
+""" +High-level abstraction of an EC2 order for servers +""" + +import boto +import boto.ec2 +from boto.mashups.server import Server, ServerSet +from boto.mashups.iobject import IObject +from boto.pyami.config import Config +from boto.sdb.persist import get_domain, set_domain +import time +from boto.compat import StringIO + +InstanceTypes = ['m1.small', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge'] + +class Item(IObject): + + def __init__(self): + self.region = None + self.name = None + self.instance_type = None + self.quantity = 0 + self.zone = None + self.ami = None + self.groups = [] + self.key = None + self.ec2 = None + self.config = None + + def set_userdata(self, key, value): + self.userdata[key] = value + + def get_userdata(self, key): + return self.userdata[key] + + def set_region(self, region=None): + if region: + self.region = region + else: + l = [(r, r.name, r.endpoint) for r in boto.ec2.regions()] + self.region = self.choose_from_list(l, prompt='Choose Region') + + def set_name(self, name=None): + if name: + self.name = name + else: + self.name = self.get_string('Name') + + def set_instance_type(self, instance_type=None): + if instance_type: + self.instance_type = instance_type + else: + self.instance_type = self.choose_from_list(InstanceTypes, 'Instance Type') + + def set_quantity(self, n=0): + if n > 0: + self.quantity = n + else: + self.quantity = self.get_int('Quantity') + + def set_zone(self, zone=None): + if zone: + self.zone = zone + else: + l = [(z, z.name, z.state) for z in self.ec2.get_all_zones()] + self.zone = self.choose_from_list(l, prompt='Choose Availability Zone') + + def set_ami(self, ami=None): + if ami: + self.ami = ami + else: + l = [(a, a.id, a.location) for a in self.ec2.get_all_images()] + self.ami = self.choose_from_list(l, prompt='Choose AMI') + + def add_group(self, group=None): + if group: + self.groups.append(group) + else: + l = [(s, s.name, s.description) for s in self.ec2.get_all_security_groups()] + self.groups.append(self.choose_from_list(l, prompt='Choose Security Group')) + + def set_key(self, key=None): + if key: + self.key = key + else: + l = [(k, k.name, '') for k in self.ec2.get_all_key_pairs()] + self.key = self.choose_from_list(l, prompt='Choose Keypair') + + def update_config(self): + if not self.config.has_section('Credentials'): + self.config.add_section('Credentials') + self.config.set('Credentials', 'aws_access_key_id', self.ec2.aws_access_key_id) + self.config.set('Credentials', 'aws_secret_access_key', self.ec2.aws_secret_access_key) + if not self.config.has_section('Pyami'): + self.config.add_section('Pyami') + sdb_domain = get_domain() + if sdb_domain: + self.config.set('Pyami', 'server_sdb_domain', sdb_domain) + self.config.set('Pyami', 'server_sdb_name', self.name) + + def set_config(self, config_path=None): + if not config_path: + config_path = self.get_filename('Specify Config file') + self.config = Config(path=config_path) + + def get_userdata_string(self): + s = StringIO() + self.config.write(s) + return s.getvalue() + + def enter(self, **params): + self.region = params.get('region', self.region) + if not self.region: + self.set_region() + self.ec2 = self.region.connect() + self.name = params.get('name', self.name) + if not self.name: + self.set_name() + self.instance_type = params.get('instance_type', self.instance_type) + if not self.instance_type: + self.set_instance_type() + self.zone = params.get('zone', self.zone) + if not self.zone: + self.set_zone() + self.quantity = params.get('quantity', self.quantity) + 
if not self.quantity: + self.set_quantity() + self.ami = params.get('ami', self.ami) + if not self.ami: + self.set_ami() + self.groups = params.get('groups', self.groups) + if not self.groups: + self.add_group() + self.key = params.get('key', self.key) + if not self.key: + self.set_key() + self.config = params.get('config', self.config) + if not self.config: + self.set_config() + self.update_config() + +class Order(IObject): + + def __init__(self): + self.items = [] + self.reservation = None + + def add_item(self, **params): + item = Item() + item.enter(**params) + self.items.append(item) + + def display(self): + print('This Order consists of the following items') + print() + print('QTY\tNAME\tTYPE\nAMI\t\tGroups\t\t\tKeyPair') + for item in self.items: + print('%s\t%s\t%s\t%s\t%s\t%s' % (item.quantity, item.name, item.instance_type, + item.ami.id, item.groups, item.key.name)) + + def place(self, block=True): + if get_domain() is None: + print('SDB Persistence Domain not set') + domain_name = self.get_string('Specify SDB Domain') + set_domain(domain_name) + s = ServerSet() + for item in self.items: + r = item.ami.run(min_count=1, max_count=item.quantity, + key_name=item.key.name, user_data=item.get_userdata_string(), + security_groups=item.groups, instance_type=item.instance_type, + placement=item.zone.name) + if block: + states = [i.state for i in r.instances] + if states.count('running') != len(states): + print(states) + time.sleep(15) + states = [i.update() for i in r.instances] + for i in r.instances: + server = Server() + server.name = item.name + server.instance_id = i.id + server.reservation = r + server.save() + s.append(server) + if len(s) == 1: + return s[0] + else: + return s + + + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/mashups/server.py b/desktop/core/ext-py/boto-2.38.0/boto/mashups/server.py new file mode 100644 index 0000000000000000000000000000000000000000..7045e7f4da0f46c85415cad5c38e24fd5989734c --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/mashups/server.py @@ -0,0 +1,395 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
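+# Editor's note: a hypothetical usage sketch (not upstream documentation).
+# Server persists EC2 instance metadata in SimpleDB and wraps SSH access to
+# the running instance:
+#
+#     from boto.mashups.server import Server
+#
+#     s = Server.Register('web1', 'i-0123abcd')  # wrap an existing instance
+#     print(s.hostname, s.status)                # properties defined below
+#     s.install_package('httpd')                 # yum install over SSH
+#     s.shell()                                  # interactive SSH session
+#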
+""" +High-level abstraction of an EC2 server +""" + +import boto +import boto.utils +from boto.compat import StringIO +from boto.mashups.iobject import IObject +from boto.pyami.config import Config, BotoConfigPath +from boto.mashups.interactive import interactive_shell +from boto.sdb.db.model import Model +from boto.sdb.db.property import StringProperty +import os + + +class ServerSet(list): + + def __getattr__(self, name): + results = [] + is_callable = False + for server in self: + try: + val = getattr(server, name) + if callable(val): + is_callable = True + results.append(val) + except: + results.append(None) + if is_callable: + self.map_list = results + return self.map + return results + + def map(self, *args): + results = [] + for fn in self.map_list: + results.append(fn(*args)) + return results + +class Server(Model): + + @property + def ec2(self): + if self._ec2 is None: + self._ec2 = boto.connect_ec2() + return self._ec2 + + @classmethod + def Inventory(cls): + """ + Returns a list of Server instances, one for each Server object + persisted in the db + """ + l = ServerSet() + rs = cls.find() + for server in rs: + l.append(server) + return l + + @classmethod + def Register(cls, name, instance_id, description=''): + s = cls() + s.name = name + s.instance_id = instance_id + s.description = description + s.save() + return s + + def __init__(self, id=None, **kw): + super(Server, self).__init__(id, **kw) + self._reservation = None + self._instance = None + self._ssh_client = None + self._pkey = None + self._config = None + self._ec2 = None + + name = StringProperty(unique=True, verbose_name="Name") + instance_id = StringProperty(verbose_name="Instance ID") + config_uri = StringProperty() + ami_id = StringProperty(verbose_name="AMI ID") + zone = StringProperty(verbose_name="Availability Zone") + security_group = StringProperty(verbose_name="Security Group", default="default") + key_name = StringProperty(verbose_name="Key Name") + elastic_ip = StringProperty(verbose_name="Elastic IP") + instance_type = StringProperty(verbose_name="Instance Type") + description = StringProperty(verbose_name="Description") + log = StringProperty() + + def setReadOnly(self, value): + raise AttributeError + + def getInstance(self): + if not self._instance: + if self.instance_id: + try: + rs = self.ec2.get_all_reservations([self.instance_id]) + except: + return None + if len(rs) > 0: + self._reservation = rs[0] + self._instance = self._reservation.instances[0] + return self._instance + + instance = property(getInstance, setReadOnly, None, 'The Instance for the server') + + def getAMI(self): + if self.instance: + return self.instance.image_id + + ami = property(getAMI, setReadOnly, None, 'The AMI for the server') + + def getStatus(self): + if self.instance: + self.instance.update() + return self.instance.state + + status = property(getStatus, setReadOnly, None, + 'The status of the server') + + def getHostname(self): + if self.instance: + return self.instance.public_dns_name + + hostname = property(getHostname, setReadOnly, None, + 'The public DNS name of the server') + + def getPrivateHostname(self): + if self.instance: + return self.instance.private_dns_name + + private_hostname = property(getPrivateHostname, setReadOnly, None, + 'The private DNS name of the server') + + def getLaunchTime(self): + if self.instance: + return self.instance.launch_time + + launch_time = property(getLaunchTime, setReadOnly, None, + 'The time the Server was started') + + def getConsoleOutput(self): + if self.instance: + return 
self.instance.get_console_output() + + console_output = property(getConsoleOutput, setReadOnly, None, + 'Retrieve the console output for server') + + def getGroups(self): + if self._reservation: + return self._reservation.groups + else: + return None + + groups = property(getGroups, setReadOnly, None, + 'The Security Groups controlling access to this server') + + def getConfig(self): + if not self._config: + remote_file = BotoConfigPath + local_file = '%s.ini' % self.instance.id + self.get_file(remote_file, local_file) + self._config = Config(local_file) + return self._config + + def setConfig(self, config): + local_file = '%s.ini' % self.instance.id + fp = open(local_file, 'w') + config.write(fp) + fp.close() + self.put_file(local_file, BotoConfigPath) + self._config = config + + config = property(getConfig, setConfig, None, + 'The instance data for this server') + + def set_config(self, config): + """ + Set SDB based config + """ + self._config = config + self._config.dump_to_sdb("botoConfigs", self.id) + + def load_config(self): + self._config = Config(do_load=False) + self._config.load_from_sdb("botoConfigs", self.id) + + def stop(self): + if self.instance: + self.instance.stop() + + def start(self): + self.stop() + ec2 = boto.connect_ec2() + ami = ec2.get_all_images(image_ids = [str(self.ami_id)])[0] + groups = ec2.get_all_security_groups(groupnames=[str(self.security_group)]) + if not self._config: + self.load_config() + if not self._config.has_section("Credentials"): + self._config.add_section("Credentials") + self._config.set("Credentials", "aws_access_key_id", ec2.aws_access_key_id) + self._config.set("Credentials", "aws_secret_access_key", ec2.aws_secret_access_key) + + if not self._config.has_section("Pyami"): + self._config.add_section("Pyami") + + if self._manager.domain: + self._config.set('Pyami', 'server_sdb_domain', self._manager.domain.name) + self._config.set("Pyami", 'server_sdb_name', self.name) + + cfg = StringIO() + self._config.write(cfg) + cfg = cfg.getvalue() + r = ami.run(min_count=1, + max_count=1, + key_name=self.key_name, + security_groups = groups, + instance_type = self.instance_type, + placement = self.zone, + user_data = cfg) + i = r.instances[0] + self.instance_id = i.id + self.put() + if self.elastic_ip: + ec2.associate_address(self.instance_id, self.elastic_ip) + + def reboot(self): + if self.instance: + self.instance.reboot() + + def get_ssh_client(self, key_file=None, host_key_file='~/.ssh/known_hosts', + uname='root'): + import paramiko + if not self.instance: + print('No instance yet!') + return + if not self._ssh_client: + if not key_file: + iobject = IObject() + key_file = iobject.get_filename('Path to OpenSSH Key file') + self._pkey = paramiko.RSAKey.from_private_key_file(key_file) + self._ssh_client = paramiko.SSHClient() + self._ssh_client.load_system_host_keys() + self._ssh_client.load_host_keys(os.path.expanduser(host_key_file)) + self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + self._ssh_client.connect(self.instance.public_dns_name, + username=uname, pkey=self._pkey) + return self._ssh_client + + def get_file(self, remotepath, localpath): + ssh_client = self.get_ssh_client() + sftp_client = ssh_client.open_sftp() + sftp_client.get(remotepath, localpath) + + def put_file(self, localpath, remotepath): + ssh_client = self.get_ssh_client() + sftp_client = ssh_client.open_sftp() + sftp_client.put(localpath, remotepath) + + def listdir(self, remotepath): + ssh_client = self.get_ssh_client() + sftp_client = ssh_client.open_sftp()
+ return sftp_client.listdir(remotepath) + + def shell(self, key_file=None): + ssh_client = self.get_ssh_client(key_file) + channel = ssh_client.invoke_shell() + interactive_shell(channel) + + def bundle_image(self, prefix, key_file, cert_file, size): + print('bundling image...') + print('\tcopying cert and pk over to /mnt directory on server') + ssh_client = self.get_ssh_client() + sftp_client = ssh_client.open_sftp() + path, name = os.path.split(key_file) + remote_key_file = '/mnt/%s' % name + self.put_file(key_file, remote_key_file) + path, name = os.path.split(cert_file) + remote_cert_file = '/mnt/%s' % name + self.put_file(cert_file, remote_cert_file) + print('\tdeleting %s' % BotoConfigPath) + # delete the metadata.ini file if it exists + try: + sftp_client.remove(BotoConfigPath) + except: + pass + command = 'sudo ec2-bundle-vol ' + command += '-c %s -k %s ' % (remote_cert_file, remote_key_file) + command += '-u %s ' % self._reservation.owner_id + command += '-p %s ' % prefix + command += '-s %d ' % size + command += '-d /mnt ' + if self.instance.instance_type == 'm1.small' or self.instance_type == 'c1.medium': + command += '-r i386' + else: + command += '-r x86_64' + print('\t%s' % command) + t = ssh_client.exec_command(command) + response = t[1].read() + print('\t%s' % response) + print('\t%s' % t[2].read()) + print('...complete!') + + def upload_bundle(self, bucket, prefix): + print('uploading bundle...') + command = 'ec2-upload-bundle ' + command += '-m /mnt/%s.manifest.xml ' % prefix + command += '-b %s ' % bucket + command += '-a %s ' % self.ec2.aws_access_key_id + command += '-s %s ' % self.ec2.aws_secret_access_key + print('\t%s' % command) + ssh_client = self.get_ssh_client() + t = ssh_client.exec_command(command) + response = t[1].read() + print('\t%s' % response) + print('\t%s' % t[2].read()) + print('...complete!') + + def create_image(self, bucket=None, prefix=None, key_file=None, cert_file=None, size=None): + iobject = IObject() + if not bucket: + bucket = iobject.get_string('Name of S3 bucket') + if not prefix: + prefix = iobject.get_string('Prefix for AMI file') + if not key_file: + key_file = iobject.get_filename('Path to RSA private key file') + if not cert_file: + cert_file = iobject.get_filename('Path to RSA public cert file') + if not size: + size = iobject.get_int('Size (in MB) of bundled image') + self.bundle_image(prefix, key_file, cert_file, size) + self.upload_bundle(bucket, prefix) + print('registering image...') + self.image_id = self.ec2.register_image('%s/%s.manifest.xml' % (bucket, prefix)) + return self.image_id + + def attach_volume(self, volume, device="/dev/sdp"): + """ + Attach an EBS volume to this server + + :param volume: EBS Volume to attach + :type volume: boto.ec2.volume.Volume + + :param device: Device to attach to (default to /dev/sdp) + :type device: string + """ + if hasattr(volume, "id"): + volume_id = volume.id + else: + volume_id = volume + return self.ec2.attach_volume(volume_id=volume_id, instance_id=self.instance_id, device=device) + + def detach_volume(self, volume): + """ + Detach an EBS volume from this server + + :param volume: EBS Volume to detach + :type volume: boto.ec2.volume.Volume + """ + if hasattr(volume, "id"): + volume_id = volume.id + else: + volume_id = volume + return self.ec2.detach_volume(volume_id=volume_id, instance_id=self.instance_id) + + def install_package(self, package_name): + print('installing %s...' 
% package_name) + command = 'yum -y install %s' % package_name + print('\t%s' % command) + ssh_client = self.get_ssh_client() + t = ssh_client.exec_command(command) + response = t[1].read() + print('\t%s' % response) + print('\t%s' % t[2].read()) + print('...complete!') diff --git a/desktop/core/ext-py/boto-2.38.0/boto/mturk/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/mturk/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..449bd162a8ea33724103f1cba717f3255d1edea1 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/mturk/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/mturk/connection.py b/desktop/core/ext-py/boto-2.38.0/boto/mturk/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..4f2a23faf1a8a8f7e8d78fbcf8cc4302b5bb0666 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/mturk/connection.py @@ -0,0 +1,1052 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
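+"""
+Editor's usage sketch (hypothetical, not upstream documentation): with an
+``[MTurk] sandbox = True`` entry in the boto config, a minimal requester
+workflow with this connection looks roughly like::
+
+    from boto.mturk.connection import MTurkConnection
+    from boto.mturk.question import ExternalQuestion
+
+    mtc = MTurkConnection()
+    q = ExternalQuestion(external_url='https://example.com/hit', frame_height=600)
+    rs = mtc.create_hit(question=q, title='Tag an image',
+                        description='Pick the best label for the image',
+                        keywords=['image', 'tagging'], reward=0.05,
+                        max_assignments=3)
+    hit_id = rs[0].HITId
+    for a in mtc.get_assignments(hit_id, status='Submitted'):
+        mtc.approve_assignment(a.AssignmentId)
+"""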
+import xml.sax +import datetime +import itertools + +from boto import handler +from boto import config +from boto.mturk.price import Price +import boto.mturk.notification +from boto.connection import AWSQueryConnection +from boto.exception import EC2ResponseError +from boto.resultset import ResultSet +from boto.mturk.question import QuestionForm, ExternalQuestion, HTMLQuestion + + +class MTurkRequestError(EC2ResponseError): + "Error for MTurk Requests" + # todo: subclass from an abstract parent of EC2ResponseError + + +class MTurkConnection(AWSQueryConnection): + + APIVersion = '2012-03-25' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, + host=None, debug=0, + https_connection_factory=None, security_token=None, + profile_name=None): + if not host: + if config.has_option('MTurk', 'sandbox') and config.get('MTurk', 'sandbox') == 'True': + host = 'mechanicalturk.sandbox.amazonaws.com' + else: + host = 'mechanicalturk.amazonaws.com' + self.debug = debug + + super(MTurkConnection, self).__init__(aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, host, debug, + https_connection_factory, + security_token=security_token, + profile_name=profile_name) + + def _required_auth_capability(self): + return ['mturk'] + + def get_account_balance(self): + """ + """ + params = {} + return self._process_request('GetAccountBalance', params, + [('AvailableBalance', Price), + ('OnHoldBalance', Price)]) + + def register_hit_type(self, title, description, reward, duration, + keywords=None, approval_delay=None, qual_req=None): + """ + Register a new HIT Type + title, description are strings + reward is a Price object + duration can be a timedelta, or an object castable to an int + """ + params = dict( + Title=title, + Description=description, + AssignmentDurationInSeconds=self.duration_as_seconds(duration), + ) + params.update(MTurkConnection.get_price_as_price(reward).get_as_params('Reward')) + + if keywords: + params['Keywords'] = self.get_keywords_as_string(keywords) + + if approval_delay is not None: + d = self.duration_as_seconds(approval_delay) + params['AutoApprovalDelayInSeconds'] = d + + if qual_req is not None: + params.update(qual_req.get_as_params()) + + return self._process_request('RegisterHITType', params, + [('HITTypeId', HITTypeId)]) + + def set_email_notification(self, hit_type, email, event_types=None): + """ + Performs a SetHITTypeNotification operation to set email + notification for a specified HIT type + """ + return self._set_notification(hit_type, 'Email', email, + 'SetHITTypeNotification', event_types) + + def set_rest_notification(self, hit_type, url, event_types=None): + """ + Performs a SetHITTypeNotification operation to set REST notification + for a specified HIT type + """ + return self._set_notification(hit_type, 'REST', url, + 'SetHITTypeNotification', event_types) + + def set_sqs_notification(self, hit_type, queue_url, event_types=None): + """ + Performs a SetHITTypeNotification operation to set SQS notification + for a specified HIT type.
Queue URL is of form: + https://queue.amazonaws.com/<CUSTOMER_ID>/<QUEUE_NAME> and can be + found when looking at the details for a Queue in the AWS Console + """ + return self._set_notification(hit_type, "SQS", queue_url, + 'SetHITTypeNotification', event_types) + + def send_test_event_notification(self, hit_type, url, + event_types=None, + test_event_type='Ping'): + """ + Performs a SendTestEventNotification operation with REST notification + for a specified HIT type + """ + return self._set_notification(hit_type, 'REST', url, + 'SendTestEventNotification', + event_types, test_event_type) + + def _set_notification(self, hit_type, transport, + destination, request_type, + event_types=None, test_event_type=None): + """ + Common operation to set notification or send a test event + notification for a specified HIT type + """ + params = {'HITTypeId': hit_type} + + # from the Developer Guide: + # The 'Active' parameter is optional. If omitted, the active status of + # the HIT type's notification specification is unchanged. All HIT types + # begin with their notification specifications in the "inactive" status. + notification_params = {'Destination': destination, + 'Transport': transport, + 'Version': boto.mturk.notification.NotificationMessage.NOTIFICATION_VERSION, + 'Active': True, + } + + # add specific event types if required + if event_types: + self.build_list_params(notification_params, event_types, + 'EventType') + + # Set up dict of 'Notification.1.Transport' etc. values + notification_rest_params = {} + num = 1 + for key in notification_params: + notification_rest_params['Notification.%d.%s' % (num, key)] = notification_params[key] + + # Update main params dict + params.update(notification_rest_params) + + # If test notification, specify the notification type to be tested + if test_event_type: + params.update({'TestEventType': test_event_type}) + + # Execute operation + return self._process_request(request_type, params) + + def create_hit(self, hit_type=None, question=None, hit_layout=None, + lifetime=datetime.timedelta(days=7), + max_assignments=1, + title=None, description=None, keywords=None, + reward=None, duration=datetime.timedelta(days=7), + approval_delay=None, annotation=None, + questions=None, qualifications=None, + layout_params=None, response_groups=None): + """ + Creates a new HIT.
+ Returns a ResultSet + See: http://docs.amazonwebservices.com/AWSMechTurk/2012-03-25/AWSMturkAPI/ApiReference_CreateHITOperation.html + """ + + # Handle basic required arguments and set up params dict + params = {'LifetimeInSeconds': + self.duration_as_seconds(lifetime), + 'MaxAssignments': max_assignments, + } + + # handle single or multiple questions or layouts + neither = question is None and questions is None + if hit_layout is None: + both = question is not None and questions is not None + if neither or both: + raise ValueError("Must specify question (single Question instance) or questions (list or QuestionForm instance), but not both") + if question: + questions = [question] + question_param = QuestionForm(questions) + if isinstance(question, QuestionForm): + question_param = question + elif isinstance(question, ExternalQuestion): + question_param = question + elif isinstance(question, HTMLQuestion): + question_param = question + params['Question'] = question_param.get_as_xml() + else: + if not neither: + raise ValueError("Must not specify question (single Question instance) or questions (list or QuestionForm instance) when specifying hit_layout") + params['HITLayoutId'] = hit_layout + if layout_params: + params.update(layout_params.get_as_params()) + + # if hit type specified then add it + # else add the additional required parameters + if hit_type: + params['HITTypeId'] = hit_type + else: + # Handle keywords + final_keywords = MTurkConnection.get_keywords_as_string(keywords) + + # Handle price argument + final_price = MTurkConnection.get_price_as_price(reward) + + final_duration = self.duration_as_seconds(duration) + + additional_params = dict( + Title=title, + Description=description, + Keywords=final_keywords, + AssignmentDurationInSeconds=final_duration, + ) + additional_params.update(final_price.get_as_params('Reward')) + + if approval_delay is not None: + d = self.duration_as_seconds(approval_delay) + additional_params['AutoApprovalDelayInSeconds'] = d + + # add these params to the others + params.update(additional_params) + + # add the annotation if specified + if annotation is not None: + params['RequesterAnnotation'] = annotation + + # Add the Qualifications if specified + if qualifications is not None: + params.update(qualifications.get_as_params()) + + # Handle optional response groups argument + if response_groups: + self.build_list_params(params, response_groups, 'ResponseGroup') + + # Submit + return self._process_request('CreateHIT', params, [('HIT', HIT)]) + + def change_hit_type_of_hit(self, hit_id, hit_type): + """ + Change the HIT type of an existing HIT. Note that the reward associated + with the new HIT type must match the reward of the current HIT type in + order for the operation to be valid. + + :type hit_id: str + :type hit_type: str + """ + params = {'HITId': hit_id, + 'HITTypeId': hit_type} + + return self._process_request('ChangeHITTypeOfHIT', params) + + def get_reviewable_hits(self, hit_type=None, status='Reviewable', + sort_by='Expiration', sort_direction='Ascending', + page_size=10, page_number=1): + """ + Retrieve the HITs that have a status of Reviewable, or HITs that + have a status of Reviewing, and that belong to the Requester + calling the operation. 
+ """ + params = {'Status': status, + 'SortProperty': sort_by, + 'SortDirection': sort_direction, + 'PageSize': page_size, + 'PageNumber': page_number} + + # Handle optional hit_type argument + if hit_type is not None: + params.update({'HITTypeId': hit_type}) + + return self._process_request('GetReviewableHITs', params, + [('HIT', HIT)]) + + @staticmethod + def _get_pages(page_size, total_records): + """ + Given a page size (records per page) and a total number of + records, return the page numbers to be retrieved. + """ + pages = total_records / page_size + bool(total_records % page_size) + return list(range(1, pages + 1)) + + def get_all_hits(self): + """ + Return all of a Requester's HITs + + Despite what search_hits says, it does not return all hits, but + instead returns a page of hits. This method will pull the hits + from the server 100 at a time, but will yield the results + iteratively, so subsequent requests are made on demand. + """ + page_size = 100 + search_rs = self.search_hits(page_size=page_size) + total_records = int(search_rs.TotalNumResults) + get_page_hits = lambda page: self.search_hits(page_size=page_size, page_number=page) + page_nums = self._get_pages(page_size, total_records) + hit_sets = itertools.imap(get_page_hits, page_nums) + return itertools.chain.from_iterable(hit_sets) + + def search_hits(self, sort_by='CreationTime', sort_direction='Ascending', + page_size=10, page_number=1, response_groups=None): + """ + Return a page of a Requester's HITs, on behalf of the Requester. + The operation returns HITs of any status, except for HITs that + have been disposed with the DisposeHIT operation. + Note: + The SearchHITs operation does not accept any search parameters + that filter the results. + """ + params = {'SortProperty': sort_by, + 'SortDirection': sort_direction, + 'PageSize': page_size, + 'PageNumber': page_number} + # Handle optional response groups argument + if response_groups: + self.build_list_params(params, response_groups, 'ResponseGroup') + + return self._process_request('SearchHITs', params, [('HIT', HIT)]) + + def get_assignment(self, assignment_id, response_groups=None): + """ + Retrieves an assignment using the assignment's ID. Requesters can only + retrieve their own assignments, and only assignments whose related HIT + has not been disposed. + + The returned ResultSet will have the following attributes: + + Request + This element is present only if the Request ResponseGroup + is specified. + Assignment + The assignment. The response includes one Assignment object. + HIT + The HIT associated with this assignment. The response + includes one HIT object. + + """ + + params = {'AssignmentId': assignment_id} + + # Handle optional response groups argument + if response_groups: + self.build_list_params(params, response_groups, 'ResponseGroup') + + return self._process_request('GetAssignment', params, + [('Assignment', Assignment), + ('HIT', HIT)]) + + def get_assignments(self, hit_id, status=None, + sort_by='SubmitTime', sort_direction='Ascending', + page_size=10, page_number=1, response_groups=None): + """ + Retrieves completed assignments for a HIT. + Use this operation to retrieve the results for a HIT. + + The returned ResultSet will have the following attributes: + + NumResults + The number of assignments on the page in the filtered results + list, equivalent to the number of assignments being returned + by this call. + A non-negative integer, as a string. + PageNumber + The number of the page in the filtered results list being + returned. 
+ A positive integer, as a string. + TotalNumResults + The total number of HITs in the filtered results list based + on this call. + A non-negative integer, as a string. + + The ResultSet will contain zero or more Assignment objects + + """ + params = {'HITId': hit_id, + 'SortProperty': sort_by, + 'SortDirection': sort_direction, + 'PageSize': page_size, + 'PageNumber': page_number} + + if status is not None: + params['AssignmentStatus'] = status + + # Handle optional response groups argument + if response_groups: + self.build_list_params(params, response_groups, 'ResponseGroup') + + return self._process_request('GetAssignmentsForHIT', params, + [('Assignment', Assignment)]) + + def approve_assignment(self, assignment_id, feedback=None): + """ + """ + params = {'AssignmentId': assignment_id} + if feedback: + params['RequesterFeedback'] = feedback + return self._process_request('ApproveAssignment', params) + + def reject_assignment(self, assignment_id, feedback=None): + """ + """ + params = {'AssignmentId': assignment_id} + if feedback: + params['RequesterFeedback'] = feedback + return self._process_request('RejectAssignment', params) + + def approve_rejected_assignment(self, assignment_id, feedback=None): + """ + """ + params = {'AssignmentId': assignment_id} + if feedback: + params['RequesterFeedback'] = feedback + return self._process_request('ApproveRejectedAssignment', params) + + def get_file_upload_url(self, assignment_id, question_identifier): + """ + Generates and returns a temporary URL to an uploaded file. The + temporary URL is used to retrieve the file as an answer to a + FileUploadAnswer question, it is valid for 60 seconds. + + Will have a FileUploadURL attribute as per the API Reference. + """ + + params = {'AssignmentId': assignment_id, + 'QuestionIdentifier': question_identifier} + + return self._process_request('GetFileUploadURL', params, + [('FileUploadURL', FileUploadURL)]) + + def get_hit(self, hit_id, response_groups=None): + """ + """ + params = {'HITId': hit_id} + # Handle optional response groups argument + if response_groups: + self.build_list_params(params, response_groups, 'ResponseGroup') + + return self._process_request('GetHIT', params, [('HIT', HIT)]) + + def set_reviewing(self, hit_id, revert=None): + """ + Update a HIT with a status of Reviewable to have a status of Reviewing, + or reverts a Reviewing HIT back to the Reviewable status. + + Only HITs with a status of Reviewable can be updated with a status of + Reviewing. Similarly, only Reviewing HITs can be reverted back to a + status of Reviewable. + """ + params = {'HITId': hit_id} + if revert: + params['Revert'] = revert + return self._process_request('SetHITAsReviewing', params) + + def disable_hit(self, hit_id, response_groups=None): + """ + Remove a HIT from the Mechanical Turk marketplace, approves all + submitted assignments that have not already been approved or rejected, + and disposes of the HIT and all assignment data. + + Assignments for the HIT that have already been submitted, but not yet + approved or rejected, will be automatically approved. Assignments in + progress at the time of the call to DisableHIT will be approved once + the assignments are submitted. You will be charged for approval of + these assignments. DisableHIT completely disposes of the HIT and + all submitted assignment data. Assignment results data cannot be + retrieved for a HIT that has been disposed. + + It is not possible to re-enable a HIT once it has been disabled. 
+        To make the work from a disabled HIT available again, create a new HIT.
+        """
+        params = {'HITId': hit_id}
+        # Handle optional response groups argument
+        if response_groups:
+            self.build_list_params(params, response_groups, 'ResponseGroup')
+
+        return self._process_request('DisableHIT', params)
+
+    def dispose_hit(self, hit_id):
+        """
+        Dispose of a HIT that is no longer needed.
+
+        Only HITs in the "reviewable" state, with all submitted
+        assignments approved or rejected, can be disposed. A Requester
+        can call GetReviewableHITs to determine which HITs are
+        reviewable, then call GetAssignmentsForHIT to retrieve the
+        assignments. Disposing of a HIT removes the HIT from the
+        results of a call to GetReviewableHITs.
+        """
+        params = {'HITId': hit_id}
+        return self._process_request('DisposeHIT', params)
+
+    def expire_hit(self, hit_id):
+        """
+        Expire a HIT that is no longer needed.
+
+        The effect is identical to the HIT expiring on its own. The
+        HIT no longer appears on the Mechanical Turk web site, and no
+        new Workers are allowed to accept the HIT. Workers who have
+        accepted the HIT prior to expiration are allowed to complete
+        it or return it, or allow the assignment duration to elapse
+        (abandon the HIT). Once all remaining assignments have been
+        submitted, the expired HIT becomes "reviewable", and will be
+        returned by a call to GetReviewableHITs.
+        """
+        params = {'HITId': hit_id}
+        return self._process_request('ForceExpireHIT', params)
+
+    def extend_hit(self, hit_id, assignments_increment=None,
+                   expiration_increment=None):
+        """
+        Increase the maximum number of assignments, or extend the
+        expiration date, of an existing HIT.
+
+        NOTE: If a HIT has a status of Reviewable and the HIT is
+        extended to make it Available, the HIT will not be returned by
+        GetReviewableHITs, and its submitted assignments will not be
+        returned by GetAssignmentsForHIT, until the HIT is Reviewable
+        again. Assignment auto-approval will still happen on its
+        original schedule, even if the HIT has been extended. Be sure
+        to retrieve and approve (or reject) submitted assignments
+        before extending the HIT, if so desired.
+        """
+        # must provide assignment *or* expiration increment, but not both
+        if (assignments_increment is None and expiration_increment is None) or \
+           (assignments_increment is not None and expiration_increment is not None):
+            raise ValueError("Must specify either assignments_increment or expiration_increment, but not both")
+
+        params = {'HITId': hit_id}
+        if assignments_increment:
+            params['MaxAssignmentsIncrement'] = assignments_increment
+        if expiration_increment:
+            params['ExpirationIncrementInSeconds'] = expiration_increment
+
+        return self._process_request('ExtendHIT', params)
+
+    def get_help(self, about, help_type='Operation'):
+        """
+        Return information about the Mechanical Turk Service
+        operations and response groups. NOTE: this is of limited use,
+        as it simply returns the URL of the relevant documentation.
+
+        help_type: either 'Operation' or 'ResponseGroup'
+        """
+        params = {'About': about, 'HelpType': help_type}
+        return self._process_request('Help', params)
+
+    def grant_bonus(self, worker_id, assignment_id, bonus_price, reason):
+        """
+        Issues a payment of money from your account to a Worker. To
+        be eligible for a bonus, the Worker must have submitted
+        results for one of your HITs, and have had those results
+        approved or rejected. This payment happens separately from the
+        reward you pay to the Worker when you approve the Worker's
+        assignment.
The Bonus must be passed in as an instance of the + Price object. + """ + params = bonus_price.get_as_params('BonusAmount', 1) + params['WorkerId'] = worker_id + params['AssignmentId'] = assignment_id + params['Reason'] = reason + + return self._process_request('GrantBonus', params) + + def block_worker(self, worker_id, reason): + """ + Block a worker from working on my tasks. + """ + params = {'WorkerId': worker_id, 'Reason': reason} + + return self._process_request('BlockWorker', params) + + def unblock_worker(self, worker_id, reason): + """ + Unblock a worker from working on my tasks. + """ + params = {'WorkerId': worker_id, 'Reason': reason} + + return self._process_request('UnblockWorker', params) + + def notify_workers(self, worker_ids, subject, message_text): + """ + Send a text message to workers. + """ + params = {'Subject': subject, + 'MessageText': message_text} + self.build_list_params(params, worker_ids, 'WorkerId') + + return self._process_request('NotifyWorkers', params) + + def create_qualification_type(self, + name, + description, + status, + keywords=None, + retry_delay=None, + test=None, + answer_key=None, + answer_key_xml=None, + test_duration=None, + auto_granted=False, + auto_granted_value=1): + """ + Create a new Qualification Type. + + name: This will be visible to workers and must be unique for a + given requester. + + description: description shown to workers. Max 2000 characters. + + status: 'Active' or 'Inactive' + + keywords: list of keyword strings or comma separated string. + Max length of 1000 characters when concatenated with commas. + + retry_delay: number of seconds after requesting a + qualification the worker must wait before they can ask again. + If not specified, workers can only request this qualification + once. + + test: a QuestionForm + + answer_key: an XML string of your answer key, for automatically + scored qualification tests. + (Consider implementing an AnswerKey class for this to support.) + + test_duration: the number of seconds a worker has to complete the test. + + auto_granted: if True, requests for the Qualification are granted + immediately. Can't coexist with a test. + + auto_granted_value: auto_granted qualifications are given this value. + + """ + + params = {'Name': name, + 'Description': description, + 'QualificationTypeStatus': status, + } + if retry_delay is not None: + params['RetryDelayInSeconds'] = retry_delay + + if test is not None: + assert(isinstance(test, QuestionForm)) + assert(test_duration is not None) + params['Test'] = test.get_as_xml() + + if test_duration is not None: + params['TestDurationInSeconds'] = test_duration + + if answer_key is not None: + if isinstance(answer_key, basestring): + params['AnswerKey'] = answer_key # xml + else: + raise TypeError + # Eventually someone will write an AnswerKey class. 
+ + if auto_granted: + assert(test is None) + params['AutoGranted'] = True + params['AutoGrantedValue'] = auto_granted_value + + if keywords: + params['Keywords'] = self.get_keywords_as_string(keywords) + + return self._process_request('CreateQualificationType', params, + [('QualificationType', + QualificationType)]) + + def get_qualification_type(self, qualification_type_id): + params = {'QualificationTypeId': qualification_type_id } + return self._process_request('GetQualificationType', params, + [('QualificationType', QualificationType)]) + + def get_all_qualifications_for_qual_type(self, qualification_type_id): + page_size = 100 + search_qual = self.get_qualifications_for_qualification_type(qualification_type_id) + total_records = int(search_qual.TotalNumResults) + get_page_quals = lambda page: self.get_qualifications_for_qualification_type(qualification_type_id = qualification_type_id, page_size=page_size, page_number = page) + page_nums = self._get_pages(page_size, total_records) + qual_sets = itertools.imap(get_page_quals, page_nums) + return itertools.chain.from_iterable(qual_sets) + + def get_qualifications_for_qualification_type(self, qualification_type_id, page_size=100, page_number = 1): + params = {'QualificationTypeId': qualification_type_id, + 'PageSize': page_size, + 'PageNumber': page_number} + return self._process_request('GetQualificationsForQualificationType', params, + [('Qualification', Qualification)]) + + def update_qualification_type(self, qualification_type_id, + description=None, + status=None, + retry_delay=None, + test=None, + answer_key=None, + test_duration=None, + auto_granted=None, + auto_granted_value=None): + + params = {'QualificationTypeId': qualification_type_id} + + if description is not None: + params['Description'] = description + + if status is not None: + params['QualificationTypeStatus'] = status + + if retry_delay is not None: + params['RetryDelayInSeconds'] = retry_delay + + if test is not None: + assert(isinstance(test, QuestionForm)) + params['Test'] = test.get_as_xml() + + if test_duration is not None: + params['TestDurationInSeconds'] = test_duration + + if answer_key is not None: + if isinstance(answer_key, basestring): + params['AnswerKey'] = answer_key # xml + else: + raise TypeError + # Eventually someone will write an AnswerKey class. 
+
+        if auto_granted is not None:
+            params['AutoGranted'] = auto_granted
+
+        if auto_granted_value is not None:
+            params['AutoGrantedValue'] = auto_granted_value
+
+        return self._process_request('UpdateQualificationType', params,
+                                     [('QualificationType', QualificationType)])
+
+    def dispose_qualification_type(self, qualification_type_id):
+        """TODO: Document."""
+        params = {'QualificationTypeId': qualification_type_id}
+        return self._process_request('DisposeQualificationType', params)
+
+    def search_qualification_types(self, query=None, sort_by='Name',
+                                   sort_direction='Ascending', page_size=10,
+                                   page_number=1, must_be_requestable=True,
+                                   must_be_owned_by_caller=True):
+        """TODO: Document."""
+        params = {'Query': query,
+                  'SortProperty': sort_by,
+                  'SortDirection': sort_direction,
+                  'PageSize': page_size,
+                  'PageNumber': page_number,
+                  'MustBeRequestable': must_be_requestable,
+                  'MustBeOwnedByCaller': must_be_owned_by_caller}
+        return self._process_request('SearchQualificationTypes', params,
+                                     [('QualificationType', QualificationType)])
+
+    def get_qualification_requests(self, qualification_type_id,
+                                   sort_by='Expiration',
+                                   sort_direction='Ascending', page_size=10,
+                                   page_number=1):
+        """TODO: Document."""
+        params = {'QualificationTypeId': qualification_type_id,
+                  'SortProperty': sort_by,
+                  'SortDirection': sort_direction,
+                  'PageSize': page_size,
+                  'PageNumber': page_number}
+        return self._process_request('GetQualificationRequests', params,
+                                     [('QualificationRequest', QualificationRequest)])
+
+    def grant_qualification(self, qualification_request_id, integer_value=1):
+        """TODO: Document."""
+        params = {'QualificationRequestId': qualification_request_id,
+                  'IntegerValue': integer_value}
+        return self._process_request('GrantQualification', params)
+
+    def revoke_qualification(self, subject_id, qualification_type_id,
+                             reason=None):
+        """TODO: Document."""
+        params = {'SubjectId': subject_id,
+                  'QualificationTypeId': qualification_type_id,
+                  'Reason': reason}
+        return self._process_request('RevokeQualification', params)
+
+    def assign_qualification(self, qualification_type_id, worker_id,
+                             value=1, send_notification=True):
+        params = {'QualificationTypeId': qualification_type_id,
+                  'WorkerId': worker_id,
+                  'IntegerValue': value,
+                  'SendNotification': send_notification}
+        return self._process_request('AssignQualification', params)
+
+    def get_qualification_score(self, qualification_type_id, worker_id):
+        """TODO: Document."""
+        params = {'QualificationTypeId': qualification_type_id,
+                  'SubjectId': worker_id}
+        return self._process_request('GetQualificationScore', params,
+                                     [('Qualification', Qualification)])
+
+    def update_qualification_score(self, qualification_type_id, worker_id,
+                                   value):
+        """TODO: Document."""
+        params = {'QualificationTypeId': qualification_type_id,
+                  'SubjectId': worker_id,
+                  'IntegerValue': value}
+        return self._process_request('UpdateQualificationScore', params)
+
+    def _process_request(self, request_type, params, marker_elems=None):
+        """
+        Helper to process the xml response from AWS
+        """
+        params['Operation'] = request_type
+        response = self.make_request(None, params, verb='POST')
+        return self._process_response(response, marker_elems)
+
+    def _process_response(self, response, marker_elems=None):
+        """
+        Helper to process the xml response from AWS
+        """
+        body = response.read()
+        if self.debug == 2:
+            print(body)
+        if '<Errors>' not in body:
+            rs = ResultSet(marker_elems)
+            h = handler.XmlHandler(rs, self)
+            xml.sax.parseString(body, h)
+            return rs
+        else:
+            raise MTurkRequestError(response.status, response.reason, body)
+
+    @staticmethod
+    def get_keywords_as_string(keywords):
+        """
+        Returns a comma+space-separated string of keywords from either
+        a list or a string
+        """
+        if isinstance(keywords, list):
+            keywords = ', '.join(keywords)
+        if isinstance(keywords, str):
+            final_keywords = keywords
+        elif isinstance(keywords, unicode):
+            final_keywords = keywords.encode('utf-8')
+        elif keywords is None:
+            final_keywords = ""
+        else:
+            raise TypeError("keywords argument must be a string or a list of strings; got a %s" % type(keywords))
+        return final_keywords
+
+    @staticmethod
+    def get_price_as_price(reward):
+        """
+        Returns a Price data structure from either a float or a Price
+        """
+        if isinstance(reward, Price):
+            final_price = reward
+        else:
+            final_price = Price(reward)
+        return final_price
+
+    @staticmethod
+    def duration_as_seconds(duration):
+        if isinstance(duration, datetime.timedelta):
+            duration = duration.days * 86400 + duration.seconds
+        try:
+            duration = int(duration)
+        except TypeError:
+            raise TypeError("Duration must be a timedelta or int-castable, got %s" % type(duration))
+        return duration
+
+
+class BaseAutoResultElement(object):
+    """
+    Base class to automatically add attributes when parsing XML
+    """
+    def __init__(self, connection):
+        pass
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        setattr(self, name, value)
+
+
+class HIT(BaseAutoResultElement):
+    """
+    Class to extract a HIT structure from a response (used in ResultSet)
+
+    Will have attributes named as per the Developer Guide,
+    e.g. HITId, HITTypeId, CreationTime
+    """
+
+    # property helper to determine if HIT has expired
+    def _has_expired(self):
+        """ Has this HIT expired yet? """
+        expired = False
+        if hasattr(self, 'Expiration'):
+            now = datetime.datetime.utcnow()
+            expiration = datetime.datetime.strptime(self.Expiration, '%Y-%m-%dT%H:%M:%SZ')
+            expired = (now >= expiration)
+        else:
+            raise ValueError("ERROR: Request for expired property, but no Expiration in HIT!")
+        return expired
+
+    # are we there yet?
+    expired = property(_has_expired)
+
+
+class FileUploadURL(BaseAutoResultElement):
+    """
+    Class to extract a FileUploadURL structure from a response
+    """
+
+    pass
+
+
+class HITTypeId(BaseAutoResultElement):
+    """
+    Class to extract a HITTypeId structure from a response
+    """
+
+    pass
+
+
+class Qualification(BaseAutoResultElement):
+    """
+    Class to extract a Qualification structure from a response (used in
+    ResultSet)
+
+    Will have attributes named as per the Developer Guide such as
+    QualificationTypeId, IntegerValue. Does not seem to contain GrantTime.
+    """
+
+    pass
+
+
+class QualificationType(BaseAutoResultElement):
+    """
+    Class to extract a QualificationType structure from a response (used in
+    ResultSet)
+
+    Will have attributes named as per the Developer Guide,
+    e.g. QualificationTypeId, CreationTime, Name, etc
+    """
+
+    pass
+
+
+class QualificationRequest(BaseAutoResultElement):
+    """
+    Class to extract a QualificationRequest structure from a response (used in
+    ResultSet)
+
+    Will have attributes named as per the Developer Guide,
+    e.g. QualificationRequestId, QualificationTypeId, SubjectId, etc
+    """
+
+    def __init__(self, connection):
+        super(QualificationRequest, self).__init__(connection)
+        self.answers = []
+
+    def endElement(self, name, value, connection):
+        # the answer consists of embedded XML, so it needs to be parsed independently
+        if name == 'Answer':
+            answer_rs = ResultSet([('Answer', QuestionFormAnswer)])
+            h = handler.XmlHandler(answer_rs, connection)
+            value = connection.get_utf8_value(value)
+            xml.sax.parseString(value, h)
+            self.answers.append(answer_rs)
+        else:
+            super(QualificationRequest, self).endElement(name, value, connection)
+
+
+class Assignment(BaseAutoResultElement):
+    """
+    Class to extract an Assignment structure from a response (used in
+    ResultSet)
+
+    Will have attributes named as per the Developer Guide,
+    e.g. AssignmentId, WorkerId, HITId, Answer, etc
+    """
+
+    def __init__(self, connection):
+        super(Assignment, self).__init__(connection)
+        self.answers = []
+
+    def endElement(self, name, value, connection):
+        # the answer consists of embedded XML, so it needs to be parsed independently
+        if name == 'Answer':
+            answer_rs = ResultSet([('Answer', QuestionFormAnswer)])
+            h = handler.XmlHandler(answer_rs, connection)
+            value = connection.get_utf8_value(value)
+            xml.sax.parseString(value, h)
+            self.answers.append(answer_rs)
+        else:
+            super(Assignment, self).endElement(name, value, connection)
+
+
+class QuestionFormAnswer(BaseAutoResultElement):
+    """
+    Class to extract Answers from inside the embedded XML
+    QuestionFormAnswers element inside the Answer element which is
+    part of the Assignment and QualificationRequest structures
+
+    A QuestionFormAnswers element contains an Answer element for each
+    question in the HIT or Qualification test for which the Worker
+    provided an answer. Each Answer contains a QuestionIdentifier
+    element whose value corresponds to the QuestionIdentifier of a
+    Question in the QuestionForm. See the QuestionForm data structure
+    for more information about questions and answer specifications.
+
+    If the question expects a free-text answer, the Answer element
+    contains a FreeText element.
This element contains the Worker's + answer + + *NOTE* - currently really only supports free-text and selection answers + """ + + def __init__(self, connection): + super(QuestionFormAnswer, self).__init__(connection) + self.fields = [] + self.qid = None + + def endElement(self, name, value, connection): + if name == 'QuestionIdentifier': + self.qid = value + elif name in ['FreeText', 'SelectionIdentifier', 'OtherSelectionText'] and self.qid: + self.fields.append(value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/mturk/layoutparam.py b/desktop/core/ext-py/boto-2.38.0/boto/mturk/layoutparam.py new file mode 100644 index 0000000000000000000000000000000000000000..de7989554e5e55dba994b8b18b3a4d13445204a8 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/mturk/layoutparam.py @@ -0,0 +1,55 @@ +# Copyright (c) 2008 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
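+
+# Illustrative usage sketch (an editorial addition, not original boto code):
+# the two classes below serialize HITLayoutParameter entries for
+# MTurkConnection.create_hit(hit_layout=..., layout_params=...). The
+# parameter names and URL are invented for the example.
+#
+#     from boto.mturk.layoutparam import LayoutParameter, LayoutParameters
+#
+#     layout_params = LayoutParameters([LayoutParameter('image_url', 'http://example.com/1.jpg')])
+#     layout_params.add(LayoutParameter('objects_to_find', 'cats'))
+#     layout_params.get_as_params()
+#     # -> {'HITLayoutParameter.1.Name': 'image_url',
+#     #     'HITLayoutParameter.1.Value': 'http://example.com/1.jpg',
+#     #     'HITLayoutParameter.2.Name': 'objects_to_find',
+#     #     'HITLayoutParameter.2.Value': 'cats'}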
+
+class LayoutParameters(object):
+
+    def __init__(self, layoutParameters=None):
+        if layoutParameters is None:
+            layoutParameters = []
+        self.layoutParameters = layoutParameters
+
+    def add(self, req):
+        self.layoutParameters.append(req)
+
+    def get_as_params(self):
+        params = {}
+        assert(len(self.layoutParameters) <= 25)
+        for n, layoutParameter in enumerate(self.layoutParameters):
+            kv = layoutParameter.get_as_params()
+            for key in kv:
+                params['HITLayoutParameter.%s.%s' % ((n+1), key)] = kv[key]
+        return params
+
+class LayoutParameter(object):
+    """
+    Representation of a single HIT layout parameter
+    """
+
+    def __init__(self, name, value):
+        self.name = name
+        self.value = value
+
+    def get_as_params(self):
+        params = {
+            "Name": self.name,
+            "Value": self.value,
+        }
+        return params
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/mturk/notification.py b/desktop/core/ext-py/boto-2.38.0/boto/mturk/notification.py
new file mode 100644
index 0000000000000000000000000000000000000000..118daaab956e284538ea4599c5b26666c77e3acb
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/mturk/notification.py
@@ -0,0 +1,103 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Provides NotificationMessage and Event classes, with utility methods, for
+implementations of the Mechanical Turk Notification API.
+"""
+
+import hmac
+try:
+    from hashlib import sha1 as sha
+except ImportError:
+    import sha
+import base64
+import re
+
+class NotificationMessage(object):
+
+    NOTIFICATION_WSDL = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurk/2006-05-05/AWSMechanicalTurkRequesterNotification.wsdl"
+    NOTIFICATION_VERSION = '2006-05-05'
+
+    SERVICE_NAME = "AWSMechanicalTurkRequesterNotification"
+    OPERATION_NAME = "Notify"
+
+    EVENT_PATTERN = r"Event\.(?P<n>\d+)\.(?P<param>\w+)"
+    EVENT_RE = re.compile(EVENT_PATTERN)
+
+    def __init__(self, d):
+        """
+        Constructor; expects parameter d to be a dict of string parameters from a REST transport notification message
+        """
+        self.signature = d['Signature']  # vH6ZbE0NhkF/hfNyxz2OgmzXYKs=
+        self.timestamp = d['Timestamp']  # 2006-05-23T23:22:30Z
+        self.version = d['Version']  # 2006-05-05
+        assert d['method'] == NotificationMessage.OPERATION_NAME, "Method should be '%s'" % NotificationMessage.OPERATION_NAME
+
+        # Build Events
+        self.events = []
+        events_dict = {}
+        if 'Event' in d:
+            # TurboGears surprised me by 'doing the right thing' and making { 'Event': { '1': { 'EventType': ... } } } etc.
+            events_dict = d['Event']
+        else:
+            for k in d:
+                v = d[k]
+                if k.startswith('Event.'):
+                    ed = NotificationMessage.EVENT_RE.search(k).groupdict()
+                    n = int(ed['n'])
+                    param = str(ed['param'])
+                    if n not in events_dict:
+                        events_dict[n] = {}
+                    events_dict[n][param] = v
+        for n in events_dict:
+            self.events.append(Event(events_dict[n]))
+
+    def verify(self, secret_key):
+        """
+        Verifies the authenticity of a notification message.
+
+        TODO: This is doing a form of authentication and
+        this functionality should really be merged
+        with the pluggable authentication mechanism
+        at some point.
+        """
+        verification_input = NotificationMessage.SERVICE_NAME
+        verification_input += NotificationMessage.OPERATION_NAME
+        verification_input += self.timestamp
+        h = hmac.new(key=secret_key, digestmod=sha)
+        h.update(verification_input)
+        signature_calc = base64.b64encode(h.digest())
+        return self.signature == signature_calc
+
+class Event(object):
+    def __init__(self, d):
+        self.event_type = d['EventType']
+        self.event_time_str = d['EventTime']
+        self.hit_type = d['HITTypeId']
+        self.hit_id = d['HITId']
+        if 'AssignmentId' in d:  # Not present in all event types
+            self.assignment_id = d['AssignmentId']
+
+        # TODO: build self.event_time datetime from string self.event_time_str
+
+    def __repr__(self):
+        return "<boto.mturk.notification.Event: %s for HIT # %s>" % (self.event_type, self.hit_id)
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/mturk/price.py b/desktop/core/ext-py/boto-2.38.0/boto/mturk/price.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e194e422e503f48055af04b3ea402d513682ec0
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/mturk/price.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
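+
+# Illustrative usage sketch (an editorial addition, not original boto code):
+# Price both parses Amount/CurrencyCode/FormattedPrice response elements and
+# emits the indexed request parameters used by operations such as CreateHIT
+# and GrantBonus.
+#
+#     from boto.mturk.price import Price
+#
+#     reward = Price(amount=0.25)        # currency_code defaults to 'USD'
+#     reward.get_as_params('Reward')
+#     # -> {'Reward.1.Amount': '0.25', 'Reward.1.CurrencyCode': 'USD'}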
+ +class Price(object): + + def __init__(self, amount=0.0, currency_code='USD'): + self.amount = amount + self.currency_code = currency_code + self.formatted_price = '' + + def __repr__(self): + if self.formatted_price: + return self.formatted_price + else: + return str(self.amount) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Amount': + self.amount = float(value) + elif name == 'CurrencyCode': + self.currency_code = value + elif name == 'FormattedPrice': + self.formatted_price = value + + def get_as_params(self, label, ord=1): + return {'%s.%d.Amount'%(label, ord) : str(self.amount), + '%s.%d.CurrencyCode'%(label, ord) : self.currency_code} diff --git a/desktop/core/ext-py/boto-2.38.0/boto/mturk/qualification.py b/desktop/core/ext-py/boto-2.38.0/boto/mturk/qualification.py new file mode 100644 index 0000000000000000000000000000000000000000..4fc230f9dfb055a99eb5ecd0bf5fc0177b6d6958 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/mturk/qualification.py @@ -0,0 +1,137 @@ +# Copyright (c) 2008 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class Qualifications(object): + + def __init__(self, requirements=None): + if requirements is None: + requirements = [] + self.requirements = requirements + + def add(self, req): + self.requirements.append(req) + + def get_as_params(self): + params = {} + assert(len(self.requirements) <= 10) + for n, req in enumerate(self.requirements): + reqparams = req.get_as_params() + for rp in reqparams: + params['QualificationRequirement.%s.%s' % ((n+1), rp) ] = reqparams[rp] + return params + + +class Requirement(object): + """ + Representation of a single requirement + """ + + def __init__(self, qualification_type_id, comparator, integer_value=None, required_to_preview=False): + self.qualification_type_id = qualification_type_id + self.comparator = comparator + self.integer_value = integer_value + self.required_to_preview = required_to_preview + + def get_as_params(self): + params = { + "QualificationTypeId": self.qualification_type_id, + "Comparator": self.comparator, + } + if self.comparator != 'Exists' and self.integer_value is not None: + params['IntegerValue'] = self.integer_value + if self.required_to_preview: + params['RequiredToPreview'] = "true" + return params + +class PercentAssignmentsSubmittedRequirement(Requirement): + """ + The percentage of assignments the Worker has submitted, over all assignments the Worker has accepted. 
The value is an integer between 0 and 100. + """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + super(PercentAssignmentsSubmittedRequirement, self).__init__(qualification_type_id="00000000000000000000", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + +class PercentAssignmentsAbandonedRequirement(Requirement): + """ + The percentage of assignments the Worker has abandoned (allowed the deadline to elapse), over all assignments the Worker has accepted. The value is an integer between 0 and 100. + """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + super(PercentAssignmentsAbandonedRequirement, self).__init__(qualification_type_id="00000000000000000070", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + +class PercentAssignmentsReturnedRequirement(Requirement): + """ + The percentage of assignments the Worker has returned, over all assignments the Worker has accepted. The value is an integer between 0 and 100. + """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + super(PercentAssignmentsReturnedRequirement, self).__init__(qualification_type_id="000000000000000000E0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + +class PercentAssignmentsApprovedRequirement(Requirement): + """ + The percentage of assignments the Worker has submitted that were subsequently approved by the Requester, over all assignments the Worker has submitted. The value is an integer between 0 and 100. + """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + super(PercentAssignmentsApprovedRequirement, self).__init__(qualification_type_id="000000000000000000L0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + +class PercentAssignmentsRejectedRequirement(Requirement): + """ + The percentage of assignments the Worker has submitted that were subsequently rejected by the Requester, over all assignments the Worker has submitted. The value is an integer between 0 and 100. + """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + super(PercentAssignmentsRejectedRequirement, self).__init__(qualification_type_id="000000000000000000S0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + +class NumberHitsApprovedRequirement(Requirement): + """ + Specifies the total number of HITs submitted by a Worker that have been approved. The value is an integer greater than or equal to 0. + """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + super(NumberHitsApprovedRequirement, self).__init__(qualification_type_id="00000000000000000040", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + +class LocaleRequirement(Requirement): + """ + A Qualification requirement based on the Worker's location. The Worker's location is specified by the Worker to Mechanical Turk when the Worker creates his account. 
+ """ + + def __init__(self, comparator, locale, required_to_preview=False): + super(LocaleRequirement, self).__init__(qualification_type_id="00000000000000000071", comparator=comparator, integer_value=None, required_to_preview=required_to_preview) + self.locale = locale + + def get_as_params(self): + params = { + "QualificationTypeId": self.qualification_type_id, + "Comparator": self.comparator, + 'LocaleValue.Country': self.locale, + } + if self.required_to_preview: + params['RequiredToPreview'] = "true" + return params + +class AdultRequirement(Requirement): + """ + Requires workers to acknowledge that they are over 18 and that they agree to work on potentially offensive content. The value type is boolean, 1 (required), 0 (not required, the default). + """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + super(AdultRequirement, self).__init__(qualification_type_id="00000000000000000060", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/mturk/question.py b/desktop/core/ext-py/boto-2.38.0/boto/mturk/question.py new file mode 100644 index 0000000000000000000000000000000000000000..293b0782edf72923b2ed8412975b1708f23b06ee --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/mturk/question.py @@ -0,0 +1,455 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
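+
+# Illustrative usage sketch (an editorial addition, not original boto code):
+# a one-question form built with the classes defined below; create_hit()
+# accepts the QuestionForm via its ``questions`` argument. The identifier
+# and title strings are invented for the example.
+#
+#     from boto.mturk.question import (AnswerSpecification, FreeTextAnswer,
+#                                      Question, QuestionContent, QuestionForm)
+#
+#     content = QuestionContent()
+#     content.append_field('Title', 'Describe the image in one sentence')
+#     question = Question(identifier='description',
+#                         content=content,
+#                         answer_spec=AnswerSpecification(FreeTextAnswer()),
+#                         is_required=True)
+#     form = QuestionForm([question])
+#     form.get_as_xml()    # XML payload suitable for CreateHIT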
+
+import xml.sax.saxutils
+
+class Question(object):
+    template = "<Question>%(items)s</Question>"
+
+    def __init__(self, identifier, content, answer_spec,
+                 is_required=False, display_name=None):
+        # copy all of the parameters into object attributes
+        self.__dict__.update(vars())
+        del self.self
+
+    def get_as_params(self, label='Question'):
+        return {label: self.get_as_xml()}
+
+    def get_as_xml(self):
+        items = [
+            SimpleField('QuestionIdentifier', self.identifier),
+            SimpleField('IsRequired', str(self.is_required).lower()),
+            self.content,
+            self.answer_spec,
+        ]
+        if self.display_name is not None:
+            items.insert(1, SimpleField('DisplayName', self.display_name))
+        items = ''.join(item.get_as_xml() for item in items)
+        return self.template % vars()
+
+try:
+    from lxml import etree
+
+    class ValidatingXML(object):
+
+        def validate(self):
+            import urllib2
+            schema_src_file = urllib2.urlopen(self.schema_url)
+            schema_doc = etree.parse(schema_src_file)
+            schema = etree.XMLSchema(schema_doc)
+            doc = etree.fromstring(self.get_as_xml())
+            schema.assertValid(doc)
+except ImportError:
+    class ValidatingXML(object):
+
+        def validate(self):
+            pass
+
+
+class ExternalQuestion(ValidatingXML):
+    """
+    An object for constructing an External Question.
+    """
+    schema_url = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/ExternalQuestion.xsd"
+    template = '<ExternalQuestion xmlns="%(schema_url)s"><ExternalURL>%%(external_url)s</ExternalURL><FrameHeight>%%(frame_height)s</FrameHeight></ExternalQuestion>' % vars()
+
+    def __init__(self, external_url, frame_height):
+        self.external_url = xml.sax.saxutils.escape(external_url)
+        self.frame_height = frame_height
+
+    def get_as_params(self, label='ExternalQuestion'):
+        return {label: self.get_as_xml()}
+
+    def get_as_xml(self):
+        return self.template % vars(self)
+
+
+class XMLTemplate(object):
+    def get_as_xml(self):
+        return self.template % vars(self)
+
+
+class SimpleField(XMLTemplate):
+    """
+    A Simple name/value pair that can be easily rendered as XML.
+
+    >>> SimpleField('Text', 'A text string').get_as_xml()
+    '<Text>A text string</Text>'
+    """
+    template = '<%(field)s>%(value)s</%(field)s>'
+
+    def __init__(self, field, value):
+        self.field = field
+        self.value = value
+
+
+class Binary(XMLTemplate):
+    template = """<Binary><MimeType><Type>%(type)s</Type><SubType>%(subtype)s</SubType></MimeType><DataURL>%(url)s</DataURL><AltText>%(alt_text)s</AltText></Binary>"""
+
+    def __init__(self, type, subtype, url, alt_text):
+        self.__dict__.update(vars())
+        del self.self
+
+
+class List(list):
+    """A bulleted list suitable for OrderedContent or Overview content"""
+    def get_as_xml(self):
+        items = ''.join('<ListItem>%s</ListItem>' % item for item in self)
+        return '<List>%s</List>' % items
+
+
+class Application(object):
+    template = "<%(class_)s>%(content)s</%(class_)s>"
+    parameter_template = "<Name>%(name)s</Name><Value>%(value)s</Value>"
+
+    def __init__(self, width, height, **parameters):
+        self.width = width
+        self.height = height
+        self.parameters = parameters
+
+    def get_inner_content(self, content):
+        content.append_field('Width', self.width)
+        content.append_field('Height', self.height)
+        for name, value in self.parameters.items():
+            value = self.parameter_template % vars()
+            content.append_field('ApplicationParameter', value)
+
+    def get_as_xml(self):
+        content = OrderedContent()
+        self.get_inner_content(content)
+        content = content.get_as_xml()
+        class_ = self.__class__.__name__
+        return self.template % vars()
+
+
+class HTMLQuestion(ValidatingXML):
+    schema_url = 'http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2011-11-11/HTMLQuestion.xsd'
+    template = '<HTMLQuestion xmlns="%(schema_url)s"><HTMLContent><![CDATA[<!DOCTYPE html>%%(html_form)s]]></HTMLContent><FrameHeight>%%(frame_height)s</FrameHeight></HTMLQuestion>' % vars()
+
+    def __init__(self, html_form, frame_height):
+        self.html_form = html_form
+        self.frame_height = frame_height
+
+    def get_as_params(self, label="HTMLQuestion"):
+        return {label: self.get_as_xml()}
+
+    def get_as_xml(self):
+        return self.template % vars(self)
+
+
+class JavaApplet(Application):
+    def __init__(self, path, filename, *args, **kwargs):
+        self.path = path
+        self.filename = filename
+        super(JavaApplet, self).__init__(*args, **kwargs)
+
+    def get_inner_content(self, content):
+        content = OrderedContent()
+        content.append_field('AppletPath', self.path)
+        content.append_field('AppletFilename', self.filename)
+        super(JavaApplet, self).get_inner_content(content)
+
+
+class Flash(Application):
+    def __init__(self, url, *args, **kwargs):
+        self.url = url
+        super(Flash, self).__init__(*args, **kwargs)
+
+    def get_inner_content(self, content):
+        content = OrderedContent()
+        content.append_field('FlashMovieURL', self.url)
+        super(Flash, self).get_inner_content(content)
+
+
+class FormattedContent(XMLTemplate):
+    schema_url = 'http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/FormattedContentXHTMLSubset.xsd'
+    template = '<FormattedContent><![CDATA[%(content)s]]></FormattedContent>'
+
+    def __init__(self, content):
+        self.content = content
+
+
+class OrderedContent(list):
+
+    def append_field(self, field, value):
+        self.append(SimpleField(field, value))
+
+    def get_as_xml(self):
+        return ''.join(item.get_as_xml() for item in self)
+
+
+class Overview(OrderedContent):
+    template = '<Overview>%(content)s</Overview>'
+
+    def get_as_params(self, label='Overview'):
+        return {label: self.get_as_xml()}
+
+    def get_as_xml(self):
+        content = super(Overview, self).get_as_xml()
+        return self.template % vars()
+
+
+class QuestionForm(ValidatingXML, list):
+    """
+    From the AMT API docs:
+
+    The top-most element of the QuestionForm data structure is a
+    QuestionForm element. This element contains optional Overview
+    elements and one or more Question elements. There can be any
+    number of these two element types listed in any order. The
+    following example structure has an Overview element and a
+    Question element followed by a second Overview element and
+    Question element--all within the same QuestionForm.
+
+    ::
+
+        <QuestionForm>
+            <Overview>
+                [...]
+            </Overview>
+            <Question>
+                [...]
+            </Question>
+            <Overview>
+                [...]
+            </Overview>
+            <Question>
+                [...]
+            </Question>
+            [...]
+        </QuestionForm>
+
+    QuestionForm is implemented as a list, so to construct a
+    QuestionForm, simply append Questions and Overviews (with at least
+    one Question).
+    """
+    schema_url = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2005-10-01/QuestionForm.xsd"
+    xml_template = """<QuestionForm xmlns="%(schema_url)s">%%(items)s</QuestionForm>""" % vars()
+
+    def is_valid(self):
+        return (
+            any(isinstance(item, Question) for item in self)
+            and
+            all(isinstance(item, (Question, Overview)) for item in self)
+        )
+
+    def get_as_xml(self):
+        assert self.is_valid(), "QuestionForm contains invalid elements"
+        items = ''.join(item.get_as_xml() for item in self)
+        return self.xml_template % vars()
+
+
+class QuestionContent(OrderedContent):
+    template = '<QuestionContent>%(content)s</QuestionContent>'
+
+    def get_as_xml(self):
+        content = super(QuestionContent, self).get_as_xml()
+        return self.template % vars()
+
+
+class AnswerSpecification(object):
+    template = '<AnswerSpecification>%(spec)s</AnswerSpecification>'
+
+    def __init__(self, spec):
+        self.spec = spec
+
+    def get_as_xml(self):
+        spec = self.spec.get_as_xml()
+        return self.template % vars()
+
+
+class Constraints(OrderedContent):
+    template = '<Constraints>%(content)s</Constraints>'
+
+    def get_as_xml(self):
+        content = super(Constraints, self).get_as_xml()
+        return self.template % vars()
+
+
+class Constraint(object):
+    def get_attributes(self):
+        pairs = zip(self.attribute_names, self.attribute_values)
+        attrs = ' '.join(
+            '%s="%d"' % (name, value)
+            for (name, value) in pairs
+            if value is not None
+        )
+        return attrs
+
+    def get_as_xml(self):
+        attrs = self.get_attributes()
+        return self.template % vars()
+
+
+class NumericConstraint(Constraint):
+    attribute_names = 'minValue', 'maxValue'
+    template = '<IsNumeric %(attrs)s />'
+
+    def __init__(self, min_value=None, max_value=None):
+        self.attribute_values = min_value, max_value
+
+
+class LengthConstraint(Constraint):
+    attribute_names = 'minLength', 'maxLength'
+    template = '<Length %(attrs)s />'
+
+    def __init__(self, min_length=None, max_length=None):
+        self.attribute_values = min_length, max_length
+
+
+class RegExConstraint(Constraint):
+    attribute_names = 'regex', 'errorText', 'flags'
+    template = '<AnswerFormatRegex %(attrs)s />'
+
+    def __init__(self, pattern, error_text=None, flags=None):
+        self.attribute_values = pattern, error_text, flags
+
+    def get_attributes(self):
+        pairs = zip(self.attribute_names, self.attribute_values)
+        attrs = ' '.join(
+            '%s="%s"' % (name, value)
+            for (name, value) in pairs
+            if value is not None
+        )
+        return attrs
+
+
+class NumberOfLinesSuggestion(object):
+    template = '<NumberOfLinesSuggestion>%(num_lines)s</NumberOfLinesSuggestion>'
+
+    def __init__(self, num_lines=1):
+        self.num_lines = num_lines
+
+    def get_as_xml(self):
+        num_lines = self.num_lines
+        return self.template % vars()
+
+
+class FreeTextAnswer(object):
+    template = '<FreeTextAnswer>%(items)s</FreeTextAnswer>'
+
+    def __init__(self, default=None, constraints=None, num_lines=None):
+        self.default = default
+        if constraints is None:
+            self.constraints = Constraints()
+        else:
+            self.constraints = Constraints(constraints)
+        self.num_lines = num_lines
+
+    def get_as_xml(self):
+        items = [self.constraints]
+        if self.default:
+            items.append(SimpleField('DefaultText', self.default))
+        if self.num_lines:
+            items.append(NumberOfLinesSuggestion(self.num_lines))
+        items = ''.join(item.get_as_xml() for item in items)
+        return self.template % vars()
+
+
+class FileUploadAnswer(object):
+    template = """<FileUploadAnswer><MaxFileSizeInBytes>%(max_bytes)d</MaxFileSizeInBytes><MinFileSizeInBytes>%(min_bytes)d</MinFileSizeInBytes></FileUploadAnswer>"""
+
+    def __init__(self, min_bytes, max_bytes):
+        assert 0 <= min_bytes <= max_bytes <= 2 * 10 ** 9
+        self.min_bytes = min_bytes
+        self.max_bytes = max_bytes
+
+    def get_as_xml(self):
+        return self.template % vars(self)
+
+
+class SelectionAnswer(object):
+    """
+    A class to generate SelectionAnswer XML data structures.
+    Does not yet implement Binary selection options.
+    """
+    SELECTIONANSWER_XML_TEMPLATE = """<SelectionAnswer>%s%s<Selections>%s</Selections></SelectionAnswer>"""  # % (count_xml, style_xml, selections_xml)
+    SELECTION_XML_TEMPLATE = """<Selection><SelectionIdentifier>%s</SelectionIdentifier>%s</Selection>"""  # (identifier, value_xml)
+    SELECTION_VALUE_XML_TEMPLATE = """<%s>%s</%s>"""  # (type, value, type)
+    STYLE_XML_TEMPLATE = """<StyleSuggestion>%s</StyleSuggestion>"""  # (style)
+    MIN_SELECTION_COUNT_XML_TEMPLATE = """<MinSelectionCount>%s</MinSelectionCount>"""  # count
+    MAX_SELECTION_COUNT_XML_TEMPLATE = """<MaxSelectionCount>%s</MaxSelectionCount>"""  # count
+    ACCEPTED_STYLES = ['radiobutton', 'dropdown', 'checkbox', 'list', 'combobox', 'multichooser']
+    OTHER_SELECTION_ELEMENT_NAME = 'OtherSelection'
+
+    def __init__(self, min=1, max=1, style=None, selections=None, type='text', other=False):
+
+        if style is not None:
+            if style in SelectionAnswer.ACCEPTED_STYLES:
+                self.style_suggestion = style
+            else:
+                raise ValueError("style '%s' not recognized; should be one of %s" % (style, ', '.join(SelectionAnswer.ACCEPTED_STYLES)))
+        else:
+            self.style_suggestion = None
+
+        if selections is None:
+            raise ValueError("SelectionAnswer.__init__(): selections must be a non-empty list of (content, identifier) tuples")
+        else:
+            self.selections = selections
+
+        self.min_selections = min
+        self.max_selections = max
+
+        assert len(selections) >= self.min_selections, "# of selections is less than minimum of %d" % self.min_selections
+        #assert len(selections) <= self.max_selections, "# of selections exceeds maximum of %d" % self.max_selections
+
+        self.type = type
+
+        self.other = other
+
+    def get_as_xml(self):
+        if self.type == 'text':
+            TYPE_TAG = "Text"
+        elif self.type == 'binary':
+            TYPE_TAG = "Binary"
+        else:
+            raise ValueError("illegal type: %s; must be either 'text' or 'binary'" % str(self.type))
+
+        # build list of <Selection> elements
+        selections_xml = ""
+        for tpl in self.selections:
+            value_xml = SelectionAnswer.SELECTION_VALUE_XML_TEMPLATE % (TYPE_TAG, tpl[0], TYPE_TAG)
+            selection_xml = SelectionAnswer.SELECTION_XML_TEMPLATE % (tpl[1], value_xml)
+            selections_xml += selection_xml
+
+        if self.other:
+            # add OtherSelection element as xml if available
+            if hasattr(self.other, 'get_as_xml'):
+                assert isinstance(self.other, FreeTextAnswer), 'OtherSelection can only be a FreeTextAnswer'
+                selections_xml += self.other.get_as_xml().replace('FreeTextAnswer', 'OtherSelection')
+            else:
+                selections_xml += "<OtherSelection />"
+
+        if self.style_suggestion is not None:
+            style_xml = SelectionAnswer.STYLE_XML_TEMPLATE % self.style_suggestion
+        else:
+            style_xml = ""
+
+        if self.style_suggestion != 'radiobutton':
+            count_xml = SelectionAnswer.MIN_SELECTION_COUNT_XML_TEMPLATE % self.min_selections
+            count_xml += SelectionAnswer.MAX_SELECTION_COUNT_XML_TEMPLATE % self.max_selections
+        else:
+            count_xml = ""
+
+        ret = SelectionAnswer.SELECTIONANSWER_XML_TEMPLATE % (count_xml, style_xml, selections_xml)
+
+        # return XML
+        return ret
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/mws/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/mws/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d69b7f08a42b9f2e8574337e9d1ff29e1b1b1e40
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/mws/__init__.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2008, Chris Moyer http://coredumped.org
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+#
copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# diff --git a/desktop/core/ext-py/boto-2.38.0/boto/mws/connection.py b/desktop/core/ext-py/boto-2.38.0/boto/mws/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..687fae74f0baec58395853266b0e596ad88513af --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/mws/connection.py @@ -0,0 +1,1168 @@ +# Copyright (c) 2012-2014 Andy Davidoff http://www.disruptek.com/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
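+
+# Illustrative usage sketch (an editorial addition, not original boto code):
+# the decorators defined below wrap plain methods into throttled MWS API
+# calls, and iter_response() follows NextToken continuation pages. The
+# credentials and the attribute names on the parsed response objects are
+# assumptions here, not guarantees:
+#
+#     from boto.mws.connection import MWSConnection
+#
+#     conn = MWSConnection(aws_access_key_id='...',
+#                          aws_secret_access_key='...',
+#                          Merchant='SELLER_ID')
+#     response = conn.get_feed_submission_list()
+#     for page in conn.iter_response(response):
+#         for info in page._result.FeedSubmissionInfo:
+#             print(info.FeedSubmissionId)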
+import xml.sax +import hashlib +import string +import collections +from boto.connection import AWSQueryConnection +from boto.exception import BotoServerError +import boto.mws.exception +import boto.mws.response +from boto.handler import XmlHandler +from boto.compat import filter, map, six, encodebytes + +__all__ = ['MWSConnection'] + +api_version_path = { + 'Feeds': ('2009-01-01', 'Merchant', '/'), + 'Reports': ('2009-01-01', 'Merchant', '/'), + 'Orders': ('2013-09-01', 'SellerId', '/Orders/2013-09-01'), + 'Products': ('2011-10-01', 'SellerId', '/Products/2011-10-01'), + 'Sellers': ('2011-07-01', 'SellerId', '/Sellers/2011-07-01'), + 'Inbound': ('2010-10-01', 'SellerId', + '/FulfillmentInboundShipment/2010-10-01'), + 'Outbound': ('2010-10-01', 'SellerId', + '/FulfillmentOutboundShipment/2010-10-01'), + 'Inventory': ('2010-10-01', 'SellerId', + '/FulfillmentInventory/2010-10-01'), + 'Recommendations': ('2013-04-01', 'SellerId', + '/Recommendations/2013-04-01'), + 'CustomerInfo': ('2014-03-01', 'SellerId', + '/CustomerInformation/2014-03-01'), + 'CartInfo': ('2014-03-01', 'SellerId', + '/CartInformation/2014-03-01'), + 'Subscriptions': ('2013-07-01', 'SellerId', + '/Subscriptions/2013-07-01'), + 'OffAmazonPayments': ('2013-01-01', 'SellerId', + '/OffAmazonPayments/2013-01-01'), +} +content_md5 = lambda c: encodebytes(hashlib.md5(c).digest()).strip() +decorated_attrs = ('action', 'response', 'section', + 'quota', 'restore', 'version') +api_call_map = {} + + +def add_attrs_from(func, to): + for attr in decorated_attrs: + setattr(to, attr, getattr(func, attr, None)) + to.__wrapped__ = func + return to + + +def structured_lists(*fields): + + def decorator(func): + + def wrapper(self, *args, **kw): + for key, acc in [f.split('.') for f in fields]: + if key in kw: + newkey = key + '.' + acc + (acc and '.' or '') + for i in range(len(kw[key])): + kw[newkey + str(i + 1)] = kw[key][i] + kw.pop(key) + return func(self, *args, **kw) + wrapper.__doc__ = "{0}\nLists: {1}".format(func.__doc__, + ', '.join(fields)) + return add_attrs_from(func, to=wrapper) + return decorator + + +def http_body(field): + + def decorator(func): + + def wrapper(*args, **kw): + if any([f not in kw for f in (field, 'content_type')]): + message = "{0} requires {1} and content_type arguments for " \ + "building HTTP body".format(func.action, field) + raise KeyError(message) + kw['body'] = kw.pop(field) + kw['headers'] = { + 'Content-Type': kw.pop('content_type'), + 'Content-MD5': content_md5(kw['body']), + } + return func(*args, **kw) + wrapper.__doc__ = "{0}\nRequired HTTP Body: " \ + "{1}".format(func.__doc__, field) + return add_attrs_from(func, to=wrapper) + return decorator + + +def destructure_object(value, into, prefix, members=False): + if isinstance(value, boto.mws.response.ResponseElement): + destructure_object(value.__dict__, into, prefix, members=members) + elif isinstance(value, collections.Mapping): + for name in value: + if name.startswith('_'): + continue + destructure_object(value[name], into, prefix + '.' + name, + members=members) + elif isinstance(value, six.string_types): + into[prefix] = value + elif isinstance(value, collections.Iterable): + for index, element in enumerate(value): + suffix = (members and '.member.' 
or '.') + str(index + 1) + destructure_object(element, into, prefix + suffix, + members=members) + elif isinstance(value, bool): + into[prefix] = str(value).lower() + else: + into[prefix] = value + + +def structured_objects(*fields, **kwargs): + + def decorator(func): + + def wrapper(*args, **kw): + members = kwargs.get('members', False) + for field in filter(lambda i: i in kw, fields): + destructure_object(kw.pop(field), kw, field, members=members) + return func(*args, **kw) + wrapper.__doc__ = "{0}\nElement|Iter|Map: {1}\n" \ + "(ResponseElement or anything iterable/dict-like)" \ + .format(func.__doc__, ', '.join(fields)) + return add_attrs_from(func, to=wrapper) + return decorator + + +def requires(*groups): + + def decorator(func): + + def requires(*args, **kw): + hasgroup = lambda group: all(key in kw for key in group) + if 1 != len(list(filter(hasgroup, groups))): + message = ' OR '.join(['+'.join(g) for g in groups]) + message = "{0} requires {1} argument(s)" \ + "".format(func.action, message) + raise KeyError(message) + return func(*args, **kw) + message = ' OR '.join(['+'.join(g) for g in groups]) + requires.__doc__ = "{0}\nRequired: {1}".format(func.__doc__, + message) + return add_attrs_from(func, to=requires) + return decorator + + +def exclusive(*groups): + + def decorator(func): + + def wrapper(*args, **kw): + hasgroup = lambda group: all(key in kw for key in group) + if len(list(filter(hasgroup, groups))) not in (0, 1): + message = ' OR '.join(['+'.join(g) for g in groups]) + message = "{0} requires either {1}" \ + "".format(func.action, message) + raise KeyError(message) + return func(*args, **kw) + message = ' OR '.join(['+'.join(g) for g in groups]) + wrapper.__doc__ = "{0}\nEither: {1}".format(func.__doc__, + message) + return add_attrs_from(func, to=wrapper) + return decorator + + +def dependent(field, *groups): + + def decorator(func): + + def wrapper(*args, **kw): + hasgroup = lambda group: all(key in kw for key in group) + if field in kw and not any(hasgroup(g) for g in groups): + message = ' OR '.join(['+'.join(g) for g in groups]) + message = "{0} argument {1} requires {2}" \ + "".format(func.action, field, message) + raise KeyError(message) + return func(*args, **kw) + message = ' OR '.join(['+'.join(g) for g in groups]) + wrapper.__doc__ = "{0}\n{1} requires: {2}".format(func.__doc__, + field, + message) + return add_attrs_from(func, to=wrapper) + return decorator + + +def requires_some_of(*fields): + + def decorator(func): + + def requires(*args, **kw): + if not any(i in kw for i in fields): + message = "{0} requires at least one of {1} argument(s)" \ + "".format(func.action, ', '.join(fields)) + raise KeyError(message) + return func(*args, **kw) + requires.__doc__ = "{0}\nSome Required: {1}".format(func.__doc__, + ', '.join(fields)) + return add_attrs_from(func, to=requires) + return decorator + + +def boolean_arguments(*fields): + + def decorator(func): + + def wrapper(*args, **kw): + for field in [f for f in fields if isinstance(kw.get(f), bool)]: + kw[field] = str(kw[field]).lower() + return func(*args, **kw) + wrapper.__doc__ = "{0}\nBooleans: {1}".format(func.__doc__, + ', '.join(fields)) + return add_attrs_from(func, to=wrapper) + return decorator + + +def api_action(section, quota, restore, *api): + + def decorator(func, quota=int(quota), restore=float(restore)): + version, accesskey, path = api_version_path[section] + action = ''.join(api or map(str.capitalize, func.__name__.split('_'))) + + def wrapper(self, *args, **kw): + kw.setdefault(accesskey, 
getattr(self, accesskey, None)) + if kw[accesskey] is None: + message = "{0} requires {1} argument. Set the " \ + "MWSConnection.{2} attribute?" \ + "".format(action, accesskey, accesskey) + raise KeyError(message) + kw['Action'] = action + kw['Version'] = version + response = self._response_factory(action, connection=self) + request = dict(path=path, quota=quota, restore=restore) + return func(self, request, response, *args, **kw) + for attr in decorated_attrs: + setattr(wrapper, attr, locals().get(attr)) + wrapper.__doc__ = "MWS {0}/{1} API call; quota={2} restore={3:.2f}\n" \ + "{4}".format(action, version, quota, restore, + func.__doc__) + api_call_map[action] = func.__name__ + return wrapper + return decorator + + +class MWSConnection(AWSQueryConnection): + + ResponseFactory = boto.mws.response.ResponseFactory + ResponseErrorFactory = boto.mws.exception.ResponseErrorFactory + + def __init__(self, *args, **kw): + kw.setdefault('host', 'mws.amazonservices.com') + self._sandboxed = kw.pop('sandbox', False) + self.Merchant = kw.pop('Merchant', None) or kw.get('SellerId') + self.SellerId = kw.pop('SellerId', None) or self.Merchant + kw = self._setup_factories(kw.pop('factory_scopes', []), **kw) + super(MWSConnection, self).__init__(*args, **kw) + + def _setup_factories(self, extrascopes, **kw): + for factory, (scope, Default) in { + 'response_factory': + (boto.mws.response, self.ResponseFactory), + 'response_error_factory': + (boto.mws.exception, self.ResponseErrorFactory), + }.items(): + if factory in kw: + setattr(self, '_' + factory, kw.pop(factory)) + else: + scopes = extrascopes + [scope] + setattr(self, '_' + factory, Default(scopes=scopes)) + return kw + + def _sandboxify(self, path): + if not self._sandboxed: + return path + splat = path.split('/') + splat[-2] += '_Sandbox' + return '/'.join(splat) + + def _required_auth_capability(self): + return ['mws'] + + def _post_request(self, request, params, parser, body='', headers=None): + """Make a POST request, optionally with a content body, + and return the response, optionally as raw text. + """ + headers = headers or {} + path = self._sandboxify(request['path']) + request = self.build_base_http_request('POST', path, None, data=body, + params=params, headers=headers, + host=self.host) + try: + response = self._mexe(request, override_num_retries=None) + except BotoServerError as bs: + raise self._response_error_factory(bs.status, bs.reason, bs.body) + body = response.read() + boto.log.debug(body) + if not body: + boto.log.error('Null body %s' % body) + raise self._response_error_factory(response.status, + response.reason, body) + if response.status != 200: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self._response_error_factory(response.status, + response.reason, body) + digest = response.getheader('Content-MD5') + if digest is not None: + assert content_md5(body) == digest + contenttype = response.getheader('Content-Type') + return self._parse_response(parser, contenttype, body) + + def _parse_response(self, parser, contenttype, body): + if not contenttype.startswith('text/xml'): + return body + handler = XmlHandler(parser, self) + xml.sax.parseString(body, handler) + return parser + + def method_for(self, name): + """Return the MWS API method referred to in the argument. + The named method can be in CamelCase or underlined_lower_case. 
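+           For example (illustrative), method_for('ListOrders') and
+           method_for('list_orders') both resolve to the bound
+           list_orders method.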
+           This is the complement to MWSConnection.any_call.action
+        """
+        # string.capwords('list_orders', '_') gives 'List_Orders', so the
+        # separators must be dropped to recover the CamelCase action name.
+        action = '_' in name and \
+            string.capwords(name, '_').replace('_', '') or name
+        if action in api_call_map:
+            return getattr(self, api_call_map[action])
+        return None
+
+    def iter_call(self, call, *args, **kw):
+        """Pass a call name as the first argument and a generator
+           is returned for the initial response and any continuation
+           call responses made using the NextToken.
+        """
+        method = self.method_for(call)
+        assert method, 'No call named "{0}"'.format(call)
+        return self.iter_response(method(*args, **kw))
+
+    def iter_response(self, response):
+        """Pass a call's response as the initial argument and a
+           generator is returned for the initial response and any
+           continuation call responses made using the NextToken.
+        """
+        yield response
+        more = self.method_for(response._action + 'ByNextToken')
+        while more and response._result.HasNext == 'true':
+            response = more(NextToken=response._result.NextToken)
+            yield response
+
+    @requires(['FeedType'])
+    @boolean_arguments('PurgeAndReplace')
+    @http_body('FeedContent')
+    @structured_lists('MarketplaceIdList.Id')
+    @api_action('Feeds', 15, 120)
+    def submit_feed(self, request, response, headers=None, body='', **kw):
+        """Uploads a feed for processing by Amazon MWS.
+        """
+        headers = headers or {}
+        return self._post_request(request, kw, response, body=body,
+                                  headers=headers)
+
+    @structured_lists('FeedSubmissionIdList.Id', 'FeedTypeList.Type',
+                      'FeedProcessingStatusList.Status')
+    @api_action('Feeds', 10, 45)
+    def get_feed_submission_list(self, request, response, **kw):
+        """Returns a list of all feed submissions submitted in the
+        previous 90 days.
+        """
+        return self._post_request(request, kw, response)
+
+    @requires(['NextToken'])
+    @api_action('Feeds', 0, 0)
+    def get_feed_submission_list_by_next_token(self, request, response, **kw):
+        """Returns a list of feed submissions using the NextToken parameter.
+        """
+        return self._post_request(request, kw, response)
+
+    @structured_lists('FeedTypeList.Type', 'FeedProcessingStatusList.Status')
+    @api_action('Feeds', 10, 45)
+    def get_feed_submission_count(self, request, response, **kw):
+        """Returns a count of the feeds submitted in the previous 90 days.
+        """
+        return self._post_request(request, kw, response)
+
+    @structured_lists('FeedSubmissionIdList.Id', 'FeedTypeList.Type')
+    @api_action('Feeds', 10, 45)
+    def cancel_feed_submissions(self, request, response, **kw):
+        """Cancels one or more feed submissions and returns a
+        count of the feed submissions that were canceled.
+        """
+        return self._post_request(request, kw, response)
+
+    @requires(['FeedSubmissionId'])
+    @api_action('Feeds', 15, 60)
+    def get_feed_submission_result(self, request, response, **kw):
+        """Returns the feed processing report.
+        """
+        return self._post_request(request, kw, response)
+
+    def get_service_status(self, **kw):
+        """Instruct the user on how to get service status.
+        """
+        sections = ', '.join(map(str.lower, api_version_path.keys()))
+        message = "Use {0}.get_(section)_service_status(), " \
+                  "where (section) is one of the following: " \
+                  "{1}".format(self.__class__.__name__, sections)
+        raise AttributeError(message)
+
+    @requires(['ReportType'])
+    @structured_lists('MarketplaceIdList.Id')
+    @boolean_arguments('ReportOptions=ShowSalesChannel')
+    @api_action('Reports', 15, 60)
+    def request_report(self, request, response, **kw):
+        """Creates a report request and submits the request to Amazon MWS.
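+
+        Example (hypothetical connection and values):
+            conn.request_report(
+                ReportType='_GET_FLAT_FILE_OPEN_LISTINGS_DATA_',
+                MarketplaceIdList=['ATVPDKIKX0DER'])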
+ """ + return self._post_request(request, kw, response) + + @structured_lists('ReportRequestIdList.Id', 'ReportTypeList.Type', + 'ReportProcessingStatusList.Status') + @api_action('Reports', 10, 45) + def get_report_request_list(self, request, response, **kw): + """Returns a list of report requests that you can use to get the + ReportRequestId for a report. + """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('Reports', 0, 0) + def get_report_request_list_by_next_token(self, request, response, **kw): + """Returns a list of report requests using the NextToken, + which was supplied by a previous request to either + GetReportRequestListByNextToken or GetReportRequestList, where + the value of HasNext was true in that previous request. + """ + return self._post_request(request, kw, response) + + @structured_lists('ReportTypeList.Type', + 'ReportProcessingStatusList.Status') + @api_action('Reports', 10, 45) + def get_report_request_count(self, request, response, **kw): + """Returns a count of report requests that have been submitted + to Amazon MWS for processing. + """ + return self._post_request(request, kw, response) + + @api_action('Reports', 10, 45) + def cancel_report_requests(self, request, response, **kw): + """Cancel one or more report requests, returning the count of the + canceled report requests and the report request information. + """ + return self._post_request(request, kw, response) + + @boolean_arguments('Acknowledged') + @structured_lists('ReportRequestIdList.Id', 'ReportTypeList.Type') + @api_action('Reports', 10, 60) + def get_report_list(self, request, response, **kw): + """Returns a list of reports that were created in the previous + 90 days that match the query parameters. + """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('Reports', 0, 0) + def get_report_list_by_next_token(self, request, response, **kw): + """Returns a list of reports using the NextToken, which + was supplied by a previous request to either + GetReportListByNextToken or GetReportList, where the + value of HasNext was true in the previous call. + """ + return self._post_request(request, kw, response) + + @boolean_arguments('Acknowledged') + @structured_lists('ReportTypeList.Type') + @api_action('Reports', 10, 45) + def get_report_count(self, request, response, **kw): + """Returns a count of the reports, created in the previous 90 days, + with a status of _DONE_ and that are available for download. + """ + return self._post_request(request, kw, response) + + @requires(['ReportId']) + @api_action('Reports', 15, 60) + def get_report(self, request, response, **kw): + """Returns the contents of a report. + """ + return self._post_request(request, kw, response) + + @requires(['ReportType', 'Schedule']) + @api_action('Reports', 10, 45) + def manage_report_schedule(self, request, response, **kw): + """Creates, updates, or deletes a report request schedule for + a specified report type. + """ + return self._post_request(request, kw, response) + + @structured_lists('ReportTypeList.Type') + @api_action('Reports', 10, 45) + def get_report_schedule_list(self, request, response, **kw): + """Returns a list of order report requests that are scheduled + to be submitted to Amazon MWS for processing. 
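+
+        Example (illustrative report type):
+            conn.get_report_schedule_list(
+                ReportTypeList=['_GET_ORDERS_DATA_'])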
+ """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('Reports', 0, 0) + def get_report_schedule_list_by_next_token(self, request, response, **kw): + """Returns a list of report requests using the NextToken, + which was supplied by a previous request to either + GetReportScheduleListByNextToken or GetReportScheduleList, + where the value of HasNext was true in that previous request. + """ + return self._post_request(request, kw, response) + + @structured_lists('ReportTypeList.Type') + @api_action('Reports', 10, 45) + def get_report_schedule_count(self, request, response, **kw): + """Returns a count of order report requests that are scheduled + to be submitted to Amazon MWS. + """ + return self._post_request(request, kw, response) + + @requires(['ReportIdList']) + @boolean_arguments('Acknowledged') + @structured_lists('ReportIdList.Id') + @api_action('Reports', 10, 45) + def update_report_acknowledgements(self, request, response, **kw): + """Updates the acknowledged status of one or more reports. + """ + return self._post_request(request, kw, response) + + @requires(['ShipFromAddress', 'InboundShipmentPlanRequestItems']) + @structured_objects('ShipFromAddress', 'InboundShipmentPlanRequestItems') + @api_action('Inbound', 30, 0.5) + def create_inbound_shipment_plan(self, request, response, **kw): + """Returns the information required to create an inbound shipment. + """ + return self._post_request(request, kw, response) + + @requires(['ShipmentId', 'InboundShipmentHeader', 'InboundShipmentItems']) + @structured_objects('InboundShipmentHeader', 'InboundShipmentItems') + @api_action('Inbound', 30, 0.5) + def create_inbound_shipment(self, request, response, **kw): + """Creates an inbound shipment. + """ + return self._post_request(request, kw, response) + + @requires(['ShipmentId']) + @structured_objects('InboundShipmentHeader', 'InboundShipmentItems') + @api_action('Inbound', 30, 0.5) + def update_inbound_shipment(self, request, response, **kw): + """Updates an existing inbound shipment. Amazon documentation + is ambiguous as to whether the InboundShipmentHeader and + InboundShipmentItems arguments are required. + """ + return self._post_request(request, kw, response) + + @requires_some_of('ShipmentIdList', 'ShipmentStatusList') + @structured_lists('ShipmentIdList.Id', 'ShipmentStatusList.Status') + @api_action('Inbound', 30, 0.5) + def list_inbound_shipments(self, request, response, **kw): + """Returns a list of inbound shipments based on criteria that + you specify. + """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('Inbound', 30, 0.5) + def list_inbound_shipments_by_next_token(self, request, response, **kw): + """Returns the next page of inbound shipments using the NextToken + parameter. + """ + return self._post_request(request, kw, response) + + @requires(['ShipmentId'], ['LastUpdatedAfter', 'LastUpdatedBefore']) + @api_action('Inbound', 30, 0.5) + def list_inbound_shipment_items(self, request, response, **kw): + """Returns a list of items in a specified inbound shipment, or a + list of items that were updated within a specified time frame. + """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('Inbound', 30, 0.5) + def list_inbound_shipment_items_by_next_token(self, request, response, **kw): + """Returns the next page of inbound shipment items using the + NextToken parameter. 
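+
+        Rather than paging by hand, iter_call('ListInboundShipmentItems',
+        ShipmentId='FBA-0001') (hypothetical id) yields every page by
+        following NextToken automatically; see iter_call above.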
+        """
+        return self._post_request(request, kw, response)
+
+    @api_action('Inbound', 2, 300, 'GetServiceStatus')
+    def get_inbound_service_status(self, request, response, **kw):
+        """Returns the operational status of the Fulfillment Inbound
+        Shipment API section.
+        """
+        return self._post_request(request, kw, response)
+
+    @requires(['SellerSkus'], ['QueryStartDateTime'])
+    @structured_lists('SellerSkus.member')
+    @api_action('Inventory', 30, 0.5)
+    def list_inventory_supply(self, request, response, **kw):
+        """Returns information about the availability of a seller's
+        inventory.
+        """
+        return self._post_request(request, kw, response)
+
+    @requires(['NextToken'])
+    @api_action('Inventory', 30, 0.5)
+    def list_inventory_supply_by_next_token(self, request, response, **kw):
+        """Returns the next page of information about the availability
+        of a seller's inventory using the NextToken parameter.
+        """
+        return self._post_request(request, kw, response)
+
+    @api_action('Inventory', 2, 300, 'GetServiceStatus')
+    def get_inventory_service_status(self, request, response, **kw):
+        """Returns the operational status of the Fulfillment Inventory
+        API section.
+        """
+        return self._post_request(request, kw, response)
+
+    @requires(['PackageNumber'])
+    @api_action('Outbound', 30, 0.5)
+    def get_package_tracking_details(self, request, response, **kw):
+        """Returns delivery tracking information for a package in
+        an outbound shipment for a Multi-Channel Fulfillment order.
+        """
+        return self._post_request(request, kw, response)
+
+    @requires(['Address', 'Items'])
+    @structured_objects('Address', 'Items')
+    @api_action('Outbound', 30, 0.5)
+    def get_fulfillment_preview(self, request, response, **kw):
+        """Returns a list of fulfillment order previews based on items
+        and shipping speed categories that you specify.
+        """
+        return self._post_request(request, kw, response)
+
+    @requires(['SellerFulfillmentOrderId', 'DisplayableOrderId',
+               'ShippingSpeedCategory', 'DisplayableOrderDateTime',
+               'DestinationAddress', 'DisplayableOrderComment',
+               'Items'])
+    @structured_objects('DestinationAddress', 'Items')
+    @api_action('Outbound', 30, 0.5)
+    def create_fulfillment_order(self, request, response, **kw):
+        """Requests that Amazon ship items from the seller's inventory
+        to a destination address.
+        """
+        return self._post_request(request, kw, response)
+
+    @requires(['SellerFulfillmentOrderId'])
+    @api_action('Outbound', 30, 0.5)
+    def get_fulfillment_order(self, request, response, **kw):
+        """Returns a fulfillment order based on a specified
+        SellerFulfillmentOrderId.
+        """
+        return self._post_request(request, kw, response)
+
+    @api_action('Outbound', 30, 0.5)
+    def list_all_fulfillment_orders(self, request, response, **kw):
+        """Returns a list of fulfillment orders fulfilled after (or
+        at) a specified date or by fulfillment method.
+        """
+        return self._post_request(request, kw, response)
+
+    @requires(['NextToken'])
+    @api_action('Outbound', 30, 0.5)
+    def list_all_fulfillment_orders_by_next_token(self, request, response, **kw):
+        """Returns the next page of fulfillment orders using the
+        NextToken parameter.
+        """
+        return self._post_request(request, kw, response)
+
+    @requires(['SellerFulfillmentOrderId'])
+    @api_action('Outbound', 30, 0.5)
+    def cancel_fulfillment_order(self, request, response, **kw):
+        """Requests that Amazon stop attempting to fulfill an existing
+        fulfillment order.
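+
+        Example (hypothetical order id):
+            conn.cancel_fulfillment_order(
+                SellerFulfillmentOrderId='FBA-0001')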
+ """ + return self._post_request(request, kw, response) + + @api_action('Outbound', 2, 300, 'GetServiceStatus') + def get_outbound_service_status(self, request, response, **kw): + """Returns the operational status of the Fulfillment Outbound + API section. + """ + return self._post_request(request, kw, response) + + @requires(['CreatedAfter'], ['LastUpdatedAfter']) + @requires(['MarketplaceId']) + @exclusive(['CreatedAfter'], ['LastUpdatedAfter']) + @dependent('CreatedBefore', ['CreatedAfter']) + @exclusive(['LastUpdatedAfter'], ['BuyerEmail'], ['SellerOrderId']) + @dependent('LastUpdatedBefore', ['LastUpdatedAfter']) + @exclusive(['CreatedAfter'], ['LastUpdatedBefore']) + @structured_objects('OrderTotal', 'ShippingAddress', + 'PaymentExecutionDetail') + @structured_lists('MarketplaceId.Id', 'OrderStatus.Status', + 'FulfillmentChannel.Channel', 'PaymentMethod.') + @api_action('Orders', 6, 60) + def list_orders(self, request, response, **kw): + """Returns a list of orders created or updated during a time + frame that you specify. + """ + toggle = set(('FulfillmentChannel.Channel.1', + 'OrderStatus.Status.1', 'PaymentMethod.1', + 'LastUpdatedAfter', 'LastUpdatedBefore')) + for do, dont in { + 'BuyerEmail': toggle.union(['SellerOrderId']), + 'SellerOrderId': toggle.union(['BuyerEmail']), + }.items(): + if do in kw and any(i in dont for i in kw): + message = "Don't include {0} when specifying " \ + "{1}".format(' or '.join(dont), do) + raise AssertionError(message) + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('Orders', 6, 60) + def list_orders_by_next_token(self, request, response, **kw): + """Returns the next page of orders using the NextToken value + that was returned by your previous request to either + ListOrders or ListOrdersByNextToken. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderId']) + @structured_lists('AmazonOrderId.Id') + @api_action('Orders', 6, 60) + def get_order(self, request, response, **kw): + """Returns an order for each AmazonOrderId that you specify. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderId']) + @api_action('Orders', 30, 2) + def list_order_items(self, request, response, **kw): + """Returns order item information for an AmazonOrderId that + you specify. + """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('Orders', 30, 2) + def list_order_items_by_next_token(self, request, response, **kw): + """Returns the next page of order items using the NextToken + value that was returned by your previous request to either + ListOrderItems or ListOrderItemsByNextToken. + """ + return self._post_request(request, kw, response) + + @api_action('Orders', 2, 300, 'GetServiceStatus') + def get_orders_service_status(self, request, response, **kw): + """Returns the operational status of the Orders API section. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'Query']) + @api_action('Products', 20, 20) + def list_matching_products(self, request, response, **kw): + """Returns a list of products and their attributes, ordered + by relevancy, based on a search query that you specify. 
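+
+        Example (illustrative marketplace id and query):
+            conn.list_matching_products(MarketplaceId='ATVPDKIKX0DER',
+                                        Query='python programming')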
+ """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'ASINList']) + @structured_lists('ASINList.ASIN') + @api_action('Products', 20, 20) + def get_matching_product(self, request, response, **kw): + """Returns a list of products and their attributes, based on + a list of ASIN values that you specify. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'IdType', 'IdList']) + @structured_lists('IdList.Id') + @api_action('Products', 20, 20) + def get_matching_product_for_id(self, request, response, **kw): + """Returns a list of products and their attributes, based on + a list of Product IDs that you specify. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'SellerSKUList']) + @structured_lists('SellerSKUList.SellerSKU') + @api_action('Products', 20, 10, 'GetCompetitivePricingForSKU') + def get_competitive_pricing_for_sku(self, request, response, **kw): + """Returns the current competitive pricing of a product, + based on the SellerSKUs and MarketplaceId that you specify. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'ASINList']) + @structured_lists('ASINList.ASIN') + @api_action('Products', 20, 10, 'GetCompetitivePricingForASIN') + def get_competitive_pricing_for_asin(self, request, response, **kw): + """Returns the current competitive pricing of a product, + based on the ASINs and MarketplaceId that you specify. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'SellerSKUList']) + @structured_lists('SellerSKUList.SellerSKU') + @api_action('Products', 20, 5, 'GetLowestOfferListingsForSKU') + def get_lowest_offer_listings_for_sku(self, request, response, **kw): + """Returns the lowest price offer listings for a specific + product by item condition and SellerSKUs. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'ASINList']) + @structured_lists('ASINList.ASIN') + @api_action('Products', 20, 5, 'GetLowestOfferListingsForASIN') + def get_lowest_offer_listings_for_asin(self, request, response, **kw): + """Returns the lowest price offer listings for a specific + product by item condition and ASINs. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'SellerSKU']) + @api_action('Products', 20, 20, 'GetProductCategoriesForSKU') + def get_product_categories_for_sku(self, request, response, **kw): + """Returns the product categories that a SellerSKU belongs to. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'ASIN']) + @api_action('Products', 20, 20, 'GetProductCategoriesForASIN') + def get_product_categories_for_asin(self, request, response, **kw): + """Returns the product categories that an ASIN belongs to. + """ + return self._post_request(request, kw, response) + + @api_action('Products', 2, 300, 'GetServiceStatus') + def get_products_service_status(self, request, response, **kw): + """Returns the operational status of the Products API section. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'SellerSKUList']) + @structured_lists('SellerSKUList.SellerSKU') + @api_action('Products', 20, 10, 'GetMyPriceForSKU') + def get_my_price_for_sku(self, request, response, **kw): + """Returns pricing information for your own offer listings, based on SellerSKU. 
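+
+        Example (hypothetical SKU):
+            conn.get_my_price_for_sku(MarketplaceId='ATVPDKIKX0DER',
+                                      SellerSKUList=['SKU-0001'])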
+        """
+        return self._post_request(request, kw, response)
+
+    @requires(['MarketplaceId', 'ASINList'])
+    @structured_lists('ASINList.ASIN')
+    @api_action('Products', 20, 10, 'GetMyPriceForASIN')
+    def get_my_price_for_asin(self, request, response, **kw):
+        """Returns pricing information for your own offer listings, based on ASIN.
+        """
+        return self._post_request(request, kw, response)
+
+    @api_action('Sellers', 15, 60)
+    def list_marketplace_participations(self, request, response, **kw):
+        """Returns a list of marketplaces that the seller submitting
+        the request can sell in, and a list of participations that
+        include seller-specific information in that marketplace.
+        """
+        return self._post_request(request, kw, response)
+
+    @requires(['NextToken'])
+    @api_action('Sellers', 15, 60)
+    def list_marketplace_participations_by_next_token(self, request, response,
+                                                      **kw):
+        """Returns the next page of marketplaces and participations
+        using the NextToken value that was returned by your
+        previous request to either ListMarketplaceParticipations
+        or ListMarketplaceParticipationsByNextToken.
+        """
+        return self._post_request(request, kw, response)
+
+    @requires(['MarketplaceId'])
+    @api_action('Recommendations', 5, 2)
+    def get_last_updated_time_for_recommendations(self, request, response,
+                                                  **kw):
+        """Checks whether there are active recommendations for each category
+        for the given marketplace, and if there are, returns the time when
+        recommendations were last updated for each category.
+        """
+        return self._post_request(request, kw, response)
+
+    @requires(['MarketplaceId'])
+    @structured_lists('CategoryQueryList.CategoryQuery')
+    @api_action('Recommendations', 5, 2)
+    def list_recommendations(self, request, response, **kw):
+        """Returns your active recommendations for a specific category or for
+        all categories for a specific marketplace.
+        """
+        return self._post_request(request, kw, response)
+
+    @requires(['NextToken'])
+    @api_action('Recommendations', 5, 2)
+    def list_recommendations_by_next_token(self, request, response, **kw):
+        """Returns the next page of recommendations using the NextToken
+        parameter.
+        """
+        return self._post_request(request, kw, response)
+
+    @api_action('Recommendations', 2, 300, 'GetServiceStatus')
+    def get_recommendations_service_status(self, request, response, **kw):
+        """Returns the operational status of the Recommendations API section.
+        """
+        return self._post_request(request, kw, response)
+
+    @api_action('CustomerInfo', 15, 12)
+    def list_customers(self, request, response, **kw):
+        """Returns a list of customer accounts based on search criteria that
+        you specify.
+        """
+        return self._post_request(request, kw, response)
+
+    @requires(['NextToken'])
+    @api_action('CustomerInfo', 50, 3)
+    def list_customers_by_next_token(self, request, response, **kw):
+        """Returns the next page of customers using the NextToken parameter.
+        """
+        return self._post_request(request, kw, response)
+
+    @requires(['CustomerIdList'])
+    @structured_lists('CustomerIdList.CustomerId')
+    @api_action('CustomerInfo', 15, 12)
+    def get_customers_for_customer_id(self, request, response, **kw):
+        """Returns a list of customer accounts based on the CustomerId
+        values that you specify.
+        """
+        return self._post_request(request, kw, response)
+
+    @api_action('CustomerInfo', 2, 300, 'GetServiceStatus')
+    def get_customerinfo_service_status(self, request, response, **kw):
+        """Returns the operational status of the Customer Information API
+        section.
+ """ + return self._post_request(request, kw, response) + + @requires(['DateRangeStart']) + @api_action('CartInfo', 15, 12) + def list_carts(self, request, response, **kw): + """Returns a list of shopping carts in your Webstore that were last + updated during the time range that you specify. + """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('CartInfo', 50, 3) + def list_carts_by_next_token(self, request, response, **kw): + """Returns the next page of shopping carts using the NextToken + parameter. + """ + return self._post_request(request, kw, response) + + @requires(['CartIdList']) + @structured_lists('CartIdList.CartId') + @api_action('CartInfo', 15, 12) + def get_carts(self, request, response, **kw): + """Returns shopping carts based on the CartId values that you specify. + """ + return self._post_request(request, kw, response) + + @api_action('CartInfo', 2, 300, 'GetServiceStatus') + def get_cartinfo_service_status(self, request, response, **kw): + """Returns the operational status of the Cart Information API section. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'Destination']) + @structured_objects('Destination', members=True) + @api_action('Subscriptions', 25, 0.5) + def register_destination(self, request, response, **kw): + """Specifies a new destination where you want to receive notifications. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'Destination']) + @structured_objects('Destination', members=True) + @api_action('Subscriptions', 25, 0.5) + def deregister_destination(self, request, response, **kw): + """Removes an existing destination from the list of registered + destinations. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId']) + @api_action('Subscriptions', 25, 0.5) + def list_registered_destinations(self, request, response, **kw): + """Lists all current destinations that you have registered. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'Destination']) + @structured_objects('Destination', members=True) + @api_action('Subscriptions', 25, 0.5) + def send_test_notification_to_destination(self, request, response, **kw): + """Sends a test notification to an existing destination. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'Subscription']) + @structured_objects('Subscription', members=True) + @api_action('Subscriptions', 25, 0.5) + def create_subscription(self, request, response, **kw): + """Creates a new subscription for the specified notification type + and destination. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'NotificationType', 'Destination']) + @structured_objects('Destination', members=True) + @api_action('Subscriptions', 25, 0.5) + def get_subscription(self, request, response, **kw): + """Gets the subscription for the specified notification type and + destination. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'NotificationType', 'Destination']) + @structured_objects('Destination', members=True) + @api_action('Subscriptions', 25, 0.5) + def delete_subscription(self, request, response, **kw): + """Deletes the subscription for the specified notification type and + destination. 
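+
+        Example (illustrative; a plain dict is flattened by
+        structured_objects into Destination.DeliveryChannel, etc.):
+            conn.delete_subscription(MarketplaceId='ATVPDKIKX0DER',
+                                     NotificationType='AnyOfferChanged',
+                                     Destination={'DeliveryChannel': 'SQS'})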
+ """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId']) + @api_action('Subscriptions', 25, 0.5) + def list_subscriptions(self, request, response, **kw): + """Returns a list of all your current subscriptions. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'Subscription']) + @structured_objects('Subscription', members=True) + @api_action('Subscriptions', 25, 0.5) + def update_subscription(self, request, response, **kw): + """Updates the subscription for the specified notification type and + destination. + """ + return self._post_request(request, kw, response) + + @api_action('Subscriptions', 2, 300, 'GetServiceStatus') + def get_subscriptions_service_status(self, request, response, **kw): + """Returns the operational status of the Subscriptions API section. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderReferenceId', 'OrderReferenceAttributes']) + @structured_objects('OrderReferenceAttributes') + @api_action('OffAmazonPayments', 10, 1) + def set_order_reference_details(self, request, response, **kw): + """Sets order reference details such as the order total and a + description for the order. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderReferenceId']) + @api_action('OffAmazonPayments', 20, 2) + def get_order_reference_details(self, request, response, **kw): + """Returns details about the Order Reference object and its current + state. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderReferenceId']) + @api_action('OffAmazonPayments', 10, 1) + def confirm_order_reference(self, request, response, **kw): + """Confirms that the order reference is free of constraints and all + required information has been set on the order reference. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderReferenceId']) + @api_action('OffAmazonPayments', 10, 1) + def cancel_order_reference(self, request, response, **kw): + """Cancel an order reference; all authorizations associated with + this order reference are also closed. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderReferenceId']) + @api_action('OffAmazonPayments', 10, 1) + def close_order_reference(self, request, response, **kw): + """Confirms that an order reference has been fulfilled (fully + or partially) and that you do not expect to create any new + authorizations on this order reference. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderReferenceId', 'AuthorizationReferenceId', + 'AuthorizationAmount']) + @structured_objects('AuthorizationAmount') + @api_action('OffAmazonPayments', 10, 1) + def authorize(self, request, response, **kw): + """Reserves a specified amount against the payment method(s) stored in + the order reference. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonAuthorizationId']) + @api_action('OffAmazonPayments', 20, 2) + def get_authorization_details(self, request, response, **kw): + """Returns the status of a particular authorization and the total + amount captured on the authorization. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonAuthorizationId', 'CaptureReferenceId', 'CaptureAmount']) + @structured_objects('CaptureAmount') + @api_action('OffAmazonPayments', 10, 1) + def capture(self, request, response, **kw): + """Captures funds from an authorized payment instrument. 
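+
+        Example (hypothetical ids; the CaptureAmount dict is flattened
+        into CaptureAmount.Amount and CaptureAmount.CurrencyCode):
+            conn.capture(AmazonAuthorizationId='A000000000',
+                         CaptureReferenceId='capture-1',
+                         CaptureAmount={'Amount': '9.99',
+                                        'CurrencyCode': 'USD'})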
+        """
+        return self._post_request(request, kw, response)
+
+    @requires(['AmazonCaptureId'])
+    @api_action('OffAmazonPayments', 20, 2)
+    def get_capture_details(self, request, response, **kw):
+        """Returns the status of a particular capture and the total amount
+        refunded on the capture.
+        """
+        return self._post_request(request, kw, response)
+
+    @requires(['AmazonAuthorizationId'])
+    @api_action('OffAmazonPayments', 10, 1)
+    def close_authorization(self, request, response, **kw):
+        """Closes an authorization.
+        """
+        return self._post_request(request, kw, response)
+
+    @requires(['AmazonCaptureId', 'RefundReferenceId', 'RefundAmount'])
+    @structured_objects('RefundAmount')
+    @api_action('OffAmazonPayments', 10, 1)
+    def refund(self, request, response, **kw):
+        """Refunds a previously captured amount.
+        """
+        return self._post_request(request, kw, response)
+
+    @requires(['AmazonRefundId'])
+    @api_action('OffAmazonPayments', 20, 2)
+    def get_refund_details(self, request, response, **kw):
+        """Returns the status of a particular refund.
+        """
+        return self._post_request(request, kw, response)
+
+    @api_action('OffAmazonPayments', 2, 300, 'GetServiceStatus')
+    def get_offamazonpayments_service_status(self, request, response, **kw):
+        """Returns the operational status of the Off-Amazon Payments API
+        section.
+        """
+        return self._post_request(request, kw, response)
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/mws/exception.py b/desktop/core/ext-py/boto-2.38.0/boto/mws/exception.py
new file mode 100644
index 0000000000000000000000000000000000000000..fba8a5d5d26526a78299e4261e0c5907f823817b
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/mws/exception.py
@@ -0,0 +1,69 @@
+# Copyright (c) 2012-2014 Andy Davidoff http://www.disruptek.com/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+from boto.exception import BotoServerError
+from boto.mws.response import ResponseFactory
+
+
+class ResponseErrorFactory(ResponseFactory):
+
+    def __call__(self, status, reason, body=None):
+        server = BotoServerError(status, reason, body=body)
+        supplied = self.find_element(server.error_code, '', ResponseError)
+        return supplied(status, reason, body=body)
+
+
+class ResponseError(BotoServerError):
+    """
+    Undefined response error.
+ """ + retry = False + + def __repr__(self): + return '{0.__name__}({1.reason}: "{1.message}")' \ + .format(self.__class__, self) + + def __str__(self): + doc = self.__doc__ and self.__doc__.strip() + "\n" or '' + return '{1.__name__}: {0.reason} {2}\n{3}' \ + '{0.message}'.format(self, self.__class__, + self.retry and '(Retriable)' or '', doc) + + +class RetriableResponseError(ResponseError): + retry = True + + +class InvalidParameterValue(ResponseError): + """ + One or more parameter values in the request is invalid. + """ + + +class InvalidParameter(ResponseError): + """ + One or more parameters in the request is invalid. + """ + + +class InvalidAddress(ResponseError): + """ + Invalid address. + """ diff --git a/desktop/core/ext-py/boto-2.38.0/boto/mws/response.py b/desktop/core/ext-py/boto-2.38.0/boto/mws/response.py new file mode 100644 index 0000000000000000000000000000000000000000..7e2e23c07e689c2e4f1564ccb7ddb6ddd202348b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/mws/response.py @@ -0,0 +1,787 @@ +# Copyright (c) 2012-2014 Andy Davidoff http://www.disruptek.com/ +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, dis- tribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the fol- lowing conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- ITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
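+#
+# The classes below form a small declarative DSL for parsing MWS XML
+# responses: ResponseElement subclasses declare their children as
+# DeclarativeType instances (Element, ElementList, MemberList,
+# SimpleList), and ResponseFactory assembles a Response class for each
+# API action.  A minimal sketch of the idea, with hypothetical element
+# names:
+#
+#     class MyCallResult(ResponseElement):
+#         Owner = Element()       # one nested <Owner> element
+#         Items = MemberList()    # <Items><member>...</member></Items>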
+from decimal import Decimal +from boto.compat import filter, map + + +class ComplexType(dict): + _value = 'Value' + + def __repr__(self): + return '{0}{1}'.format(getattr(self, self._value, None), self.copy()) + + def __str__(self): + return str(getattr(self, self._value, '')) + + +class DeclarativeType(object): + def __init__(self, _hint=None, **kw): + self._value = None + if _hint is not None: + self._hint = _hint + return + + class JITResponse(ResponseElement): + pass + self._hint = JITResponse + self._hint.__name__ = 'JIT_{0}/{1}'.format(self.__class__.__name__, + hex(id(self._hint))[2:]) + for name, value in kw.items(): + setattr(self._hint, name, value) + + def __repr__(self): + parent = getattr(self, '_parent', None) + return '<{0}_{1}/{2}_{3}>'.format(self.__class__.__name__, + parent and parent._name or '?', + getattr(self, '_name', '?'), + hex(id(self.__class__))) + + def setup(self, parent, name, *args, **kw): + self._parent = parent + self._name = name + self._clone = self.__class__(_hint=self._hint) + self._clone._parent = parent + self._clone._name = name + setattr(self._parent, self._name, self._clone) + + def start(self, *args, **kw): + raise NotImplementedError + + def end(self, *args, **kw): + raise NotImplementedError + + def teardown(self, *args, **kw): + setattr(self._parent, self._name, self._value) + + +class Element(DeclarativeType): + def start(self, *args, **kw): + self._value = self._hint(parent=self._parent, **kw) + return self._value + + def end(self, *args, **kw): + pass + + +class SimpleList(DeclarativeType): + def __init__(self, *args, **kw): + super(SimpleList, self).__init__(*args, **kw) + self._value = [] + + def start(self, *args, **kw): + return None + + def end(self, name, value, *args, **kw): + self._value.append(value) + + +class ElementList(SimpleList): + def start(self, *args, **kw): + value = self._hint(parent=self._parent, **kw) + self._value.append(value) + return value + + def end(self, *args, **kw): + pass + + +class MemberList(Element): + def __init__(self, _member=None, _hint=None, *args, **kw): + message = 'Invalid `member` specification in {0}'.format(self.__class__.__name__) + assert 'member' not in kw, message + if _member is None: + if _hint is None: + super(MemberList, self).__init__(*args, member=ElementList(**kw)) + else: + super(MemberList, self).__init__(_hint=_hint) + else: + if _hint is None: + if issubclass(_member, DeclarativeType): + member = _member(**kw) + else: + member = ElementList(_member, **kw) + super(MemberList, self).__init__(*args, member=member) + else: + message = 'Nonsensical {0} hint {1!r}'.format(self.__class__.__name__, + _hint) + raise AssertionError(message) + + def teardown(self, *args, **kw): + if self._value is None: + self._value = [] + else: + if isinstance(self._value.member, DeclarativeType): + self._value.member = [] + self._value = self._value.member + super(MemberList, self).teardown(*args, **kw) + + +class ResponseFactory(object): + def __init__(self, scopes=None): + self.scopes = [] if scopes is None else scopes + + def element_factory(self, name, parent): + class DynamicElement(parent): + _name = name + setattr(DynamicElement, '__name__', str(name)) + return DynamicElement + + def search_scopes(self, key): + for scope in self.scopes: + if hasattr(scope, key): + return getattr(scope, key) + if hasattr(scope, '__getitem__'): + if key in scope: + return scope[key] + + def find_element(self, action, suffix, parent): + element = self.search_scopes(action + suffix) + if element is not None: + return 
element + if action.endswith('ByNextToken'): + element = self.search_scopes(action[:-len('ByNextToken')] + suffix) + if element is not None: + return self.element_factory(action + suffix, element) + return self.element_factory(action + suffix, parent) + + def __call__(self, action, connection=None): + response = self.find_element(action, 'Response', Response) + if not hasattr(response, action + 'Result'): + result = self.find_element(action, 'Result', ResponseElement) + setattr(response, action + 'Result', Element(result)) + return response(connection=connection) + + +def strip_namespace(func): + def wrapper(self, name, *args, **kw): + if self._namespace is not None: + if name.startswith(self._namespace + ':'): + name = name[len(self._namespace + ':'):] + return func(self, name, *args, **kw) + return wrapper + + +class ResponseElement(dict): + _override = {} + _name = None + _namespace = None + + def __init__(self, connection=None, name=None, parent=None, attrs=None): + if parent is not None and self._namespace is None: + self._namespace = parent._namespace + if connection is not None: + self._connection = connection + self._name = name or self._name or self.__class__.__name__ + self._declared('setup', attrs=attrs) + dict.__init__(self, attrs and attrs.copy() or {}) + + def _declared(self, op, **kw): + def inherit(obj): + result = {} + for cls in getattr(obj, '__bases__', ()): + result.update(inherit(cls)) + result.update(obj.__dict__) + return result + + scope = inherit(self.__class__) + scope.update(self.__dict__) + declared = lambda attr: isinstance(attr[1], DeclarativeType) + for name, node in filter(declared, scope.items()): + getattr(node, op)(self, name, parentname=self._name, **kw) + + @property + def connection(self): + return self._connection + + def __repr__(self): + render = lambda pair: '{0!s}: {1!r}'.format(*pair) + do_show = lambda pair: not pair[0].startswith('_') + attrs = filter(do_show, self.__dict__.items()) + name = self.__class__.__name__ + if name.startswith('JIT_'): + name = '^{0}^'.format(self._name or '') + return '{0}{1!r}({2})'.format( + name, self.copy(), ', '.join(map(render, attrs))) + + def _type_for(self, name, attrs): + return self._override.get(name, globals().get(name, ResponseElement)) + + @strip_namespace + def startElement(self, name, attrs, connection): + attribute = getattr(self, name, None) + if isinstance(attribute, DeclarativeType): + return attribute.start(name=name, attrs=attrs, + connection=connection) + elif attrs.getLength(): + setattr(self, name, ComplexType(attrs.copy())) + else: + return None + + @strip_namespace + def endElement(self, name, value, connection): + attribute = getattr(self, name, None) + if name == self._name: + self._declared('teardown') + elif isinstance(attribute, DeclarativeType): + attribute.end(name=name, value=value, connection=connection) + elif isinstance(attribute, ComplexType): + setattr(attribute, attribute._value, value) + else: + setattr(self, name, value) + + +class Response(ResponseElement): + ResponseMetadata = Element() + + @strip_namespace + def startElement(self, name, attrs, connection): + if name == self._name: + self.update(attrs) + else: + return super(Response, self).startElement(name, attrs, connection) + + @property + def _result(self): + return getattr(self, self._action + 'Result', None) + + @property + def _action(self): + return (self._name or self.__class__.__name__)[:-len('Response')] + + +class ResponseResultList(Response): + _ResultClass = ResponseElement + + def __init__(self, *args, 
**kw): + setattr(self, self._action + 'Result', ElementList(self._ResultClass)) + super(ResponseResultList, self).__init__(*args, **kw) + + +class FeedSubmissionInfo(ResponseElement): + pass + + +class SubmitFeedResult(ResponseElement): + FeedSubmissionInfo = Element(FeedSubmissionInfo) + + +class GetFeedSubmissionListResult(ResponseElement): + FeedSubmissionInfo = ElementList(FeedSubmissionInfo) + + +class GetFeedSubmissionCountResult(ResponseElement): + pass + + +class CancelFeedSubmissionsResult(GetFeedSubmissionListResult): + pass + + +class GetServiceStatusResult(ResponseElement): + Messages = Element(Messages=ElementList()) + + +class ReportRequestInfo(ResponseElement): + pass + + +class RequestReportResult(ResponseElement): + ReportRequestInfo = Element() + + +class GetReportRequestListResult(RequestReportResult): + ReportRequestInfo = ElementList() + + +class CancelReportRequestsResult(RequestReportResult): + pass + + +class GetReportListResult(ResponseElement): + ReportInfo = ElementList() + + +class ManageReportScheduleResult(ResponseElement): + ReportSchedule = Element() + + +class GetReportScheduleListResult(ManageReportScheduleResult): + pass + + +class UpdateReportAcknowledgementsResult(GetReportListResult): + pass + + +class CreateInboundShipmentPlanResult(ResponseElement): + InboundShipmentPlans = MemberList(ShipToAddress=Element(), + Items=MemberList()) + + +class ListInboundShipmentsResult(ResponseElement): + ShipmentData = MemberList(ShipFromAddress=Element()) + + +class ListInboundShipmentItemsResult(ResponseElement): + ItemData = MemberList() + + +class ListInventorySupplyResult(ResponseElement): + InventorySupplyList = MemberList( + EarliestAvailability=Element(), + SupplyDetail=MemberList( + EarliestAvailableToPick=Element(), + LatestAvailableToPick=Element(), + ) + ) + + +class ComplexAmount(ResponseElement): + _amount = 'Value' + + def __repr__(self): + return '{0} {1}'.format(self.CurrencyCode, getattr(self, self._amount)) + + def __float__(self): + return float(getattr(self, self._amount)) + + def __str__(self): + return str(getattr(self, self._amount)) + + @strip_namespace + def startElement(self, name, attrs, connection): + if name not in ('CurrencyCode', self._amount): + message = 'Unrecognized tag {0} in ComplexAmount'.format(name) + raise AssertionError(message) + return super(ComplexAmount, self).startElement(name, attrs, connection) + + @strip_namespace + def endElement(self, name, value, connection): + if name == self._amount: + value = Decimal(value) + super(ComplexAmount, self).endElement(name, value, connection) + + +class ComplexMoney(ComplexAmount): + _amount = 'Amount' + + +class ComplexWeight(ResponseElement): + def __repr__(self): + return '{0} {1}'.format(self.Value, self.Unit) + + def __float__(self): + return float(self.Value) + + def __str__(self): + return str(self.Value) + + @strip_namespace + def startElement(self, name, attrs, connection): + if name not in ('Unit', 'Value'): + message = 'Unrecognized tag {0} in ComplexWeight'.format(name) + raise AssertionError(message) + return super(ComplexWeight, self).startElement(name, attrs, connection) + + @strip_namespace + def endElement(self, name, value, connection): + if name == 'Value': + value = Decimal(value) + super(ComplexWeight, self).endElement(name, value, connection) + + +class Dimension(ComplexType): + _value = 'Value' + + +class ComplexDimensions(ResponseElement): + _dimensions = ('Height', 'Length', 'Width', 'Weight') + + def __repr__(self): + values = [getattr(self, key, None) for 
key in self._dimensions] + values = filter(None, values) + return 'x'.join(map('{0.Value:0.2f}{0[Units]}'.format, values)) + + @strip_namespace + def startElement(self, name, attrs, connection): + if name not in self._dimensions: + message = 'Unrecognized tag {0} in ComplexDimensions'.format(name) + raise AssertionError(message) + setattr(self, name, Dimension(attrs.copy())) + + @strip_namespace + def endElement(self, name, value, connection): + if name in self._dimensions: + value = Decimal(value or '0') + ResponseElement.endElement(self, name, value, connection) + + +class FulfillmentPreviewItem(ResponseElement): + EstimatedShippingWeight = Element(ComplexWeight) + + +class FulfillmentPreview(ResponseElement): + EstimatedShippingWeight = Element(ComplexWeight) + EstimatedFees = MemberList(Amount=Element(ComplexAmount)) + UnfulfillablePreviewItems = MemberList(FulfillmentPreviewItem) + FulfillmentPreviewShipments = MemberList( + FulfillmentPreviewItems=MemberList(FulfillmentPreviewItem), + ) + + +class GetFulfillmentPreviewResult(ResponseElement): + FulfillmentPreviews = MemberList(FulfillmentPreview) + + +class FulfillmentOrder(ResponseElement): + DestinationAddress = Element() + NotificationEmailList = MemberList(SimpleList) + + +class GetFulfillmentOrderResult(ResponseElement): + FulfillmentOrder = Element(FulfillmentOrder) + FulfillmentShipment = MemberList( + FulfillmentShipmentItem=MemberList(), + FulfillmentShipmentPackage=MemberList(), + ) + FulfillmentOrderItem = MemberList() + + +class ListAllFulfillmentOrdersResult(ResponseElement): + FulfillmentOrders = MemberList(FulfillmentOrder) + + +class GetPackageTrackingDetailsResult(ResponseElement): + ShipToAddress = Element() + TrackingEvents = MemberList(EventAddress=Element()) + + +class Image(ResponseElement): + pass + + +class AttributeSet(ResponseElement): + ItemDimensions = Element(ComplexDimensions) + ListPrice = Element(ComplexMoney) + PackageDimensions = Element(ComplexDimensions) + SmallImage = Element(Image) + + +class ItemAttributes(AttributeSet): + Languages = Element(Language=ElementList()) + + def __init__(self, *args, **kw): + names = ('Actor', 'Artist', 'Author', 'Creator', 'Director', + 'Feature', 'Format', 'GemType', 'MaterialType', + 'MediaType', 'OperatingSystem', 'Platform') + for name in names: + setattr(self, name, SimpleList()) + super(ItemAttributes, self).__init__(*args, **kw) + + +class VariationRelationship(ResponseElement): + Identifiers = Element(MarketplaceASIN=Element(), + SKUIdentifier=Element()) + GemType = SimpleList() + MaterialType = SimpleList() + OperatingSystem = SimpleList() + + +class Price(ResponseElement): + LandedPrice = Element(ComplexMoney) + ListingPrice = Element(ComplexMoney) + Shipping = Element(ComplexMoney) + + +class CompetitivePrice(ResponseElement): + Price = Element(Price) + + +class CompetitivePriceList(ResponseElement): + CompetitivePrice = ElementList(CompetitivePrice) + + +class CompetitivePricing(ResponseElement): + CompetitivePrices = Element(CompetitivePriceList) + NumberOfOfferListings = SimpleList() + TradeInValue = Element(ComplexMoney) + + +class SalesRank(ResponseElement): + pass + + +class LowestOfferListing(ResponseElement): + Qualifiers = Element(ShippingTime=Element()) + Price = Element(Price) + + +class Offer(ResponseElement): + BuyingPrice = Element(Price) + RegularPrice = Element(ComplexMoney) + + +class Product(ResponseElement): + _namespace = 'ns2' + Identifiers = Element(MarketplaceASIN=Element(), + SKUIdentifier=Element()) + AttributeSets = Element( + 
ItemAttributes=ElementList(ItemAttributes), + ) + Relationships = Element( + VariationParent=ElementList(VariationRelationship), + ) + CompetitivePricing = ElementList(CompetitivePricing) + SalesRankings = Element( + SalesRank=ElementList(SalesRank), + ) + LowestOfferListings = Element( + LowestOfferListing=ElementList(LowestOfferListing), + ) + Offers = Element( + Offer=ElementList(Offer), + ) + + +class ListMatchingProductsResult(ResponseElement): + Products = Element(Product=ElementList(Product)) + + +class ProductsBulkOperationResult(ResponseElement): + Product = Element(Product) + Error = Element() + + +class ProductsBulkOperationResponse(ResponseResultList): + _ResultClass = ProductsBulkOperationResult + + +class GetMatchingProductResponse(ProductsBulkOperationResponse): + pass + + +class GetMatchingProductForIdResult(ListMatchingProductsResult): + pass + + +class GetMatchingProductForIdResponse(ResponseResultList): + _ResultClass = GetMatchingProductForIdResult + + +class GetCompetitivePricingForSKUResponse(ProductsBulkOperationResponse): + pass + + +class GetCompetitivePricingForASINResponse(ProductsBulkOperationResponse): + pass + + +class GetLowestOfferListingsForSKUResponse(ProductsBulkOperationResponse): + pass + + +class GetLowestOfferListingsForASINResponse(ProductsBulkOperationResponse): + pass + + +class GetMyPriceForSKUResponse(ProductsBulkOperationResponse): + pass + + +class GetMyPriceForASINResponse(ProductsBulkOperationResponse): + pass + + +class ProductCategory(ResponseElement): + + def __init__(self, *args, **kw): + setattr(self, 'Parent', Element(ProductCategory)) + super(ProductCategory, self).__init__(*args, **kw) + + +class GetProductCategoriesResult(ResponseElement): + Self = ElementList(ProductCategory) + + +class GetProductCategoriesForSKUResult(GetProductCategoriesResult): + pass + + +class GetProductCategoriesForASINResult(GetProductCategoriesResult): + pass + + +class Order(ResponseElement): + OrderTotal = Element(ComplexMoney) + ShippingAddress = Element() + PaymentExecutionDetail = Element( + PaymentExecutionDetailItem=ElementList( + PaymentExecutionDetailItem=Element( + Payment=Element(ComplexMoney) + ) + ) + ) + + +class ListOrdersResult(ResponseElement): + Orders = Element(Order=ElementList(Order)) + + +class GetOrderResult(ListOrdersResult): + pass + + +class OrderItem(ResponseElement): + ItemPrice = Element(ComplexMoney) + ShippingPrice = Element(ComplexMoney) + GiftWrapPrice = Element(ComplexMoney) + ItemTax = Element(ComplexMoney) + ShippingTax = Element(ComplexMoney) + GiftWrapTax = Element(ComplexMoney) + ShippingDiscount = Element(ComplexMoney) + PromotionDiscount = Element(ComplexMoney) + PromotionIds = SimpleList() + CODFee = Element(ComplexMoney) + CODFeeDiscount = Element(ComplexMoney) + + +class ListOrderItemsResult(ResponseElement): + OrderItems = Element(OrderItem=ElementList(OrderItem)) + + +class ListMarketplaceParticipationsResult(ResponseElement): + ListParticipations = Element(Participation=ElementList()) + ListMarketplaces = Element(Marketplace=ElementList()) + + +class ListRecommendationsResult(ResponseElement): + ListingQualityRecommendations = MemberList(ItemIdentifier=Element()) + + +class Customer(ResponseElement): + PrimaryContactInfo = Element() + ShippingAddressList = Element(ShippingAddress=ElementList()) + AssociatedMarketplaces = Element(MarketplaceDomain=ElementList()) + + +class ListCustomersResult(ResponseElement): + CustomerList = Element(Customer=ElementList(Customer)) + + +class 
GetCustomersForCustomerIdResult(ListCustomersResult): + pass + + +class CartItem(ResponseElement): + CurrentPrice = Element(ComplexMoney) + SalePrice = Element(ComplexMoney) + + +class Cart(ResponseElement): + ActiveCartItemList = Element(CartItem=ElementList(CartItem)) + SavedCartItemList = Element(CartItem=ElementList(CartItem)) + + +class ListCartsResult(ResponseElement): + CartList = Element(Cart=ElementList(Cart)) + + +class GetCartsResult(ListCartsResult): + pass + + +class Destination(ResponseElement): + AttributeList = MemberList() + + +class ListRegisteredDestinationsResult(ResponseElement): + DestinationList = MemberList(Destination) + + +class Subscription(ResponseElement): + Destination = Element(Destination) + + +class GetSubscriptionResult(ResponseElement): + Subscription = Element(Subscription) + + +class ListSubscriptionsResult(ResponseElement): + SubscriptionList = MemberList(Subscription) + + +class OrderReferenceDetails(ResponseElement): + Buyer = Element() + OrderTotal = Element(ComplexMoney) + Destination = Element(PhysicalDestination=Element()) + SellerOrderAttributes = Element() + OrderReferenceStatus = Element() + Constraints = ElementList() + + +class SetOrderReferenceDetailsResult(ResponseElement): + OrderReferenceDetails = Element(OrderReferenceDetails) + + +class GetOrderReferenceDetailsResult(SetOrderReferenceDetailsResult): + pass + + +class AuthorizationDetails(ResponseElement): + AuthorizationAmount = Element(ComplexMoney) + CapturedAmount = Element(ComplexMoney) + AuthorizationFee = Element(ComplexMoney) + AuthorizationStatus = Element() + + +class AuthorizeResult(ResponseElement): + AuthorizationDetails = Element(AuthorizationDetails) + + +class GetAuthorizationDetailsResult(AuthorizeResult): + pass + + +class CaptureDetails(ResponseElement): + CaptureAmount = Element(ComplexMoney) + RefundedAmount = Element(ComplexMoney) + CaptureFee = Element(ComplexMoney) + CaptureStatus = Element() + + +class CaptureResult(ResponseElement): + CaptureDetails = Element(CaptureDetails) + + +class GetCaptureDetailsResult(CaptureResult): + pass + + +class RefundDetails(ResponseElement): + RefundAmount = Element(ComplexMoney) + FeeRefunded = Element(ComplexMoney) + RefundStatus = Element() + + +class RefundResult(ResponseElement): + RefundDetails = Element(RefundDetails) + + +class GetRefundDetails(RefundResult): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/opsworks/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/opsworks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1ff5c0f6cf50fef13dca8cc0fefc13a6218f7e3c --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/opsworks/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the Amazon OpsWorks service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.opsworks.layer1 import OpsWorksConnection + return get_regions('opsworks', connection_cls=OpsWorksConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/opsworks/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/opsworks/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..da23e48521bf616600d59b6b1b7d7da6a7574600 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/opsworks/exceptions.py @@ -0,0 +1,30 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import JSONResponseError + + +class ResourceNotFoundException(JSONResponseError): + pass + + +class ValidationException(JSONResponseError): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/opsworks/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/opsworks/layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..8894d1c30fabcc5fa9de66cdc9ca7c87d1ed09dc --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/opsworks/layer1.py @@ -0,0 +1,3094 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.opsworks import exceptions + + +class OpsWorksConnection(AWSQueryConnection): + """ + AWS OpsWorks + Welcome to the AWS OpsWorks API Reference . This guide provides + descriptions, syntax, and usage examples about AWS OpsWorks + actions and data types, including common parameters and error + codes. + + AWS OpsWorks is an application management service that provides an + integrated experience for overseeing the complete application + lifecycle. For information about this product, go to the `AWS + OpsWorks`_ details page. + + **SDKs and CLI** + + The most common way to use the AWS OpsWorks API is by using the + AWS Command Line Interface (CLI) or by using one of the AWS SDKs + to implement applications in your preferred language. For more + information, see: + + + + `AWS CLI`_ + + `AWS SDK for Java`_ + + `AWS SDK for .NET`_ + + `AWS SDK for PHP 2`_ + + `AWS SDK for Ruby`_ + + `AWS SDK for Node.js`_ + + `AWS SDK for Python(Boto)`_ + + + **Endpoints** + + AWS OpsWorks supports only one endpoint, opsworks.us- + east-1.amazonaws.com (HTTPS), so you must connect to that + endpoint. You can then use the API to direct AWS OpsWorks to + create stacks in any AWS Region. + + **Chef Versions** + + When you call CreateStack, CloneStack, or UpdateStack we recommend + you use the `ConfigurationManager` parameter to specify the Chef + version, 0.9, 11.4, or 11.10. The default value is currently + 11.10. For more information, see `Chef Versions`_. + + You can still specify Chef 0.9 for your stack, but new features + are not available for Chef 0.9 stacks, and support is scheduled to + end on July 24, 2014. We do not recommend using Chef 0.9 for new + stacks, and we recommend migrating your existing Chef 0.9 stacks + to Chef 11.10 as soon as possible. 
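+
+    A minimal usage sketch (an illustrative note for this vendored copy,
+    not part of the upstream docs; credentials are assumed to come from
+    the boto config file or environment, and the region name is a
+    placeholder)::
+
+        import boto.opsworks
+
+        # connect_to_region returns None for an unknown region name
+        conn = boto.opsworks.connect_to_region('us-east-1')
+        if conn is not None:
+            stacks = conn.describe_stacks()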
+ """ + APIVersion = "2013-02-18" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "opsworks.us-east-1.amazonaws.com" + ServiceName = "OpsWorks" + TargetPrefix = "OpsWorks_20130218" + ResponseError = JSONResponseError + + _faults = { + "ResourceNotFoundException": exceptions.ResourceNotFoundException, + "ValidationException": exceptions.ValidationException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(OpsWorksConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def assign_instance(self, instance_id, layer_ids): + """ + Assign a registered instance to a custom layer. You cannot use + this action with instances that were created with AWS + OpsWorks. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type instance_id: string + :param instance_id: The instance ID. + + :type layer_ids: list + :param layer_ids: The layer ID, which must correspond to a custom + layer. You cannot assign a registered instance to a built-in layer. + + """ + params = { + 'InstanceId': instance_id, + 'LayerIds': layer_ids, + } + return self.make_request(action='AssignInstance', + body=json.dumps(params)) + + def assign_volume(self, volume_id, instance_id=None): + """ + Assigns one of the stack's registered Amazon EBS volumes to a + specified instance. The volume must first be registered with + the stack by calling RegisterVolume. For more information, see + `Resource Management`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type volume_id: string + :param volume_id: The volume ID. + + :type instance_id: string + :param instance_id: The instance ID. + + """ + params = {'VolumeId': volume_id, } + if instance_id is not None: + params['InstanceId'] = instance_id + return self.make_request(action='AssignVolume', + body=json.dumps(params)) + + def associate_elastic_ip(self, elastic_ip, instance_id=None): + """ + Associates one of the stack's registered Elastic IP addresses + with a specified instance. The address must first be + registered with the stack by calling RegisterElasticIp. For + more information, see `Resource Management`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type elastic_ip: string + :param elastic_ip: The Elastic IP address. + + :type instance_id: string + :param instance_id: The instance ID. + + """ + params = {'ElasticIp': elastic_ip, } + if instance_id is not None: + params['InstanceId'] = instance_id + return self.make_request(action='AssociateElasticIp', + body=json.dumps(params)) + + def attach_elastic_load_balancer(self, elastic_load_balancer_name, + layer_id): + """ + Attaches an Elastic Load Balancing load balancer to a + specified layer. 
For more information, see `Elastic Load + Balancing`_. + + + You must create the Elastic Load Balancing instance + separately, by using the Elastic Load Balancing console, API, + or CLI. For more information, see ` Elastic Load Balancing + Developer Guide`_. + + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type elastic_load_balancer_name: string + :param elastic_load_balancer_name: The Elastic Load Balancing + instance's name. + + :type layer_id: string + :param layer_id: The ID of the layer that the Elastic Load Balancing + instance is to be attached to. + + """ + params = { + 'ElasticLoadBalancerName': elastic_load_balancer_name, + 'LayerId': layer_id, + } + return self.make_request(action='AttachElasticLoadBalancer', + body=json.dumps(params)) + + def clone_stack(self, source_stack_id, service_role_arn, name=None, + region=None, vpc_id=None, attributes=None, + default_instance_profile_arn=None, default_os=None, + hostname_theme=None, default_availability_zone=None, + default_subnet_id=None, custom_json=None, + configuration_manager=None, chef_configuration=None, + use_custom_cookbooks=None, + use_opsworks_security_groups=None, + custom_cookbooks_source=None, default_ssh_key_name=None, + clone_permissions=None, clone_app_ids=None, + default_root_device_type=None): + """ + Creates a clone of a specified stack. For more information, + see `Clone a Stack`_. + + **Required Permissions**: To use this action, an IAM user must + have an attached policy that explicitly grants permissions. + For more information on user permissions, see `Managing User + Permissions`_. + + :type source_stack_id: string + :param source_stack_id: The source stack ID. + + :type name: string + :param name: The cloned stack name. + + :type region: string + :param region: The cloned stack AWS region, such as "us-east-1". For + more information about AWS regions, see `Regions and Endpoints`_. + + :type vpc_id: string + :param vpc_id: The ID of the VPC that the cloned stack is to be + launched into. It must be in the specified region. All instances + are launched into this VPC, and you cannot change the ID later. + + + If your account supports EC2 Classic, the default value is no VPC. + + If your account does not support EC2 Classic, the default value is + the default VPC for the specified region. + + + If the VPC ID corresponds to a default VPC and you have specified + either the `DefaultAvailabilityZone` or the `DefaultSubnetId` + parameter only, AWS OpsWorks infers the value of the other + parameter. If you specify neither parameter, AWS OpsWorks sets + these parameters to the first valid Availability Zone for the + specified region and the corresponding default VPC subnet ID, + respectively. + + If you specify a nondefault VPC ID, note the following: + + + + It must belong to a VPC in your account that is in the specified + region. + + You must specify a value for `DefaultSubnetId`. + + + For more information on how to use AWS OpsWorks with a VPC, see + `Running a Stack in a VPC`_. For more information on default VPC + and EC2 Classic, see `Supported Platforms`_. + + :type attributes: map + :param attributes: A list of stack attributes and values as key/value + pairs to be added to the cloned stack. 
+ + :type service_role_arn: string + :param service_role_arn: + The stack AWS Identity and Access Management (IAM) role, which allows + AWS OpsWorks to work with AWS resources on your behalf. You must + set this parameter to the Amazon Resource Name (ARN) for an + existing IAM role. If you create a stack by using the AWS OpsWorks + console, it creates the role for you. You can obtain an existing + stack's IAM ARN programmatically by calling DescribePermissions. + For more information about IAM ARNs, see `Using Identifiers`_. + + + You must set this parameter to a valid service role ARN or the action + will fail; there is no default value. You can specify the source + stack's service role ARN, if you prefer, but you must do so + explicitly. + + :type default_instance_profile_arn: string + :param default_instance_profile_arn: The ARN of an IAM profile that is + the default profile for all of the stack's EC2 instances. For more + information about IAM ARNs, see `Using Identifiers`_. + + :type default_os: string + :param default_os: The stack's operating system, which must be set to + one of the following. + + + Standard operating systems: an Amazon Linux version such as `Amazon + Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`. + + Custom AMIs: `Custom`. You specify the custom AMI you want to use + when you create instances. + + + The default option is the current Amazon Linux version. + + :type hostname_theme: string + :param hostname_theme: The stack's host name theme, with spaces + replaced by underscores. The theme is used to generate host names + for the stack's instances. By default, `HostnameTheme` is set to + `Layer_Dependent`, which creates host names by appending integers + to the layer's short name. The other themes are: + + + `Baked_Goods` + + `Clouds` + + `European_Cities` + + `Fruits` + + `Greek_Deities` + + `Legendary_Creatures_from_Japan` + + `Planets_and_Moons` + + `Roman_Deities` + + `Scottish_Islands` + + `US_Cities` + + `Wild_Cats` + + + To obtain a generated host name, call `GetHostNameSuggestion`, which + returns a host name based on the current theme. + + :type default_availability_zone: string + :param default_availability_zone: The cloned stack's default + Availability Zone, which must be in the specified region. For more + information, see `Regions and Endpoints`_. If you also specify a + value for `DefaultSubnetId`, the subnet must be in the same zone. + For more information, see the `VpcId` parameter description. + + :type default_subnet_id: string + :param default_subnet_id: The stack's default VPC subnet ID. This + parameter is required if you specify a value for the `VpcId` + parameter. All instances are launched into this subnet unless you + specify otherwise when you create the instance. If you also specify + a value for `DefaultAvailabilityZone`, the subnet must be in that + zone. For information on default values and when this parameter is + required, see the `VpcId` parameter description. + + :type custom_json: string + :param custom_json: A string that contains user-defined, custom JSON. + It is used to override the corresponding default stack + configuration JSON values. The string should be in the following + format and must escape characters such as '"'.: + `"{\"key1\": \"value1\", \"key2\": \"value2\",...}"` + + For more information on custom JSON, see `Use Custom JSON to Modify the + Stack Configuration JSON`_. + + :type configuration_manager: dict + :param configuration_manager: The configuration manager.
When you clone + a stack we recommend that you use the configuration manager to + specify the Chef version, 0.9, 11.4, or 11.10. The default value is + currently 11.4. + + :type chef_configuration: dict + :param chef_configuration: A `ChefConfiguration` object that specifies + whether to enable Berkshelf and the Berkshelf version on Chef 11.10 + stacks. For more information, see `Create a New Stack`_. + + :type use_custom_cookbooks: boolean + :param use_custom_cookbooks: Whether to use custom cookbooks. + + :type use_opsworks_security_groups: boolean + :param use_opsworks_security_groups: Whether to associate the AWS + OpsWorks built-in security groups with the stack's layers. + AWS OpsWorks provides a standard set of built-in security groups, one + for each layer, which are associated with layers by default. With + `UseOpsworksSecurityGroups` you can instead provide your own custom + security groups. `UseOpsworksSecurityGroups` has the following + settings: + + + + True - AWS OpsWorks automatically associates the appropriate built-in + security group with each layer (default setting). You can associate + additional security groups with a layer after you create it but you + cannot delete the built-in security group. + + False - AWS OpsWorks does not associate built-in security groups with + layers. You must create appropriate EC2 security groups and + associate a security group with each layer that you create. + However, you can still manually associate a built-in security group + with a layer on creation; custom security groups are required only + for those layers that need custom settings. + + + For more information, see `Create a New Stack`_. + + :type custom_cookbooks_source: dict + :param custom_cookbooks_source: Contains the information required to + retrieve an app or cookbook from a repository. For more + information, see `Creating Apps`_ or `Custom Recipes and + Cookbooks`_. + + :type default_ssh_key_name: string + :param default_ssh_key_name: A default SSH key for the stack instances. + You can override this value when you create or update an instance. + + :type clone_permissions: boolean + :param clone_permissions: Whether to clone the source stack's + permissions. + + :type clone_app_ids: list + :param clone_app_ids: A list of source stack app IDs to be included in + the cloned stack. + + :type default_root_device_type: string + :param default_root_device_type: The default root device type. This + value is used by default for all instances in the cloned stack, but + you can override it when you create an instance. For more + information, see `Storage for the Root Device`_. 
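+
+        Example (an illustrative sketch, not from the upstream docs; the
+        source stack ID and the service role ARN are placeholders for
+        values from your own account)::
+
+            cloned = conn.clone_stack(
+                source_stack_id='SOURCE-STACK-ID',  # placeholder
+                service_role_arn='arn:aws:iam::111122223333:role/aws-opsworks-service-role',
+                name='my-cloned-stack',
+                clone_permissions=True,
+            )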
+ + """ + params = { + 'SourceStackId': source_stack_id, + 'ServiceRoleArn': service_role_arn, + } + if name is not None: + params['Name'] = name + if region is not None: + params['Region'] = region + if vpc_id is not None: + params['VpcId'] = vpc_id + if attributes is not None: + params['Attributes'] = attributes + if default_instance_profile_arn is not None: + params['DefaultInstanceProfileArn'] = default_instance_profile_arn + if default_os is not None: + params['DefaultOs'] = default_os + if hostname_theme is not None: + params['HostnameTheme'] = hostname_theme + if default_availability_zone is not None: + params['DefaultAvailabilityZone'] = default_availability_zone + if default_subnet_id is not None: + params['DefaultSubnetId'] = default_subnet_id + if custom_json is not None: + params['CustomJson'] = custom_json + if configuration_manager is not None: + params['ConfigurationManager'] = configuration_manager + if chef_configuration is not None: + params['ChefConfiguration'] = chef_configuration + if use_custom_cookbooks is not None: + params['UseCustomCookbooks'] = use_custom_cookbooks + if use_opsworks_security_groups is not None: + params['UseOpsworksSecurityGroups'] = use_opsworks_security_groups + if custom_cookbooks_source is not None: + params['CustomCookbooksSource'] = custom_cookbooks_source + if default_ssh_key_name is not None: + params['DefaultSshKeyName'] = default_ssh_key_name + if clone_permissions is not None: + params['ClonePermissions'] = clone_permissions + if clone_app_ids is not None: + params['CloneAppIds'] = clone_app_ids + if default_root_device_type is not None: + params['DefaultRootDeviceType'] = default_root_device_type + return self.make_request(action='CloneStack', + body=json.dumps(params)) + + def create_app(self, stack_id, name, type, shortname=None, + description=None, data_sources=None, app_source=None, + domains=None, enable_ssl=None, ssl_configuration=None, + attributes=None, environment=None): + """ + Creates an app for a specified stack. For more information, + see `Creating Apps`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type stack_id: string + :param stack_id: The stack ID. + + :type shortname: string + :param shortname: The app's short name. + + :type name: string + :param name: The app name. + + :type description: string + :param description: A description of the app. + + :type data_sources: list + :param data_sources: The app's data source. + + :type type: string + :param type: The app type. Each supported type is associated with a + particular layer. For example, PHP applications are associated with + a PHP layer. AWS OpsWorks deploys an application to those instances + that are members of the corresponding layer. + + :type app_source: dict + :param app_source: A `Source` object that specifies the app repository. + + :type domains: list + :param domains: The app virtual host settings, with multiple domains + separated by commas. For example: `'www.example.com, example.com'` + + :type enable_ssl: boolean + :param enable_ssl: Whether to enable SSL for the app. + + :type ssl_configuration: dict + :param ssl_configuration: An `SslConfiguration` object with the SSL + configuration. + + :type attributes: map + :param attributes: One or more user-defined key/value pairs to be added + to the stack attributes. 
+ + :type environment: list + :param environment: + An array of `EnvironmentVariable` objects that specify environment + variables to be associated with the app. You can specify up to ten + environment variables. After you deploy the app, these variables + are defined on the associated app server instance. + + This parameter is supported only by Chef 11.10 stacks. If you have + specified one or more environment variables, you cannot modify the + stack's Chef version. + + """ + params = {'StackId': stack_id, 'Name': name, 'Type': type, } + if shortname is not None: + params['Shortname'] = shortname + if description is not None: + params['Description'] = description + if data_sources is not None: + params['DataSources'] = data_sources + if app_source is not None: + params['AppSource'] = app_source + if domains is not None: + params['Domains'] = domains + if enable_ssl is not None: + params['EnableSsl'] = enable_ssl + if ssl_configuration is not None: + params['SslConfiguration'] = ssl_configuration + if attributes is not None: + params['Attributes'] = attributes + if environment is not None: + params['Environment'] = environment + return self.make_request(action='CreateApp', + body=json.dumps(params)) + + def create_deployment(self, stack_id, command, app_id=None, + instance_ids=None, comment=None, custom_json=None): + """ + Runs deployment or stack commands. For more information, see + `Deploying Apps`_ and `Run Stack Commands`_. + + **Required Permissions**: To use this action, an IAM user must + have a Deploy or Manage permissions level for the stack, or an + attached policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type stack_id: string + :param stack_id: The stack ID. + + :type app_id: string + :param app_id: The app ID. This parameter is required for app + deployments, but not for other deployment commands. + + :type instance_ids: list + :param instance_ids: The instance IDs for the deployment targets. + + :type command: dict + :param command: A `DeploymentCommand` object that specifies the + deployment command and any associated arguments. + + :type comment: string + :param comment: A user-defined comment. + + :type custom_json: string + :param custom_json: A string that contains user-defined, custom JSON. + It is used to override the corresponding default stack + configuration JSON values. The string should be in the following + format and must escape characters such as '"'.: + `"{\"key1\": \"value1\", \"key2\": \"value2\",...}"` + + For more information on custom JSON, see `Use Custom JSON to Modify the + Stack Configuration JSON`_. + + """ + params = {'StackId': stack_id, 'Command': command, } + if app_id is not None: + params['AppId'] = app_id + if instance_ids is not None: + params['InstanceIds'] = instance_ids + if comment is not None: + params['Comment'] = comment + if custom_json is not None: + params['CustomJson'] = custom_json + return self.make_request(action='CreateDeployment', + body=json.dumps(params)) + + def create_instance(self, stack_id, layer_ids, instance_type, + auto_scaling_type=None, hostname=None, os=None, + ami_id=None, ssh_key_name=None, + availability_zone=None, virtualization_type=None, + subnet_id=None, architecture=None, + root_device_type=None, install_updates_on_boot=None, + ebs_optimized=None): + """ + Creates an instance in a specified stack. For more + information, see `Adding an Instance to a Layer`_. 
+ + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type stack_id: string + :param stack_id: The stack ID. + + :type layer_ids: list + :param layer_ids: An array that contains the instance layer IDs. + + :type instance_type: string + :param instance_type: The instance type. AWS OpsWorks supports all + instance types except Cluster Compute, Cluster GPU, and High Memory + Cluster. For more information, see `Instance Families and Types`_. + The parameter values that you use to specify the various types are + in the API Name column of the Available Instance Types table. + + :type auto_scaling_type: string + :param auto_scaling_type: For load-based or time-based instances, the + type. + + :type hostname: string + :param hostname: The instance host name. + + :type os: string + :param os: The instance's operating system, which must be set to one of + the following. + + + Standard operating systems: an Amazon Linux version such as `Amazon + Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`. + + Custom AMIs: `Custom` + + + The default option is the current Amazon Linux version. If you set this + parameter to `Custom`, you must use the CreateInstance action's + AmiId parameter to specify the custom AMI that you want to use. For + more information on the standard operating systems, see `Operating + Systems`_. For more information on how to use custom AMIs with + OpsWorks, see `Using Custom AMIs`_. + + :type ami_id: string + :param ami_id: + A custom AMI ID to be used to create the instance. The AMI should be + based on one of the standard AWS OpsWorks AMIs: Amazon Linux, + Ubuntu 12.04 LTS, or Ubuntu 14.04 LTS. For more information, see + `Instances`_. + + If you specify a custom AMI, you must set `Os` to `Custom`. + + :type ssh_key_name: string + :param ssh_key_name: The instance SSH key name. + + :type availability_zone: string + :param availability_zone: The instance Availability Zone. For more + information, see `Regions and Endpoints`_. + + :type virtualization_type: string + :param virtualization_type: The instance's virtualization type, + `paravirtual` or `hvm`. + + :type subnet_id: string + :param subnet_id: The ID of the instance's subnet. If the stack is + running in a VPC, you can use this parameter to override the + stack's default subnet ID value and direct AWS OpsWorks to launch + the instance in a different subnet. + + :type architecture: string + :param architecture: The instance architecture. The default option is + `x86_64`. Instance types do not necessarily support both + architectures. For a list of the architectures that are supported + by the different instance types, see `Instance Families and + Types`_. + + :type root_device_type: string + :param root_device_type: The instance root device type. For more + information, see `Storage for the Root Device`_. + + :type install_updates_on_boot: boolean + :param install_updates_on_boot: + Whether to install operating system and package updates when the + instance boots. The default value is `True`. To control when + updates are installed, set this value to `False`. You must then + update your instances manually by using CreateDeployment to run the + `update_dependencies` stack command or manually running `yum` + (Amazon Linux) or `apt-get` (Ubuntu) on the instances.
+ + + We strongly recommend using the default value of `True` to ensure that + your instances have the latest security updates. + + :type ebs_optimized: boolean + :param ebs_optimized: Whether to create an Amazon EBS-optimized + instance. + + """ + params = { + 'StackId': stack_id, + 'LayerIds': layer_ids, + 'InstanceType': instance_type, + } + if auto_scaling_type is not None: + params['AutoScalingType'] = auto_scaling_type + if hostname is not None: + params['Hostname'] = hostname + if os is not None: + params['Os'] = os + if ami_id is not None: + params['AmiId'] = ami_id + if ssh_key_name is not None: + params['SshKeyName'] = ssh_key_name + if availability_zone is not None: + params['AvailabilityZone'] = availability_zone + if virtualization_type is not None: + params['VirtualizationType'] = virtualization_type + if subnet_id is not None: + params['SubnetId'] = subnet_id + if architecture is not None: + params['Architecture'] = architecture + if root_device_type is not None: + params['RootDeviceType'] = root_device_type + if install_updates_on_boot is not None: + params['InstallUpdatesOnBoot'] = install_updates_on_boot + if ebs_optimized is not None: + params['EbsOptimized'] = ebs_optimized + return self.make_request(action='CreateInstance', + body=json.dumps(params)) + + def create_layer(self, stack_id, type, name, shortname, attributes=None, + custom_instance_profile_arn=None, + custom_security_group_ids=None, packages=None, + volume_configurations=None, enable_auto_healing=None, + auto_assign_elastic_ips=None, + auto_assign_public_ips=None, custom_recipes=None, + install_updates_on_boot=None, + use_ebs_optimized_instances=None, + lifecycle_event_configuration=None): + """ + Creates a layer. For more information, see `How to Create a + Layer`_. + + + You should use **CreateLayer** for noncustom layer types such + as PHP App Server only if the stack does not have an existing + layer of that type. A stack can have at most one instance of + each noncustom layer; if you attempt to create a second + instance, **CreateLayer** fails. A stack can have an arbitrary + number of custom layers, so you can call **CreateLayer** as + many times as you like for that layer type. + + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type stack_id: string + :param stack_id: The layer stack ID. + + :type type: string + :param type: The layer type. A stack cannot have more than one built-in + layer of the same type. It can have any number of custom layers. + + :type name: string + :param name: The layer name, which is used by the console. + + :type shortname: string + :param shortname: The layer short name, which is used internally by AWS + OpsWorks and by Chef recipes. The short name is also used as the + name for the directory where your app files are installed. It can + have a maximum of 200 characters, which are limited to the + alphanumeric characters, '-', '_', and '.'. + + :type attributes: map + :param attributes: One or more user-defined key/value pairs to be added + to the stack attributes. + + :type custom_instance_profile_arn: string + :param custom_instance_profile_arn: The ARN of an IAM profile that is to + be used for the layer's EC2 instances. For more information about + IAM ARNs, see `Using Identifiers`_.
+ + :type custom_security_group_ids: list + :param custom_security_group_ids: An array containing the layer custom + security group IDs. + + :type packages: list + :param packages: An array of `Package` objects that describe the layer + packages. + + :type volume_configurations: list + :param volume_configurations: A `VolumeConfigurations` object that + describes the layer's Amazon EBS volumes. + + :type enable_auto_healing: boolean + :param enable_auto_healing: Whether to enable auto healing for the + layer. + + :type auto_assign_elastic_ips: boolean + :param auto_assign_elastic_ips: Whether to automatically assign an + `Elastic IP address`_ to the layer's instances. For more + information, see `How to Edit a Layer`_. + + :type auto_assign_public_ips: boolean + :param auto_assign_public_ips: For stacks that are running in a VPC, + whether to automatically assign a public IP address to the layer's + instances. For more information, see `How to Edit a Layer`_. + + :type custom_recipes: dict + :param custom_recipes: A `LayerCustomRecipes` object that specifies the + layer custom recipes. + + :type install_updates_on_boot: boolean + :param install_updates_on_boot: + Whether to install operating system and package updates when the + instance boots. The default value is `True`. To control when + updates are installed, set this value to `False`. You must then + update your instances manually by using CreateDeployment to run the + `update_dependencies` stack command or manually running `yum` + (Amazon Linux) or `apt-get` (Ubuntu) on the instances. + + + We strongly recommend using the default value of `True` to ensure that + your instances have the latest security updates. + + :type use_ebs_optimized_instances: boolean + :param use_ebs_optimized_instances: Whether to use Amazon EBS-optimized + instances. + + :type lifecycle_event_configuration: dict + :param lifecycle_event_configuration: A LifeCycleEventConfiguration + object that you can use to configure the Shutdown event to specify + an execution timeout and enable or disable Elastic Load Balancer + connection draining.
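+
+        Example (an illustrative sketch, not from the upstream docs; the
+        stack ID is a placeholder)::
+
+            layer = conn.create_layer(
+                stack_id='STACK-ID',  # placeholder
+                type='custom',
+                name='Background Workers',
+                shortname='workers',
+                enable_auto_healing=True,
+            )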
+ + """ + params = { + 'StackId': stack_id, + 'Type': type, + 'Name': name, + 'Shortname': shortname, + } + if attributes is not None: + params['Attributes'] = attributes + if custom_instance_profile_arn is not None: + params['CustomInstanceProfileArn'] = custom_instance_profile_arn + if custom_security_group_ids is not None: + params['CustomSecurityGroupIds'] = custom_security_group_ids + if packages is not None: + params['Packages'] = packages + if volume_configurations is not None: + params['VolumeConfigurations'] = volume_configurations + if enable_auto_healing is not None: + params['EnableAutoHealing'] = enable_auto_healing + if auto_assign_elastic_ips is not None: + params['AutoAssignElasticIps'] = auto_assign_elastic_ips + if auto_assign_public_ips is not None: + params['AutoAssignPublicIps'] = auto_assign_public_ips + if custom_recipes is not None: + params['CustomRecipes'] = custom_recipes + if install_updates_on_boot is not None: + params['InstallUpdatesOnBoot'] = install_updates_on_boot + if use_ebs_optimized_instances is not None: + params['UseEbsOptimizedInstances'] = use_ebs_optimized_instances + if lifecycle_event_configuration is not None: + params['LifecycleEventConfiguration'] = lifecycle_event_configuration + return self.make_request(action='CreateLayer', + body=json.dumps(params)) + + def create_stack(self, name, region, service_role_arn, + default_instance_profile_arn, vpc_id=None, + attributes=None, default_os=None, hostname_theme=None, + default_availability_zone=None, default_subnet_id=None, + custom_json=None, configuration_manager=None, + chef_configuration=None, use_custom_cookbooks=None, + use_opsworks_security_groups=None, + custom_cookbooks_source=None, default_ssh_key_name=None, + default_root_device_type=None): + """ + Creates a new stack. For more information, see `Create a New + Stack`_. + + **Required Permissions**: To use this action, an IAM user must + have an attached policy that explicitly grants permissions. + For more information on user permissions, see `Managing User + Permissions`_. + + :type name: string + :param name: The stack name. + + :type region: string + :param region: The stack AWS region, such as "us-east-1". For more + information about Amazon regions, see `Regions and Endpoints`_. + + :type vpc_id: string + :param vpc_id: The ID of the VPC that the stack is to be launched into. + It must be in the specified region. All instances are launched into + this VPC, and you cannot change the ID later. + + + If your account supports EC2 Classic, the default value is no VPC. + + If your account does not support EC2 Classic, the default value is + the default VPC for the specified region. + + + If the VPC ID corresponds to a default VPC and you have specified + either the `DefaultAvailabilityZone` or the `DefaultSubnetId` + parameter only, AWS OpsWorks infers the value of the other + parameter. If you specify neither parameter, AWS OpsWorks sets + these parameters to the first valid Availability Zone for the + specified region and the corresponding default VPC subnet ID, + respectively. + + If you specify a nondefault VPC ID, note the following: + + + + It must belong to a VPC in your account that is in the specified + region. + + You must specify a value for `DefaultSubnetId`. + + + For more information on how to use AWS OpsWorks with a VPC, see + `Running a Stack in a VPC`_. For more information on default VPC + and EC2 Classic, see `Supported Platforms`_. 
+ + :type attributes: map + :param attributes: One or more user-defined key/value pairs to be added + to the stack attributes. + + :type service_role_arn: string + :param service_role_arn: The stack AWS Identity and Access Management + (IAM) role, which allows AWS OpsWorks to work with AWS resources on + your behalf. You must set this parameter to the Amazon Resource + Name (ARN) for an existing IAM role. For more information about IAM + ARNs, see `Using Identifiers`_. + + :type default_instance_profile_arn: string + :param default_instance_profile_arn: The ARN of an IAM profile that is + the default profile for all of the stack's EC2 instances. For more + information about IAM ARNs, see `Using Identifiers`_. + + :type default_os: string + :param default_os: The stack's operating system, which must be set to + one of the following. + + + Standard operating systems: an Amazon Linux version such as `Amazon + Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`. + + Custom AMIs: `Custom`. You specify the custom AMI you want to use + when you create instances. + + + The default option is the current Amazon Linux version. + + :type hostname_theme: string + :param hostname_theme: The stack's host name theme, with spaces + replaced by underscores. The theme is used to generate host names + for the stack's instances. By default, `HostnameTheme` is set to + `Layer_Dependent`, which creates host names by appending integers + to the layer's short name. The other themes are: + + + `Baked_Goods` + + `Clouds` + + `European_Cities` + + `Fruits` + + `Greek_Deities` + + `Legendary_Creatures_from_Japan` + + `Planets_and_Moons` + + `Roman_Deities` + + `Scottish_Islands` + + `US_Cities` + + `Wild_Cats` + + + To obtain a generated host name, call `GetHostNameSuggestion`, which + returns a host name based on the current theme. + + :type default_availability_zone: string + :param default_availability_zone: The stack's default Availability + Zone, which must be in the specified region. For more information, + see `Regions and Endpoints`_. If you also specify a value for + `DefaultSubnetId`, the subnet must be in the same zone. For more + information, see the `VpcId` parameter description. + + :type default_subnet_id: string + :param default_subnet_id: The stack's default VPC subnet ID. This + parameter is required if you specify a value for the `VpcId` + parameter. All instances are launched into this subnet unless you + specify otherwise when you create the instance. If you also specify + a value for `DefaultAvailabilityZone`, the subnet must be in that + zone. For information on default values and when this parameter is + required, see the `VpcId` parameter description. + + :type custom_json: string + :param custom_json: A string that contains user-defined, custom JSON. + It is used to override the corresponding default stack + configuration JSON values. The string should be in the following + format and must escape characters such as '"'.: + `"{\"key1\": \"value1\", \"key2\": \"value2\",...}"` + + For more information on custom JSON, see `Use Custom JSON to Modify the + Stack Configuration JSON`_. + + :type configuration_manager: dict + :param configuration_manager: The configuration manager. When you create + a stack, we recommend that you use the configuration manager to + specify the Chef version, 0.9, 11.4, or 11.10. The default value is + currently 11.4.
+ + :type chef_configuration: dict + :param chef_configuration: A `ChefConfiguration` object that specifies + whether to enable Berkshelf and the Berkshelf version on Chef 11.10 + stacks. For more information, see `Create a New Stack`_. + + :type use_custom_cookbooks: boolean + :param use_custom_cookbooks: Whether the stack uses custom cookbooks. + + :type use_opsworks_security_groups: boolean + :param use_opsworks_security_groups: Whether to associate the AWS + OpsWorks built-in security groups with the stack's layers. + AWS OpsWorks provides a standard set of built-in security groups, one + for each layer, which are associated with layers by default. With + `UseOpsworksSecurityGroups` you can instead provide your own custom + security groups. `UseOpsworksSecurityGroups` has the following + settings: + + + + True - AWS OpsWorks automatically associates the appropriate built-in + security group with each layer (default setting). You can associate + additional security groups with a layer after you create it but you + cannot delete the built-in security group. + + False - AWS OpsWorks does not associate built-in security groups with + layers. You must create appropriate EC2 security groups and + associate a security group with each layer that you create. + However, you can still manually associate a built-in security group + with a layer on creation; custom security groups are required only + for those layers that need custom settings. + + + For more information, see `Create a New Stack`_. + + :type custom_cookbooks_source: dict + :param custom_cookbooks_source: Contains the information required to + retrieve an app or cookbook from a repository. For more + information, see `Creating Apps`_ or `Custom Recipes and + Cookbooks`_. + + :type default_ssh_key_name: string + :param default_ssh_key_name: A default SSH key for the stack instances. + You can override this value when you create or update an instance. + + :type default_root_device_type: string + :param default_root_device_type: The default root device type. This + value is used by default for all instances in the stack, but you + can override it when you create an instance. The default option is + `instance-store`. For more information, see `Storage for the Root + Device`_. 
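+
+        Example (an illustrative sketch, not from the upstream docs; both
+        ARNs are placeholders for an existing service role and instance
+        profile in your account)::
+
+            result = conn.create_stack(
+                name='my-stack',
+                region='us-east-1',
+                service_role_arn='arn:aws:iam::111122223333:role/aws-opsworks-service-role',
+                default_instance_profile_arn='arn:aws:iam::111122223333:instance-profile/aws-opsworks-ec2-role',
+            )
+            stack_id = result['StackId']  # ID parsed from the JSON response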
+ + """ + params = { + 'Name': name, + 'Region': region, + 'ServiceRoleArn': service_role_arn, + 'DefaultInstanceProfileArn': default_instance_profile_arn, + } + if vpc_id is not None: + params['VpcId'] = vpc_id + if attributes is not None: + params['Attributes'] = attributes + if default_os is not None: + params['DefaultOs'] = default_os + if hostname_theme is not None: + params['HostnameTheme'] = hostname_theme + if default_availability_zone is not None: + params['DefaultAvailabilityZone'] = default_availability_zone + if default_subnet_id is not None: + params['DefaultSubnetId'] = default_subnet_id + if custom_json is not None: + params['CustomJson'] = custom_json + if configuration_manager is not None: + params['ConfigurationManager'] = configuration_manager + if chef_configuration is not None: + params['ChefConfiguration'] = chef_configuration + if use_custom_cookbooks is not None: + params['UseCustomCookbooks'] = use_custom_cookbooks + if use_opsworks_security_groups is not None: + params['UseOpsworksSecurityGroups'] = use_opsworks_security_groups + if custom_cookbooks_source is not None: + params['CustomCookbooksSource'] = custom_cookbooks_source + if default_ssh_key_name is not None: + params['DefaultSshKeyName'] = default_ssh_key_name + if default_root_device_type is not None: + params['DefaultRootDeviceType'] = default_root_device_type + return self.make_request(action='CreateStack', + body=json.dumps(params)) + + def create_user_profile(self, iam_user_arn, ssh_username=None, + ssh_public_key=None, allow_self_management=None): + """ + Creates a new user profile. + + **Required Permissions**: To use this action, an IAM user must + have an attached policy that explicitly grants permissions. + For more information on user permissions, see `Managing User + Permissions`_. + + :type iam_user_arn: string + :param iam_user_arn: The user's IAM ARN. + + :type ssh_username: string + :param ssh_username: The user's SSH user name. The allowable characters + are [a-z], [A-Z], [0-9], '-', and '_'. If the specified name + includes other punctuation marks, AWS OpsWorks removes them. For + example, `my.name` will be changed to `myname`. If you do not + specify an SSH user name, AWS OpsWorks generates one from the IAM + user name. + + :type ssh_public_key: string + :param ssh_public_key: The user's public SSH key. + + :type allow_self_management: boolean + :param allow_self_management: Whether users can specify their own SSH + public key through the My Settings page. For more information, see + `Setting an IAM User's Public SSH Key`_. + + """ + params = {'IamUserArn': iam_user_arn, } + if ssh_username is not None: + params['SshUsername'] = ssh_username + if ssh_public_key is not None: + params['SshPublicKey'] = ssh_public_key + if allow_self_management is not None: + params['AllowSelfManagement'] = allow_self_management + return self.make_request(action='CreateUserProfile', + body=json.dumps(params)) + + def delete_app(self, app_id): + """ + Deletes a specified app. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type app_id: string + :param app_id: The app ID. 
+ + """ + params = {'AppId': app_id, } + return self.make_request(action='DeleteApp', + body=json.dumps(params)) + + def delete_instance(self, instance_id, delete_elastic_ip=None, + delete_volumes=None): + """ + Deletes a specified instance, which terminates the associated + Amazon EC2 instance. You must stop an instance before you can + delete it. + + For more information, see `Deleting Instances`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type instance_id: string + :param instance_id: The instance ID. + + :type delete_elastic_ip: boolean + :param delete_elastic_ip: Whether to delete the instance Elastic IP + address. + + :type delete_volumes: boolean + :param delete_volumes: Whether to delete the instance's Amazon EBS + volumes. + + """ + params = {'InstanceId': instance_id, } + if delete_elastic_ip is not None: + params['DeleteElasticIp'] = delete_elastic_ip + if delete_volumes is not None: + params['DeleteVolumes'] = delete_volumes + return self.make_request(action='DeleteInstance', + body=json.dumps(params)) + + def delete_layer(self, layer_id): + """ + Deletes a specified layer. You must first stop and then delete + all associated instances or unassign registered instances. For + more information, see `How to Delete a Layer`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type layer_id: string + :param layer_id: The layer ID. + + """ + params = {'LayerId': layer_id, } + return self.make_request(action='DeleteLayer', + body=json.dumps(params)) + + def delete_stack(self, stack_id): + """ + Deletes a specified stack. You must first delete all + instances, layers, and apps or deregister registered + instances. For more information, see `Shut Down a Stack`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type stack_id: string + :param stack_id: The stack ID. + + """ + params = {'StackId': stack_id, } + return self.make_request(action='DeleteStack', + body=json.dumps(params)) + + def delete_user_profile(self, iam_user_arn): + """ + Deletes a user profile. + + **Required Permissions**: To use this action, an IAM user must + have an attached policy that explicitly grants permissions. + For more information on user permissions, see `Managing User + Permissions`_. + + :type iam_user_arn: string + :param iam_user_arn: The user's IAM ARN. + + """ + params = {'IamUserArn': iam_user_arn, } + return self.make_request(action='DeleteUserProfile', + body=json.dumps(params)) + + def deregister_elastic_ip(self, elastic_ip): + """ + Deregisters a specified Elastic IP address. The address can + then be registered by another stack. For more information, see + `Resource Management`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. 
+ + :type elastic_ip: string + :param elastic_ip: The Elastic IP address. + + """ + params = {'ElasticIp': elastic_ip, } + return self.make_request(action='DeregisterElasticIp', + body=json.dumps(params)) + + def deregister_instance(self, instance_id): + """ + Deregisters a registered Amazon EC2 or on-premises instance. + This action removes the instance from the stack and returns it + to your control. This action cannot be used with instances + that were created with AWS OpsWorks. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type instance_id: string + :param instance_id: The instance ID. + + """ + params = {'InstanceId': instance_id, } + return self.make_request(action='DeregisterInstance', + body=json.dumps(params)) + + def deregister_rds_db_instance(self, rds_db_instance_arn): + """ + Deregisters an Amazon RDS instance. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type rds_db_instance_arn: string + :param rds_db_instance_arn: The Amazon RDS instance's ARN. + + """ + params = {'RdsDbInstanceArn': rds_db_instance_arn, } + return self.make_request(action='DeregisterRdsDbInstance', + body=json.dumps(params)) + + def deregister_volume(self, volume_id): + """ + Deregisters an Amazon EBS volume. The volume can then be + registered by another stack. For more information, see + `Resource Management`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type volume_id: string + :param volume_id: The volume ID. + + """ + params = {'VolumeId': volume_id, } + return self.make_request(action='DeregisterVolume', + body=json.dumps(params)) + + def describe_apps(self, stack_id=None, app_ids=None): + """ + Requests a description of a specified set of apps. + + + You must specify at least one of the parameters. + + + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + + :type stack_id: string + :param stack_id: The app stack ID. If you use this parameter, + `DescribeApps` returns a description of the apps in the specified + stack. + + :type app_ids: list + :param app_ids: An array of app IDs for the apps to be described. If + you use this parameter, `DescribeApps` returns a description of the + specified apps. Otherwise, it returns a description of every app. + + """ + params = {} + if stack_id is not None: + params['StackId'] = stack_id + if app_ids is not None: + params['AppIds'] = app_ids + return self.make_request(action='DescribeApps', + body=json.dumps(params)) + + def describe_commands(self, deployment_id=None, instance_id=None, + command_ids=None): + """ + Describes the results of specified commands. + + + You must specify at least one of the parameters.
+
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Show, Deploy, or Manage permissions level for the
+        stack, or an attached policy that explicitly grants
+        permissions. For more information on user permissions, see
+        `Managing User Permissions`_.
+
+        :type deployment_id: string
+        :param deployment_id: The deployment ID. If you include this parameter,
+            `DescribeCommands` returns a description of the commands associated
+            with the specified deployment.
+
+        :type instance_id: string
+        :param instance_id: The instance ID. If you include this parameter,
+            `DescribeCommands` returns a description of the commands associated
+            with the specified instance.
+
+        :type command_ids: list
+        :param command_ids: An array of command IDs. If you include this
+            parameter, `DescribeCommands` returns a description of the
+            specified commands. Otherwise, it returns a description of every
+            command.
+
+        """
+        params = {}
+        if deployment_id is not None:
+            params['DeploymentId'] = deployment_id
+        if instance_id is not None:
+            params['InstanceId'] = instance_id
+        if command_ids is not None:
+            params['CommandIds'] = command_ids
+        return self.make_request(action='DescribeCommands',
+                                 body=json.dumps(params))
+
+    def describe_deployments(self, stack_id=None, app_id=None,
+                             deployment_ids=None):
+        """
+        Requests a description of a specified set of deployments.
+
+
+        You must specify at least one of the parameters.
+
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Show, Deploy, or Manage permissions level for the
+        stack, or an attached policy that explicitly grants
+        permissions. For more information on user permissions, see
+        `Managing User Permissions`_.
+
+        :type stack_id: string
+        :param stack_id: The stack ID. If you include this parameter,
+            `DescribeDeployments` returns a description of the deployments
+            associated with the specified stack.
+
+        :type app_id: string
+        :param app_id: The app ID. If you include this parameter,
+            `DescribeDeployments` returns a description of the deployments
+            associated with the specified app.
+
+        :type deployment_ids: list
+        :param deployment_ids: An array of deployment IDs to be described. If
+            you include this parameter, `DescribeDeployments` returns a
+            description of the specified deployments. Otherwise, it returns a
+            description of every deployment.
+
+        """
+        params = {}
+        if stack_id is not None:
+            params['StackId'] = stack_id
+        if app_id is not None:
+            params['AppId'] = app_id
+        if deployment_ids is not None:
+            params['DeploymentIds'] = deployment_ids
+        return self.make_request(action='DescribeDeployments',
+                                 body=json.dumps(params))
+
+    def describe_elastic_ips(self, instance_id=None, stack_id=None, ips=None):
+        """
+        Describes `Elastic IP addresses`_.
+
+
+        You must specify at least one of the parameters.
+
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Show, Deploy, or Manage permissions level for the
+        stack, or an attached policy that explicitly grants
+        permissions. For more information on user permissions, see
+        `Managing User Permissions`_.
+
+        :type instance_id: string
+        :param instance_id: The instance ID. If you include this parameter,
+            `DescribeElasticIps` returns a description of the Elastic IP
+            addresses associated with the specified instance.
+
+        :type stack_id: string
+        :param stack_id: A stack ID. If you include this parameter,
+            `DescribeElasticIps` returns a description of the Elastic IP
+            addresses that are registered with the specified stack.
+ + :type ips: list + :param ips: An array of Elastic IP addresses to be described. If you + include this parameter, `DescribeElasticIps` returns a description + of the specified Elastic IP addresses. Otherwise, it returns a + description of every Elastic IP address. + + """ + params = {} + if instance_id is not None: + params['InstanceId'] = instance_id + if stack_id is not None: + params['StackId'] = stack_id + if ips is not None: + params['Ips'] = ips + return self.make_request(action='DescribeElasticIps', + body=json.dumps(params)) + + def describe_elastic_load_balancers(self, stack_id=None, layer_ids=None): + """ + Describes a stack's Elastic Load Balancing instances. + + + You must specify at least one of the parameters. + + + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + + :type stack_id: string + :param stack_id: A stack ID. The action describes the stack's Elastic + Load Balancing instances. + + :type layer_ids: list + :param layer_ids: A list of layer IDs. The action describes the Elastic + Load Balancing instances for the specified layers. + + """ + params = {} + if stack_id is not None: + params['StackId'] = stack_id + if layer_ids is not None: + params['LayerIds'] = layer_ids + return self.make_request(action='DescribeElasticLoadBalancers', + body=json.dumps(params)) + + def describe_instances(self, stack_id=None, layer_id=None, + instance_ids=None): + """ + Requests a description of a set of instances. + + + You must specify at least one of the parameters. + + + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + + :type stack_id: string + :param stack_id: A stack ID. If you use this parameter, + `DescribeInstances` returns descriptions of the instances + associated with the specified stack. + + :type layer_id: string + :param layer_id: A layer ID. If you use this parameter, + `DescribeInstances` returns descriptions of the instances + associated with the specified layer. + + :type instance_ids: list + :param instance_ids: An array of instance IDs to be described. If you + use this parameter, `DescribeInstances` returns a description of + the specified instances. Otherwise, it returns a description of + every instance. + + """ + params = {} + if stack_id is not None: + params['StackId'] = stack_id + if layer_id is not None: + params['LayerId'] = layer_id + if instance_ids is not None: + params['InstanceIds'] = instance_ids + return self.make_request(action='DescribeInstances', + body=json.dumps(params)) + + def describe_layers(self, stack_id=None, layer_ids=None): + """ + Requests a description of one or more layers in a specified + stack. + + + You must specify at least one of the parameters. + + + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + + :type stack_id: string + :param stack_id: The stack ID. + + :type layer_ids: list + :param layer_ids: An array of layer IDs that specify the layers to be + described. 
If you omit this parameter, `DescribeLayers` returns a + description of every layer in the specified stack. + + """ + params = {} + if stack_id is not None: + params['StackId'] = stack_id + if layer_ids is not None: + params['LayerIds'] = layer_ids + return self.make_request(action='DescribeLayers', + body=json.dumps(params)) + + def describe_load_based_auto_scaling(self, layer_ids): + """ + Describes load-based auto scaling configurations for specified + layers. + + + You must specify at least one of the parameters. + + + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + + :type layer_ids: list + :param layer_ids: An array of layer IDs. + + """ + params = {'LayerIds': layer_ids, } + return self.make_request(action='DescribeLoadBasedAutoScaling', + body=json.dumps(params)) + + def describe_my_user_profile(self): + """ + Describes a user's SSH information. + + **Required Permissions**: To use this action, an IAM user must + have self-management enabled or an attached policy that + explicitly grants permissions. For more information on user + permissions, see `Managing User Permissions`_. + + + """ + params = {} + return self.make_request(action='DescribeMyUserProfile', + body=json.dumps(params)) + + def describe_permissions(self, iam_user_arn=None, stack_id=None): + """ + Describes the permissions for a specified stack. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type iam_user_arn: string + :param iam_user_arn: The user's IAM ARN. For more information about IAM + ARNs, see `Using Identifiers`_. + + :type stack_id: string + :param stack_id: The stack ID. + + """ + params = {} + if iam_user_arn is not None: + params['IamUserArn'] = iam_user_arn + if stack_id is not None: + params['StackId'] = stack_id + return self.make_request(action='DescribePermissions', + body=json.dumps(params)) + + def describe_raid_arrays(self, instance_id=None, stack_id=None, + raid_array_ids=None): + """ + Describe an instance's RAID arrays. + + + You must specify at least one of the parameters. + + + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + + :type instance_id: string + :param instance_id: The instance ID. If you use this parameter, + `DescribeRaidArrays` returns descriptions of the RAID arrays + associated with the specified instance. + + :type stack_id: string + :param stack_id: The stack ID. + + :type raid_array_ids: list + :param raid_array_ids: An array of RAID array IDs. If you use this + parameter, `DescribeRaidArrays` returns descriptions of the + specified arrays. Otherwise, it returns a description of every + array. 
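+
+        A usage sketch (added for illustration; assumes an existing
+        OpsWorks connection ``conn`` and a hypothetical stack ID)::
+
+            result = conn.describe_raid_arrays(stack_id='my-stack-id')
+            arrays = result['RaidArrays']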
+ + """ + params = {} + if instance_id is not None: + params['InstanceId'] = instance_id + if stack_id is not None: + params['StackId'] = stack_id + if raid_array_ids is not None: + params['RaidArrayIds'] = raid_array_ids + return self.make_request(action='DescribeRaidArrays', + body=json.dumps(params)) + + def describe_rds_db_instances(self, stack_id, rds_db_instance_arns=None): + """ + Describes Amazon RDS instances. + + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + + :type stack_id: string + :param stack_id: The stack ID that the instances are registered with. + The operation returns descriptions of all registered Amazon RDS + instances. + + :type rds_db_instance_arns: list + :param rds_db_instance_arns: An array containing the ARNs of the + instances to be described. + + """ + params = {'StackId': stack_id, } + if rds_db_instance_arns is not None: + params['RdsDbInstanceArns'] = rds_db_instance_arns + return self.make_request(action='DescribeRdsDbInstances', + body=json.dumps(params)) + + def describe_service_errors(self, stack_id=None, instance_id=None, + service_error_ids=None): + """ + Describes AWS OpsWorks service errors. + + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + + :type stack_id: string + :param stack_id: The stack ID. If you use this parameter, + `DescribeServiceErrors` returns descriptions of the errors + associated with the specified stack. + + :type instance_id: string + :param instance_id: The instance ID. If you use this parameter, + `DescribeServiceErrors` returns descriptions of the errors + associated with the specified instance. + + :type service_error_ids: list + :param service_error_ids: An array of service error IDs. If you use + this parameter, `DescribeServiceErrors` returns descriptions of the + specified errors. Otherwise, it returns a description of every + error. + + """ + params = {} + if stack_id is not None: + params['StackId'] = stack_id + if instance_id is not None: + params['InstanceId'] = instance_id + if service_error_ids is not None: + params['ServiceErrorIds'] = service_error_ids + return self.make_request(action='DescribeServiceErrors', + body=json.dumps(params)) + + def describe_stack_provisioning_parameters(self, stack_id): + """ + Requests a description of a stack's provisioning parameters. + + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the stack + or an attached policy that explicitly grants permissions. For + more information on user permissions, see `Managing User + Permissions`_. + + :type stack_id: string + :param stack_id: The stack ID + + """ + params = {'StackId': stack_id, } + return self.make_request(action='DescribeStackProvisioningParameters', + body=json.dumps(params)) + + def describe_stack_summary(self, stack_id): + """ + Describes the number of layers and apps in a specified stack, + and the number of instances in each state, such as + `running_setup` or `online`. 
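+
+        A usage sketch (added for illustration; assumes an existing
+        OpsWorks connection ``conn`` and a hypothetical stack ID)::
+
+            summary = conn.describe_stack_summary('my-stack-id')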
+ + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + + :type stack_id: string + :param stack_id: The stack ID. + + """ + params = {'StackId': stack_id, } + return self.make_request(action='DescribeStackSummary', + body=json.dumps(params)) + + def describe_stacks(self, stack_ids=None): + """ + Requests a description of one or more stacks. + + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + + :type stack_ids: list + :param stack_ids: An array of stack IDs that specify the stacks to be + described. If you omit this parameter, `DescribeStacks` returns a + description of every stack. + + """ + params = {} + if stack_ids is not None: + params['StackIds'] = stack_ids + return self.make_request(action='DescribeStacks', + body=json.dumps(params)) + + def describe_time_based_auto_scaling(self, instance_ids): + """ + Describes time-based auto scaling configurations for specified + instances. + + + You must specify at least one of the parameters. + + + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + + :type instance_ids: list + :param instance_ids: An array of instance IDs. + + """ + params = {'InstanceIds': instance_ids, } + return self.make_request(action='DescribeTimeBasedAutoScaling', + body=json.dumps(params)) + + def describe_user_profiles(self, iam_user_arns=None): + """ + Describe specified users. + + **Required Permissions**: To use this action, an IAM user must + have an attached policy that explicitly grants permissions. + For more information on user permissions, see `Managing User + Permissions`_. + + :type iam_user_arns: list + :param iam_user_arns: An array of IAM user ARNs that identify the users + to be described. + + """ + params = {} + if iam_user_arns is not None: + params['IamUserArns'] = iam_user_arns + return self.make_request(action='DescribeUserProfiles', + body=json.dumps(params)) + + def describe_volumes(self, instance_id=None, stack_id=None, + raid_array_id=None, volume_ids=None): + """ + Describes an instance's Amazon EBS volumes. + + + You must specify at least one of the parameters. + + + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + + :type instance_id: string + :param instance_id: The instance ID. If you use this parameter, + `DescribeVolumes` returns descriptions of the volumes associated + with the specified instance. + + :type stack_id: string + :param stack_id: A stack ID. The action describes the stack's + registered Amazon EBS volumes. + + :type raid_array_id: string + :param raid_array_id: The RAID array ID. If you use this parameter, + `DescribeVolumes` returns descriptions of the volumes associated + with the specified RAID array. 
+
+        :type volume_ids: list
+        :param volume_ids: An array of volume IDs. If you use this parameter,
+            `DescribeVolumes` returns descriptions of the specified volumes.
+            Otherwise, it returns a description of every volume.
+
+        """
+        params = {}
+        if instance_id is not None:
+            params['InstanceId'] = instance_id
+        if stack_id is not None:
+            params['StackId'] = stack_id
+        if raid_array_id is not None:
+            params['RaidArrayId'] = raid_array_id
+        if volume_ids is not None:
+            params['VolumeIds'] = volume_ids
+        return self.make_request(action='DescribeVolumes',
+                                 body=json.dumps(params))
+
+    def detach_elastic_load_balancer(self, elastic_load_balancer_name,
+                                     layer_id):
+        """
+        Detaches a specified Elastic Load Balancing instance from its
+        layer.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type elastic_load_balancer_name: string
+        :param elastic_load_balancer_name: The Elastic Load Balancing
+            instance's name.
+
+        :type layer_id: string
+        :param layer_id: The ID of the layer that the Elastic Load Balancing
+            instance is attached to.
+
+        """
+        params = {
+            'ElasticLoadBalancerName': elastic_load_balancer_name,
+            'LayerId': layer_id,
+        }
+        return self.make_request(action='DetachElasticLoadBalancer',
+                                 body=json.dumps(params))
+
+    def disassociate_elastic_ip(self, elastic_ip):
+        """
+        Disassociates an Elastic IP address from its instance. The
+        address remains registered with the stack. For more
+        information, see `Resource Management`_.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type elastic_ip: string
+        :param elastic_ip: The Elastic IP address.
+
+        """
+        params = {'ElasticIp': elastic_ip, }
+        return self.make_request(action='DisassociateElasticIp',
+                                 body=json.dumps(params))
+
+    def get_hostname_suggestion(self, layer_id):
+        """
+        Gets a generated host name for the specified layer, based on
+        the current host name theme.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type layer_id: string
+        :param layer_id: The layer ID.
+
+        """
+        params = {'LayerId': layer_id, }
+        return self.make_request(action='GetHostnameSuggestion',
+                                 body=json.dumps(params))
+
+    def reboot_instance(self, instance_id):
+        """
+        Reboots a specified instance. For more information, see
+        `Starting, Stopping, and Rebooting Instances`_.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type instance_id: string
+        :param instance_id: The instance ID.
+
+        """
+        params = {'InstanceId': instance_id, }
+        return self.make_request(action='RebootInstance',
+                                 body=json.dumps(params))
+
+    def register_elastic_ip(self, elastic_ip, stack_id):
+        """
+        Registers an Elastic IP address with a specified stack. An
+        address can be registered with only one stack at a time. If
+        the address is already registered, you must first deregister
+        it by calling DeregisterElasticIp. For more information, see
+        `Resource Management`_.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type elastic_ip: string
+        :param elastic_ip: The Elastic IP address.
+
+        :type stack_id: string
+        :param stack_id: The stack ID.
+
+        """
+        params = {'ElasticIp': elastic_ip, 'StackId': stack_id, }
+        return self.make_request(action='RegisterElasticIp',
+                                 body=json.dumps(params))
+
+    def register_instance(self, stack_id, hostname=None, public_ip=None,
+                          private_ip=None, rsa_public_key=None,
+                          rsa_public_key_fingerprint=None,
+                          instance_identity=None):
+        """
+        Registers instances that were created outside of AWS OpsWorks
+        with a specified stack.
+
+        We do not recommend using this action to register instances.
+        The complete registration operation has two primary steps,
+        installing the AWS OpsWorks agent on the instance and
+        registering the instance with the stack. `RegisterInstance`
+        handles only the second step. You should instead use the AWS
+        CLI `register` command, which performs the entire registration
+        operation.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type stack_id: string
+        :param stack_id: The ID of the stack that the instance is to be
+            registered with.
+
+        :type hostname: string
+        :param hostname: The instance's hostname.
+
+        :type public_ip: string
+        :param public_ip: The instance's public IP address.
+
+        :type private_ip: string
+        :param private_ip: The instance's private IP address.
+
+        :type rsa_public_key: string
+        :param rsa_public_key: The instance's public RSA key. This key is used
+            to encrypt communication between the instance and the service.
+
+        :type rsa_public_key_fingerprint: string
+        :param rsa_public_key_fingerprint: The instance's public RSA key
+            fingerprint.
+
+        :type instance_identity: dict
+        :param instance_identity: An InstanceIdentity object that contains the
+            instance's identity.
+
+        """
+        params = {'StackId': stack_id, }
+        if hostname is not None:
+            params['Hostname'] = hostname
+        if public_ip is not None:
+            params['PublicIp'] = public_ip
+        if private_ip is not None:
+            params['PrivateIp'] = private_ip
+        if rsa_public_key is not None:
+            params['RsaPublicKey'] = rsa_public_key
+        if rsa_public_key_fingerprint is not None:
+            params['RsaPublicKeyFingerprint'] = rsa_public_key_fingerprint
+        if instance_identity is not None:
+            params['InstanceIdentity'] = instance_identity
+        return self.make_request(action='RegisterInstance',
+                                 body=json.dumps(params))
+
+    def register_rds_db_instance(self, stack_id, rds_db_instance_arn,
+                                 db_user, db_password):
+        """
+        Registers an Amazon RDS instance with a stack.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type stack_id: string
+        :param stack_id: The stack ID.
+
+        :type rds_db_instance_arn: string
+        :param rds_db_instance_arn: The Amazon RDS instance's ARN.
+ + :type db_user: string + :param db_user: The database's master user name. + + :type db_password: string + :param db_password: The database password. + + """ + params = { + 'StackId': stack_id, + 'RdsDbInstanceArn': rds_db_instance_arn, + 'DbUser': db_user, + 'DbPassword': db_password, + } + return self.make_request(action='RegisterRdsDbInstance', + body=json.dumps(params)) + + def register_volume(self, stack_id, ec_2_volume_id=None): + """ + Registers an Amazon EBS volume with a specified stack. A + volume can be registered with only one stack at a time. If the + volume is already registered, you must first deregister it by + calling DeregisterVolume. For more information, see `Resource + Management`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type ec_2_volume_id: string + :param ec_2_volume_id: The Amazon EBS volume ID. + + :type stack_id: string + :param stack_id: The stack ID. + + """ + params = {'StackId': stack_id, } + if ec_2_volume_id is not None: + params['Ec2VolumeId'] = ec_2_volume_id + return self.make_request(action='RegisterVolume', + body=json.dumps(params)) + + def set_load_based_auto_scaling(self, layer_id, enable=None, + up_scaling=None, down_scaling=None): + """ + Specify the load-based auto scaling configuration for a + specified layer. For more information, see `Managing Load with + Time-based and Load-based Instances`_. + + + To use load-based auto scaling, you must create a set of load- + based auto scaling instances. Load-based auto scaling operates + only on the instances from that set, so you must ensure that + you have created enough instances to handle the maximum + anticipated load. + + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type layer_id: string + :param layer_id: The layer ID. + + :type enable: boolean + :param enable: Enables load-based auto scaling for the layer. + + :type up_scaling: dict + :param up_scaling: An `AutoScalingThresholds` object with the upscaling + threshold configuration. If the load exceeds these thresholds for a + specified amount of time, AWS OpsWorks starts a specified number of + instances. + + :type down_scaling: dict + :param down_scaling: An `AutoScalingThresholds` object with the + downscaling threshold configuration. If the load falls below these + thresholds for a specified amount of time, AWS OpsWorks stops a + specified number of instances. + + """ + params = {'LayerId': layer_id, } + if enable is not None: + params['Enable'] = enable + if up_scaling is not None: + params['UpScaling'] = up_scaling + if down_scaling is not None: + params['DownScaling'] = down_scaling + return self.make_request(action='SetLoadBasedAutoScaling', + body=json.dumps(params)) + + def set_permission(self, stack_id, iam_user_arn, allow_ssh=None, + allow_sudo=None, level=None): + """ + Specifies a user's permissions. For more information, see + `Security and Permissions`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. 
For more + information on user permissions, see `Managing User + Permissions`_. + + :type stack_id: string + :param stack_id: The stack ID. + + :type iam_user_arn: string + :param iam_user_arn: The user's IAM ARN. + + :type allow_ssh: boolean + :param allow_ssh: The user is allowed to use SSH to communicate with + the instance. + + :type allow_sudo: boolean + :param allow_sudo: The user is allowed to use **sudo** to elevate + privileges. + + :type level: string + :param level: The user's permission level, which must be set to one of + the following strings. You cannot set your own permissions level. + + + `deny` + + `show` + + `deploy` + + `manage` + + `iam_only` + + + For more information on the permissions associated with these levels, + see `Managing User Permissions`_ + + """ + params = {'StackId': stack_id, 'IamUserArn': iam_user_arn, } + if allow_ssh is not None: + params['AllowSsh'] = allow_ssh + if allow_sudo is not None: + params['AllowSudo'] = allow_sudo + if level is not None: + params['Level'] = level + return self.make_request(action='SetPermission', + body=json.dumps(params)) + + def set_time_based_auto_scaling(self, instance_id, + auto_scaling_schedule=None): + """ + Specify the time-based auto scaling configuration for a + specified instance. For more information, see `Managing Load + with Time-based and Load-based Instances`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type instance_id: string + :param instance_id: The instance ID. + + :type auto_scaling_schedule: dict + :param auto_scaling_schedule: An `AutoScalingSchedule` with the + instance schedule. + + """ + params = {'InstanceId': instance_id, } + if auto_scaling_schedule is not None: + params['AutoScalingSchedule'] = auto_scaling_schedule + return self.make_request(action='SetTimeBasedAutoScaling', + body=json.dumps(params)) + + def start_instance(self, instance_id): + """ + Starts a specified instance. For more information, see + `Starting, Stopping, and Rebooting Instances`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type instance_id: string + :param instance_id: The instance ID. + + """ + params = {'InstanceId': instance_id, } + return self.make_request(action='StartInstance', + body=json.dumps(params)) + + def start_stack(self, stack_id): + """ + Starts a stack's instances. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type stack_id: string + :param stack_id: The stack ID. + + """ + params = {'StackId': stack_id, } + return self.make_request(action='StartStack', + body=json.dumps(params)) + + def stop_instance(self, instance_id): + """ + Stops a specified instance. When you stop a standard instance, + the data disappears and must be reinstalled when you restart + the instance. You can stop an Amazon EBS-backed instance + without losing data. For more information, see `Starting, + Stopping, and Rebooting Instances`_. 
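+
+        A usage sketch (added for illustration; assumes an existing
+        OpsWorks connection ``conn`` and a hypothetical instance ID)::
+
+            conn.stop_instance('my-instance-id')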
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type instance_id: string
+        :param instance_id: The instance ID.
+
+        """
+        params = {'InstanceId': instance_id, }
+        return self.make_request(action='StopInstance',
+                                 body=json.dumps(params))
+
+    def stop_stack(self, stack_id):
+        """
+        Stops a specified stack.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type stack_id: string
+        :param stack_id: The stack ID.
+
+        """
+        params = {'StackId': stack_id, }
+        return self.make_request(action='StopStack',
+                                 body=json.dumps(params))
+
+    def unassign_instance(self, instance_id):
+        """
+        Unassigns a registered instance from all of its layers. The
+        instance remains in the stack as an unassigned instance and
+        can be assigned to another layer, as needed. You cannot use
+        this action with instances that were created with AWS
+        OpsWorks.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type instance_id: string
+        :param instance_id: The instance ID.
+
+        """
+        params = {'InstanceId': instance_id, }
+        return self.make_request(action='UnassignInstance',
+                                 body=json.dumps(params))
+
+    def unassign_volume(self, volume_id):
+        """
+        Unassigns an assigned Amazon EBS volume. The volume remains
+        registered with the stack. For more information, see `Resource
+        Management`_.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type volume_id: string
+        :param volume_id: The volume ID.
+
+        """
+        params = {'VolumeId': volume_id, }
+        return self.make_request(action='UnassignVolume',
+                                 body=json.dumps(params))
+
+    def update_app(self, app_id, name=None, description=None,
+                   data_sources=None, type=None, app_source=None,
+                   domains=None, enable_ssl=None, ssl_configuration=None,
+                   attributes=None, environment=None):
+        """
+        Updates a specified app.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Deploy or Manage permissions level for the stack, or an
+        attached policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type app_id: string
+        :param app_id: The app ID.
+
+        :type name: string
+        :param name: The app name.
+
+        :type description: string
+        :param description: A description of the app.
+
+        :type data_sources: list
+        :param data_sources: The app's data sources.
+
+        :type type: string
+        :param type: The app type.
+
+        :type app_source: dict
+        :param app_source: A `Source` object that specifies the app repository.
+
+        :type domains: list
+        :param domains: The app's virtual host settings, with multiple domains
+            separated by commas. For example: `'www.example.com, example.com'`
+
+        :type enable_ssl: boolean
+        :param enable_ssl: Whether SSL is enabled for the app.
+ + :type ssl_configuration: dict + :param ssl_configuration: An `SslConfiguration` object with the SSL + configuration. + + :type attributes: map + :param attributes: One or more user-defined key/value pairs to be added + to the stack attributes. + + :type environment: list + :param environment: + An array of `EnvironmentVariable` objects that specify environment + variables to be associated with the app. You can specify up to ten + environment variables. After you deploy the app, these variables + are defined on the associated app server instances. + + This parameter is supported only by Chef 11.10 stacks. If you have + specified one or more environment variables, you cannot modify the + stack's Chef version. + + """ + params = {'AppId': app_id, } + if name is not None: + params['Name'] = name + if description is not None: + params['Description'] = description + if data_sources is not None: + params['DataSources'] = data_sources + if type is not None: + params['Type'] = type + if app_source is not None: + params['AppSource'] = app_source + if domains is not None: + params['Domains'] = domains + if enable_ssl is not None: + params['EnableSsl'] = enable_ssl + if ssl_configuration is not None: + params['SslConfiguration'] = ssl_configuration + if attributes is not None: + params['Attributes'] = attributes + if environment is not None: + params['Environment'] = environment + return self.make_request(action='UpdateApp', + body=json.dumps(params)) + + def update_elastic_ip(self, elastic_ip, name=None): + """ + Updates a registered Elastic IP address's name. For more + information, see `Resource Management`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type elastic_ip: string + :param elastic_ip: The address. + + :type name: string + :param name: The new name. + + """ + params = {'ElasticIp': elastic_ip, } + if name is not None: + params['Name'] = name + return self.make_request(action='UpdateElasticIp', + body=json.dumps(params)) + + def update_instance(self, instance_id, layer_ids=None, + instance_type=None, auto_scaling_type=None, + hostname=None, os=None, ami_id=None, + ssh_key_name=None, architecture=None, + install_updates_on_boot=None, ebs_optimized=None): + """ + Updates a specified instance. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type instance_id: string + :param instance_id: The instance ID. + + :type layer_ids: list + :param layer_ids: The instance's layer IDs. + + :type instance_type: string + :param instance_type: The instance type. AWS OpsWorks supports all + instance types except Cluster Compute, Cluster GPU, and High Memory + Cluster. For more information, see `Instance Families and Types`_. + The parameter values that you use to specify the various types are + in the API Name column of the Available Instance Types table. + + :type auto_scaling_type: string + :param auto_scaling_type: For load-based or time-based instances, the + type. + + :type hostname: string + :param hostname: The instance host name. + + :type os: string + :param os: The instance's operating system, which must be set to one of + the following. 
+
+        + Standard operating systems: An Amazon Linux version such as `Amazon
+          Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+        + Custom AMIs: `Custom`
+
+
+        The default option is the current Amazon Linux version, such as `Amazon
+        Linux 2014.09`. If you set this parameter to `Custom`, you must use
+        the CreateInstance action's AmiId parameter to specify the custom
+        AMI that you want to use. For more information on the standard
+        operating systems, see `Operating Systems`_. For more information on
+        how to use custom AMIs with OpsWorks, see `Using Custom AMIs`_.
+
+        :type ami_id: string
+        :param ami_id:
+            A custom AMI ID to be used to create the instance. The AMI should be
+            based on one of the standard AWS OpsWorks AMIs: Amazon Linux,
+            Ubuntu 12.04 LTS, or Ubuntu 14.04 LTS. For more information, see
+            `Instances`_.
+
+            If you specify a custom AMI, you must set `Os` to `Custom`.
+
+        :type ssh_key_name: string
+        :param ssh_key_name: The instance SSH key name.
+
+        :type architecture: string
+        :param architecture: The instance architecture. Instance types do not
+            necessarily support both architectures. For a list of the
+            architectures that are supported by the different instance types,
+            see `Instance Families and Types`_.
+
+        :type install_updates_on_boot: boolean
+        :param install_updates_on_boot:
+            Whether to install operating system and package updates when the
+            instance boots. The default value is `True`. To control when
+            updates are installed, set this value to `False`. You must then
+            update your instances manually by using CreateDeployment to run the
+            `update_dependencies` stack command or manually running `yum`
+            (Amazon Linux) or `apt-get` (Ubuntu) on the instances.
+
+
+            We strongly recommend using the default value of `True`, to ensure that
+            your instances have the latest security updates.
+
+        :type ebs_optimized: boolean
+        :param ebs_optimized: Whether this is an Amazon EBS-optimized instance.
+
+        """
+        params = {'InstanceId': instance_id, }
+        if layer_ids is not None:
+            params['LayerIds'] = layer_ids
+        if instance_type is not None:
+            params['InstanceType'] = instance_type
+        if auto_scaling_type is not None:
+            params['AutoScalingType'] = auto_scaling_type
+        if hostname is not None:
+            params['Hostname'] = hostname
+        if os is not None:
+            params['Os'] = os
+        if ami_id is not None:
+            params['AmiId'] = ami_id
+        if ssh_key_name is not None:
+            params['SshKeyName'] = ssh_key_name
+        if architecture is not None:
+            params['Architecture'] = architecture
+        if install_updates_on_boot is not None:
+            params['InstallUpdatesOnBoot'] = install_updates_on_boot
+        if ebs_optimized is not None:
+            params['EbsOptimized'] = ebs_optimized
+        return self.make_request(action='UpdateInstance',
+                                 body=json.dumps(params))
+
+    def update_layer(self, layer_id, name=None, shortname=None,
+                     attributes=None, custom_instance_profile_arn=None,
+                     custom_security_group_ids=None, packages=None,
+                     volume_configurations=None, enable_auto_healing=None,
+                     auto_assign_elastic_ips=None,
+                     auto_assign_public_ips=None, custom_recipes=None,
+                     install_updates_on_boot=None,
+                     use_ebs_optimized_instances=None,
+                     lifecycle_event_configuration=None):
+        """
+        Updates a specified layer.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type layer_id: string
+        :param layer_id: The layer ID.
+
+        :type name: string
+        :param name: The layer name, which is used by the console.
+
+        :type shortname: string
+        :param shortname: The layer short name, which is used internally by AWS
+            OpsWorks and by Chef. The short name is also used as the name for
+            the directory where your app files are installed. It can have a
+            maximum of 200 characters and must be in the following format:
+            /\A[a-z0-9\-\_\.]+\Z/.
+
+        :type attributes: map
+        :param attributes: One or more user-defined key/value pairs to be added
+            to the stack attributes.
+
+        :type custom_instance_profile_arn: string
+        :param custom_instance_profile_arn: The ARN of an IAM profile to be
+            used for all of the layer's EC2 instances. For more information
+            about IAM ARNs, see `Using Identifiers`_.
+
+        :type custom_security_group_ids: list
+        :param custom_security_group_ids: An array containing the layer's
+            custom security group IDs.
+
+        :type packages: list
+        :param packages: An array of `Package` objects that describe the
+            layer's packages.
+
+        :type volume_configurations: list
+        :param volume_configurations: A `VolumeConfigurations` object that
+            describes the layer's Amazon EBS volumes.
+
+        :type enable_auto_healing: boolean
+        :param enable_auto_healing: Whether to enable auto healing for the
+            layer.
+
+        :type auto_assign_elastic_ips: boolean
+        :param auto_assign_elastic_ips: Whether to automatically assign an
+            `Elastic IP address`_ to the layer's instances. For more
+            information, see `How to Edit a Layer`_.
+
+        :type auto_assign_public_ips: boolean
+        :param auto_assign_public_ips: For stacks that are running in a VPC,
+            whether to automatically assign a public IP address to the layer's
+            instances. For more information, see `How to Edit a Layer`_.
+
+        :type custom_recipes: dict
+        :param custom_recipes: A `LayerCustomRecipes` object that specifies the
+            layer's custom recipes.
+
+        :type install_updates_on_boot: boolean
+        :param install_updates_on_boot:
+            Whether to install operating system and package updates when the
+            instance boots. The default value is `True`. To control when
+            updates are installed, set this value to `False`. You must then
+            update your instances manually by using CreateDeployment to run the
+            `update_dependencies` stack command or manually running `yum`
+            (Amazon Linux) or `apt-get` (Ubuntu) on the instances.
+
+
+            We strongly recommend using the default value of `True`, to ensure that
+            your instances have the latest security updates.
+
+        :type use_ebs_optimized_instances: boolean
+        :param use_ebs_optimized_instances: Whether to use Amazon EBS-optimized
+            instances.
+ + :type lifecycle_event_configuration: dict + :param lifecycle_event_configuration: + + """ + params = {'LayerId': layer_id, } + if name is not None: + params['Name'] = name + if shortname is not None: + params['Shortname'] = shortname + if attributes is not None: + params['Attributes'] = attributes + if custom_instance_profile_arn is not None: + params['CustomInstanceProfileArn'] = custom_instance_profile_arn + if custom_security_group_ids is not None: + params['CustomSecurityGroupIds'] = custom_security_group_ids + if packages is not None: + params['Packages'] = packages + if volume_configurations is not None: + params['VolumeConfigurations'] = volume_configurations + if enable_auto_healing is not None: + params['EnableAutoHealing'] = enable_auto_healing + if auto_assign_elastic_ips is not None: + params['AutoAssignElasticIps'] = auto_assign_elastic_ips + if auto_assign_public_ips is not None: + params['AutoAssignPublicIps'] = auto_assign_public_ips + if custom_recipes is not None: + params['CustomRecipes'] = custom_recipes + if install_updates_on_boot is not None: + params['InstallUpdatesOnBoot'] = install_updates_on_boot + if use_ebs_optimized_instances is not None: + params['UseEbsOptimizedInstances'] = use_ebs_optimized_instances + if lifecycle_event_configuration is not None: + params['LifecycleEventConfiguration'] = lifecycle_event_configuration + return self.make_request(action='UpdateLayer', + body=json.dumps(params)) + + def update_my_user_profile(self, ssh_public_key=None): + """ + Updates a user's SSH public key. + + **Required Permissions**: To use this action, an IAM user must + have self-management enabled or an attached policy that + explicitly grants permissions. For more information on user + permissions, see `Managing User Permissions`_. + + :type ssh_public_key: string + :param ssh_public_key: The user's SSH public key. + + """ + params = {} + if ssh_public_key is not None: + params['SshPublicKey'] = ssh_public_key + return self.make_request(action='UpdateMyUserProfile', + body=json.dumps(params)) + + def update_rds_db_instance(self, rds_db_instance_arn, db_user=None, + db_password=None): + """ + Updates an Amazon RDS instance. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type rds_db_instance_arn: string + :param rds_db_instance_arn: The Amazon RDS instance's ARN. + + :type db_user: string + :param db_user: The master user name. + + :type db_password: string + :param db_password: The database password. + + """ + params = {'RdsDbInstanceArn': rds_db_instance_arn, } + if db_user is not None: + params['DbUser'] = db_user + if db_password is not None: + params['DbPassword'] = db_password + return self.make_request(action='UpdateRdsDbInstance', + body=json.dumps(params)) + + def update_stack(self, stack_id, name=None, attributes=None, + service_role_arn=None, + default_instance_profile_arn=None, default_os=None, + hostname_theme=None, default_availability_zone=None, + default_subnet_id=None, custom_json=None, + configuration_manager=None, chef_configuration=None, + use_custom_cookbooks=None, custom_cookbooks_source=None, + default_ssh_key_name=None, + default_root_device_type=None, + use_opsworks_security_groups=None): + """ + Updates a specified stack. 
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type stack_id: string
+        :param stack_id: The stack ID.
+
+        :type name: string
+        :param name: The stack's new name.
+
+        :type attributes: map
+        :param attributes: One or more user-defined key/value pairs to be added
+            to the stack attributes.
+
+        :type service_role_arn: string
+        :param service_role_arn:
+            The stack AWS Identity and Access Management (IAM) role, which allows
+            AWS OpsWorks to work with AWS resources on your behalf. You must
+            set this parameter to the Amazon Resource Name (ARN) for an
+            existing IAM role. For more information about IAM ARNs, see `Using
+            Identifiers`_.
+
+
+            You must set this parameter to a valid service role ARN or the action
+            will fail; there is no default value. You can specify the stack's
+            current service role ARN, if you prefer, but you must do so
+            explicitly.
+
+        :type default_instance_profile_arn: string
+        :param default_instance_profile_arn: The ARN of an IAM profile that is
+            the default profile for all of the stack's EC2 instances. For more
+            information about IAM ARNs, see `Using Identifiers`_.
+
+        :type default_os: string
+        :param default_os: The stack's operating system, which must be set to
+            one of the following.
+
+        + Standard operating systems: an Amazon Linux version such as `Amazon
+          Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+        + Custom AMIs: `Custom`. You specify the custom AMI you want to use
+          when you create instances.
+
+
+        The default option is the current Amazon Linux version.
+
+        :type hostname_theme: string
+        :param hostname_theme: The stack's new host name theme, with spaces
+            replaced by underscores. The theme is used to generate host names
+            for the stack's instances. By default, `HostnameTheme` is set to
+            `Layer_Dependent`, which creates host names by appending integers
+            to the layer's short name. The other themes are:
+
+        + `Baked_Goods`
+        + `Clouds`
+        + `European_Cities`
+        + `Fruits`
+        + `Greek_Deities`
+        + `Legendary_Creatures_from_Japan`
+        + `Planets_and_Moons`
+        + `Roman_Deities`
+        + `Scottish_Islands`
+        + `US_Cities`
+        + `Wild_Cats`
+
+
+        To obtain a generated host name, call `GetHostNameSuggestion`, which
+        returns a host name based on the current theme.
+
+        :type default_availability_zone: string
+        :param default_availability_zone: The stack's default Availability
+            Zone, which must be in the specified region. For more information,
+            see `Regions and Endpoints`_. If you also specify a value for
+            `DefaultSubnetId`, the subnet must be in the same zone. For more
+            information, see CreateStack.
+
+        :type default_subnet_id: string
+        :param default_subnet_id: The stack's default VPC subnet ID. This
+            parameter is required if you specify a value for the `VpcId`
+            parameter. All instances are launched into this subnet unless you
+            specify otherwise when you create the instance. If you also specify
+            a value for `DefaultAvailabilityZone`, the subnet must be in that
+            zone. For information on default values and when this parameter is
+            required, see the `VpcId` parameter description.
+
+        :type custom_json: string
+        :param custom_json: A string that contains user-defined, custom JSON.
+            It is used to override the corresponding default stack
+            configuration JSON values. The string should be in the following
+            format and must escape characters such as '"':
+            `"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`
+
+            For more information on custom JSON, see `Use Custom JSON to Modify
+            the Stack Configuration JSON`_.
+
+        :type configuration_manager: dict
+        :param configuration_manager: The configuration manager. When you update
+            a stack, we recommend that you use the configuration manager to
+            specify the Chef version, 0.9, 11.4, or 11.10. The default value is
+            currently 11.4.
+
+        :type chef_configuration: dict
+        :param chef_configuration: A `ChefConfiguration` object that specifies
+            whether to enable Berkshelf and the Berkshelf version on Chef 11.10
+            stacks. For more information, see `Create a New Stack`_.
+
+        :type use_custom_cookbooks: boolean
+        :param use_custom_cookbooks: Whether the stack uses custom cookbooks.
+
+        :type custom_cookbooks_source: dict
+        :param custom_cookbooks_source: Contains the information required to
+            retrieve an app or cookbook from a repository. For more
+            information, see `Creating Apps`_ or `Custom Recipes and
+            Cookbooks`_.
+
+        :type default_ssh_key_name: string
+        :param default_ssh_key_name: A default SSH key for the stack instances.
+            You can override this value when you create or update an instance.
+
+        :type default_root_device_type: string
+        :param default_root_device_type: The default root device type. This
+            value is used by default for all instances in the stack, but you
+            can override it when you create an instance. For more information,
+            see `Storage for the Root Device`_.
+
+        :type use_opsworks_security_groups: boolean
+        :param use_opsworks_security_groups: Whether to associate the AWS
+            OpsWorks built-in security groups with the stack's layers.
+            AWS OpsWorks provides a standard set of built-in security groups, one
+            for each layer, which are associated with layers by default.
+            `UseOpsworksSecurityGroups` allows you to instead provide your own
+            custom security groups. `UseOpsworksSecurityGroups` has the
+            following settings:
+
+
+        + True - AWS OpsWorks automatically associates the appropriate built-in
+          security group with each layer (default setting). You can associate
+          additional security groups with a layer after you create it but you
+          cannot delete the built-in security group.
+        + False - AWS OpsWorks does not associate built-in security groups with
+          layers. You must create appropriate EC2 security groups and
+          associate a security group with each layer that you create.
+          However, you can still manually associate a built-in security group
+          with a layer on creation; custom security groups are required only
+          for those layers that need custom settings.
+
+
+        For more information, see `Create a New Stack`_.
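+
+        A usage sketch (added for illustration; assumes an existing
+        OpsWorks connection ``conn``; the stack ID and role ARN are
+        hypothetical placeholders). Building `CustomJson` with
+        ``json.dumps`` produces the escaped string format described
+        above::
+
+            import json
+
+            conn.update_stack(
+                'my-stack-id',
+                service_role_arn='arn:aws:iam::111122223333:role/my-opsworks-role',
+                custom_json=json.dumps({'key1': 'value1'}))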
+ + """ + params = {'StackId': stack_id, } + if name is not None: + params['Name'] = name + if attributes is not None: + params['Attributes'] = attributes + if service_role_arn is not None: + params['ServiceRoleArn'] = service_role_arn + if default_instance_profile_arn is not None: + params['DefaultInstanceProfileArn'] = default_instance_profile_arn + if default_os is not None: + params['DefaultOs'] = default_os + if hostname_theme is not None: + params['HostnameTheme'] = hostname_theme + if default_availability_zone is not None: + params['DefaultAvailabilityZone'] = default_availability_zone + if default_subnet_id is not None: + params['DefaultSubnetId'] = default_subnet_id + if custom_json is not None: + params['CustomJson'] = custom_json + if configuration_manager is not None: + params['ConfigurationManager'] = configuration_manager + if chef_configuration is not None: + params['ChefConfiguration'] = chef_configuration + if use_custom_cookbooks is not None: + params['UseCustomCookbooks'] = use_custom_cookbooks + if custom_cookbooks_source is not None: + params['CustomCookbooksSource'] = custom_cookbooks_source + if default_ssh_key_name is not None: + params['DefaultSshKeyName'] = default_ssh_key_name + if default_root_device_type is not None: + params['DefaultRootDeviceType'] = default_root_device_type + if use_opsworks_security_groups is not None: + params['UseOpsworksSecurityGroups'] = use_opsworks_security_groups + return self.make_request(action='UpdateStack', + body=json.dumps(params)) + + def update_user_profile(self, iam_user_arn, ssh_username=None, + ssh_public_key=None, allow_self_management=None): + """ + Updates a specified user profile. + + **Required Permissions**: To use this action, an IAM user must + have an attached policy that explicitly grants permissions. + For more information on user permissions, see `Managing User + Permissions`_. + + :type iam_user_arn: string + :param iam_user_arn: The user IAM ARN. + + :type ssh_username: string + :param ssh_username: The user's SSH user name. The allowable characters + are [a-z], [A-Z], [0-9], '-', and '_'. If the specified name + includes other punctuation marks, AWS OpsWorks removes them. For + example, `my.name` will be changed to `myname`. If you do not + specify an SSH user name, AWS OpsWorks generates one from the IAM + user name. + + :type ssh_public_key: string + :param ssh_public_key: The user's new SSH public key. + + :type allow_self_management: boolean + :param allow_self_management: Whether users can specify their own SSH + public key through the My Settings page. For more information, see + `Managing User Permissions`_. + + """ + params = {'IamUserArn': iam_user_arn, } + if ssh_username is not None: + params['SshUsername'] = ssh_username + if ssh_public_key is not None: + params['SshPublicKey'] = ssh_public_key + if allow_self_management is not None: + params['AllowSelfManagement'] = allow_self_management + return self.make_request(action='UpdateUserProfile', + body=json.dumps(params)) + + def update_volume(self, volume_id, name=None, mount_point=None): + """ + Updates an Amazon EBS volume's name or mount point. For more + information, see `Resource Management`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type volume_id: string + :param volume_id: The volume ID. 
+ + :type name: string + :param name: The new name. + + :type mount_point: string + :param mount_point: The new mount point. + + """ + params = {'VolumeId': volume_id, } + if name is not None: + params['Name'] = name + if mount_point is not None: + params['MountPoint'] = mount_point + return self.make_request(action='UpdateVolume', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/plugin.py b/desktop/core/ext-py/boto-2.38.0/boto/plugin.py new file mode 100644 index 0000000000000000000000000000000000000000..2c2931c9dfab9873fcd17015a6f1d8ca16fd4ccb --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/plugin.py @@ -0,0 +1,93 @@ +# Copyright 2010 Google Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +""" +Implements plugin related api. + +To define a new plugin just subclass Plugin, like this. + +class AuthPlugin(Plugin): + pass + +Then start creating subclasses of your new plugin. + +class MyFancyAuth(AuthPlugin): + capability = ['sign', 'vmac'] + +The actual interface is duck typed. 
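+
+For example, once plugins have been loaded you can ask for every subclass
+of a plugin base class that advertises a given capability (a sketch reusing
+the hypothetical classes above):
+
+    for handler_cls in get_plugin(AuthPlugin, ['sign']):
+        handler = handler_cls()
+
+Note that get_plugin returns the matching classes, not instances.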
+""" + +import glob +import imp +import os.path + + +class Plugin(object): + """Base class for all plugins.""" + + capability = [] + + @classmethod + def is_capable(cls, requested_capability): + """Returns true if the requested capability is supported by this plugin + """ + for c in requested_capability: + if c not in cls.capability: + return False + return True + + +def get_plugin(cls, requested_capability=None): + if not requested_capability: + requested_capability = [] + result = [] + for handler in cls.__subclasses__(): + if handler.is_capable(requested_capability): + result.append(handler) + return result + + +def _import_module(filename): + (path, name) = os.path.split(filename) + (name, ext) = os.path.splitext(name) + + (file, filename, data) = imp.find_module(name, [path]) + try: + return imp.load_module(name, file, filename, data) + finally: + if file: + file.close() + +_plugin_loaded = False + + +def load_plugins(config): + global _plugin_loaded + if _plugin_loaded: + return + _plugin_loaded = True + + if not config.has_option('Plugin', 'plugin_directory'): + return + directory = config.get('Plugin', 'plugin_directory') + for file in glob.glob(os.path.join(directory, '*.py')): + _import_module(file) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/provider.py b/desktop/core/ext-py/boto-2.38.0/boto/provider.py new file mode 100644 index 0000000000000000000000000000000000000000..349a7a6c748337d10ded0ba3a3ac8065e4caf308 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/provider.py @@ -0,0 +1,452 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# Copyright 2010 Google Inc. +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# Copyright (c) 2011, Nexenta Systems Inc. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +""" +This class encapsulates the provider-specific header differences. 
+""" + +import os +from boto.compat import six +from datetime import datetime + +import boto +from boto import config +from boto.compat import expanduser +from boto.pyami.config import Config +from boto.gs.acl import ACL +from boto.gs.acl import CannedACLStrings as CannedGSACLStrings +from boto.s3.acl import CannedACLStrings as CannedS3ACLStrings +from boto.s3.acl import Policy + + +HEADER_PREFIX_KEY = 'header_prefix' +METADATA_PREFIX_KEY = 'metadata_prefix' + +AWS_HEADER_PREFIX = 'x-amz-' +GOOG_HEADER_PREFIX = 'x-goog-' + +ACL_HEADER_KEY = 'acl-header' +AUTH_HEADER_KEY = 'auth-header' +COPY_SOURCE_HEADER_KEY = 'copy-source-header' +COPY_SOURCE_VERSION_ID_HEADER_KEY = 'copy-source-version-id-header' +COPY_SOURCE_RANGE_HEADER_KEY = 'copy-source-range-header' +DELETE_MARKER_HEADER_KEY = 'delete-marker-header' +DATE_HEADER_KEY = 'date-header' +METADATA_DIRECTIVE_HEADER_KEY = 'metadata-directive-header' +RESUMABLE_UPLOAD_HEADER_KEY = 'resumable-upload-header' +SECURITY_TOKEN_HEADER_KEY = 'security-token-header' +STORAGE_CLASS_HEADER_KEY = 'storage-class' +MFA_HEADER_KEY = 'mfa-header' +SERVER_SIDE_ENCRYPTION_KEY = 'server-side-encryption-header' +VERSION_ID_HEADER_KEY = 'version-id-header' +RESTORE_HEADER_KEY = 'restore-header' + +STORAGE_COPY_ERROR = 'StorageCopyError' +STORAGE_CREATE_ERROR = 'StorageCreateError' +STORAGE_DATA_ERROR = 'StorageDataError' +STORAGE_PERMISSIONS_ERROR = 'StoragePermissionsError' +STORAGE_RESPONSE_ERROR = 'StorageResponseError' +NO_CREDENTIALS_PROVIDED = object() + + +class ProfileNotFoundError(ValueError): + pass + + +class Provider(object): + + CredentialMap = { + 'aws': ('aws_access_key_id', 'aws_secret_access_key', + 'aws_security_token', 'aws_profile'), + 'google': ('gs_access_key_id', 'gs_secret_access_key', + None, None), + } + + AclClassMap = { + 'aws': Policy, + 'google': ACL + } + + CannedAclsMap = { + 'aws': CannedS3ACLStrings, + 'google': CannedGSACLStrings + } + + HostKeyMap = { + 'aws': 's3', + 'google': 'gs' + } + + ChunkedTransferSupport = { + 'aws': False, + 'google': True + } + + MetadataServiceSupport = { + 'aws': True, + 'google': False + } + + # If you update this map please make sure to put "None" for the + # right-hand-side for any headers that don't apply to a provider, rather + # than simply leaving that header out (which would cause KeyErrors). 
+ HeaderInfoMap = { + 'aws': { + HEADER_PREFIX_KEY: AWS_HEADER_PREFIX, + METADATA_PREFIX_KEY: AWS_HEADER_PREFIX + 'meta-', + ACL_HEADER_KEY: AWS_HEADER_PREFIX + 'acl', + AUTH_HEADER_KEY: 'AWS', + COPY_SOURCE_HEADER_KEY: AWS_HEADER_PREFIX + 'copy-source', + COPY_SOURCE_VERSION_ID_HEADER_KEY: AWS_HEADER_PREFIX + + 'copy-source-version-id', + COPY_SOURCE_RANGE_HEADER_KEY: AWS_HEADER_PREFIX + + 'copy-source-range', + DATE_HEADER_KEY: AWS_HEADER_PREFIX + 'date', + DELETE_MARKER_HEADER_KEY: AWS_HEADER_PREFIX + 'delete-marker', + METADATA_DIRECTIVE_HEADER_KEY: AWS_HEADER_PREFIX + + 'metadata-directive', + RESUMABLE_UPLOAD_HEADER_KEY: None, + SECURITY_TOKEN_HEADER_KEY: AWS_HEADER_PREFIX + 'security-token', + SERVER_SIDE_ENCRYPTION_KEY: AWS_HEADER_PREFIX + + 'server-side-encryption', + VERSION_ID_HEADER_KEY: AWS_HEADER_PREFIX + 'version-id', + STORAGE_CLASS_HEADER_KEY: AWS_HEADER_PREFIX + 'storage-class', + MFA_HEADER_KEY: AWS_HEADER_PREFIX + 'mfa', + RESTORE_HEADER_KEY: AWS_HEADER_PREFIX + 'restore', + }, + 'google': { + HEADER_PREFIX_KEY: GOOG_HEADER_PREFIX, + METADATA_PREFIX_KEY: GOOG_HEADER_PREFIX + 'meta-', + ACL_HEADER_KEY: GOOG_HEADER_PREFIX + 'acl', + AUTH_HEADER_KEY: 'GOOG1', + COPY_SOURCE_HEADER_KEY: GOOG_HEADER_PREFIX + 'copy-source', + COPY_SOURCE_VERSION_ID_HEADER_KEY: GOOG_HEADER_PREFIX + + 'copy-source-version-id', + COPY_SOURCE_RANGE_HEADER_KEY: None, + DATE_HEADER_KEY: GOOG_HEADER_PREFIX + 'date', + DELETE_MARKER_HEADER_KEY: GOOG_HEADER_PREFIX + 'delete-marker', + METADATA_DIRECTIVE_HEADER_KEY: GOOG_HEADER_PREFIX + + 'metadata-directive', + RESUMABLE_UPLOAD_HEADER_KEY: GOOG_HEADER_PREFIX + 'resumable', + SECURITY_TOKEN_HEADER_KEY: GOOG_HEADER_PREFIX + 'security-token', + SERVER_SIDE_ENCRYPTION_KEY: None, + # Note that this version header is not to be confused with + # the Google Cloud Storage 'x-goog-api-version' header. + VERSION_ID_HEADER_KEY: GOOG_HEADER_PREFIX + 'version-id', + STORAGE_CLASS_HEADER_KEY: None, + MFA_HEADER_KEY: None, + RESTORE_HEADER_KEY: None, + } + } + + ErrorMap = { + 'aws': { + STORAGE_COPY_ERROR: boto.exception.S3CopyError, + STORAGE_CREATE_ERROR: boto.exception.S3CreateError, + STORAGE_DATA_ERROR: boto.exception.S3DataError, + STORAGE_PERMISSIONS_ERROR: boto.exception.S3PermissionsError, + STORAGE_RESPONSE_ERROR: boto.exception.S3ResponseError, + }, + 'google': { + STORAGE_COPY_ERROR: boto.exception.GSCopyError, + STORAGE_CREATE_ERROR: boto.exception.GSCreateError, + STORAGE_DATA_ERROR: boto.exception.GSDataError, + STORAGE_PERMISSIONS_ERROR: boto.exception.GSPermissionsError, + STORAGE_RESPONSE_ERROR: boto.exception.GSResponseError, + } + } + + def __init__(self, name, access_key=None, secret_key=None, + security_token=None, profile_name=None): + self.host = None + self.port = None + self.host_header = None + self.access_key = access_key + self.secret_key = secret_key + self.security_token = security_token + self.profile_name = profile_name + self.name = name + self.acl_class = self.AclClassMap[self.name] + self.canned_acls = self.CannedAclsMap[self.name] + self._credential_expiry_time = None + + # Load shared credentials file if it exists + shared_path = os.path.join(expanduser('~'), '.' + name, 'credentials') + self.shared_credentials = Config(do_load=False) + if os.path.isfile(shared_path): + self.shared_credentials.load_from_path(shared_path) + + self.get_credentials(access_key, secret_key, security_token, profile_name) + self.configure_headers() + self.configure_errors() + + # Allow config file to override default host and port. 
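+        #
+        # For example, a boto config file can point the 'aws' provider at an
+        # alternate S3 endpoint (host and port values are illustrative only):
+        #
+        #     [Credentials]
+        #     s3_host = s3.internal.example.com
+        #     s3_port = 8773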
+ host_opt_name = '%s_host' % self.HostKeyMap[self.name] + if config.has_option('Credentials', host_opt_name): + self.host = config.get('Credentials', host_opt_name) + port_opt_name = '%s_port' % self.HostKeyMap[self.name] + if config.has_option('Credentials', port_opt_name): + self.port = config.getint('Credentials', port_opt_name) + host_header_opt_name = '%s_host_header' % self.HostKeyMap[self.name] + if config.has_option('Credentials', host_header_opt_name): + self.host_header = config.get('Credentials', host_header_opt_name) + + def get_access_key(self): + if self._credentials_need_refresh(): + self._populate_keys_from_metadata_server() + return self._access_key + + def set_access_key(self, value): + self._access_key = value + + access_key = property(get_access_key, set_access_key) + + def get_secret_key(self): + if self._credentials_need_refresh(): + self._populate_keys_from_metadata_server() + return self._secret_key + + def set_secret_key(self, value): + self._secret_key = value + + secret_key = property(get_secret_key, set_secret_key) + + def get_security_token(self): + if self._credentials_need_refresh(): + self._populate_keys_from_metadata_server() + return self._security_token + + def set_security_token(self, value): + self._security_token = value + + security_token = property(get_security_token, set_security_token) + + def _credentials_need_refresh(self): + if self._credential_expiry_time is None: + return False + else: + # The credentials should be refreshed if they're going to expire + # in less than 5 minutes. + delta = self._credential_expiry_time - datetime.utcnow() + # python2.6 does not have timedelta.total_seconds() so we have + # to calculate this ourselves. This is straight from the + # datetime docs. + seconds_left = ( + (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) + * 10 ** 6) / 10 ** 6) + if seconds_left < (5 * 60): + boto.log.debug("Credentials need to be refreshed.") + return True + else: + return False + + def get_credentials(self, access_key=None, secret_key=None, + security_token=None, profile_name=None): + access_key_name, secret_key_name, security_token_name, \ + profile_name_name = self.CredentialMap[self.name] + + # Load profile from shared environment variable if it was not + # already passed in and the environment variable exists + if profile_name is None and profile_name_name is not None and \ + profile_name_name.upper() in os.environ: + profile_name = os.environ[profile_name_name.upper()] + + shared = self.shared_credentials + + if access_key is not None: + self.access_key = access_key + boto.log.debug("Using access key provided by client.") + elif access_key_name.upper() in os.environ: + self.access_key = os.environ[access_key_name.upper()] + boto.log.debug("Using access key found in environment variable.") + elif profile_name is not None: + if shared.has_option(profile_name, access_key_name): + self.access_key = shared.get(profile_name, access_key_name) + boto.log.debug("Using access key found in shared credential " + "file for profile %s." % profile_name) + elif config.has_option("profile %s" % profile_name, + access_key_name): + self.access_key = config.get("profile %s" % profile_name, + access_key_name) + boto.log.debug("Using access key found in config file: " + "profile %s." % profile_name) + else: + raise ProfileNotFoundError('Profile "%s" not found!' 
% + profile_name) + elif shared.has_option('default', access_key_name): + self.access_key = shared.get('default', access_key_name) + boto.log.debug("Using access key found in shared credential file.") + elif config.has_option('Credentials', access_key_name): + self.access_key = config.get('Credentials', access_key_name) + boto.log.debug("Using access key found in config file.") + + if secret_key is not None: + self.secret_key = secret_key + boto.log.debug("Using secret key provided by client.") + elif secret_key_name.upper() in os.environ: + self.secret_key = os.environ[secret_key_name.upper()] + boto.log.debug("Using secret key found in environment variable.") + elif profile_name is not None: + if shared.has_option(profile_name, secret_key_name): + self.secret_key = shared.get(profile_name, secret_key_name) + boto.log.debug("Using secret key found in shared credential " + "file for profile %s." % profile_name) + elif config.has_option("profile %s" % profile_name, secret_key_name): + self.secret_key = config.get("profile %s" % profile_name, + secret_key_name) + boto.log.debug("Using secret key found in config file: " + "profile %s." % profile_name) + else: + raise ProfileNotFoundError('Profile "%s" not found!' % + profile_name) + elif shared.has_option('default', secret_key_name): + self.secret_key = shared.get('default', secret_key_name) + boto.log.debug("Using secret key found in shared credential file.") + elif config.has_option('Credentials', secret_key_name): + self.secret_key = config.get('Credentials', secret_key_name) + boto.log.debug("Using secret key found in config file.") + elif config.has_option('Credentials', 'keyring'): + keyring_name = config.get('Credentials', 'keyring') + try: + import keyring + except ImportError: + boto.log.error("The keyring module could not be imported. " + "For keyring support, install the keyring " + "module.") + raise + self.secret_key = keyring.get_password( + keyring_name, self.access_key) + boto.log.debug("Using secret key found in keyring.") + + if security_token is not None: + self.security_token = security_token + boto.log.debug("Using security token provided by client.") + elif ((security_token_name is not None) and + (access_key is None) and (secret_key is None)): + # Only provide a token from the environment/config if the + # caller did not specify a key and secret. Otherwise an + # environment/config token could be paired with a + # different set of credentials provided by the caller + if security_token_name.upper() in os.environ: + self.security_token = os.environ[security_token_name.upper()] + boto.log.debug("Using security token found in environment" + " variable.") + elif shared.has_option(profile_name or 'default', + security_token_name): + self.security_token = shared.get(profile_name or 'default', + security_token_name) + boto.log.debug("Using security token found in shared " + "credential file.") + elif profile_name is not None: + if config.has_option("profile %s" % profile_name, + security_token_name): + boto.log.debug("config has option") + self.security_token = config.get("profile %s" % profile_name, + security_token_name) + boto.log.debug("Using security token found in config file: " + "profile %s." 
% profile_name) + elif config.has_option('Credentials', security_token_name): + self.security_token = config.get('Credentials', + security_token_name) + boto.log.debug("Using security token found in config file.") + + if ((self._access_key is None or self._secret_key is None) and + self.MetadataServiceSupport[self.name]): + self._populate_keys_from_metadata_server() + self._secret_key = self._convert_key_to_str(self._secret_key) + + def _populate_keys_from_metadata_server(self): + # get_instance_metadata is imported here because of a circular + # dependency. + boto.log.debug("Retrieving credentials from metadata server.") + from boto.utils import get_instance_metadata + timeout = config.getfloat('Boto', 'metadata_service_timeout', 1.0) + attempts = config.getint('Boto', 'metadata_service_num_attempts', 1) + # The num_retries arg is actually the total number of attempts made, + # so the config options is named *_num_attempts to make this more + # clear to users. + metadata = get_instance_metadata( + timeout=timeout, num_retries=attempts, + data='meta-data/iam/security-credentials/') + if metadata: + # I'm assuming there's only one role on the instance profile. + security = list(metadata.values())[0] + self._access_key = security['AccessKeyId'] + self._secret_key = self._convert_key_to_str(security['SecretAccessKey']) + self._security_token = security['Token'] + expires_at = security['Expiration'] + self._credential_expiry_time = datetime.strptime( + expires_at, "%Y-%m-%dT%H:%M:%SZ") + boto.log.debug("Retrieved credentials will expire in %s at: %s", + self._credential_expiry_time - datetime.now(), expires_at) + + def _convert_key_to_str(self, key): + if isinstance(key, six.text_type): + # the secret key must be bytes and not unicode to work + # properly with hmac.new (see http://bugs.python.org/issue5285) + return str(key) + return key + + def configure_headers(self): + header_info_map = self.HeaderInfoMap[self.name] + self.metadata_prefix = header_info_map[METADATA_PREFIX_KEY] + self.header_prefix = header_info_map[HEADER_PREFIX_KEY] + self.acl_header = header_info_map[ACL_HEADER_KEY] + self.auth_header = header_info_map[AUTH_HEADER_KEY] + self.copy_source_header = header_info_map[COPY_SOURCE_HEADER_KEY] + self.copy_source_version_id = header_info_map[ + COPY_SOURCE_VERSION_ID_HEADER_KEY] + self.copy_source_range_header = header_info_map[ + COPY_SOURCE_RANGE_HEADER_KEY] + self.date_header = header_info_map[DATE_HEADER_KEY] + self.delete_marker = header_info_map[DELETE_MARKER_HEADER_KEY] + self.metadata_directive_header = ( + header_info_map[METADATA_DIRECTIVE_HEADER_KEY]) + self.security_token_header = header_info_map[SECURITY_TOKEN_HEADER_KEY] + self.resumable_upload_header = ( + header_info_map[RESUMABLE_UPLOAD_HEADER_KEY]) + self.server_side_encryption_header = header_info_map[SERVER_SIDE_ENCRYPTION_KEY] + self.storage_class_header = header_info_map[STORAGE_CLASS_HEADER_KEY] + self.version_id = header_info_map[VERSION_ID_HEADER_KEY] + self.mfa_header = header_info_map[MFA_HEADER_KEY] + self.restore_header = header_info_map[RESTORE_HEADER_KEY] + + def configure_errors(self): + error_map = self.ErrorMap[self.name] + self.storage_copy_error = error_map[STORAGE_COPY_ERROR] + self.storage_create_error = error_map[STORAGE_CREATE_ERROR] + self.storage_data_error = error_map[STORAGE_DATA_ERROR] + self.storage_permissions_error = error_map[STORAGE_PERMISSIONS_ERROR] + self.storage_response_error = error_map[STORAGE_RESPONSE_ERROR] + + def get_provider_name(self): + return 
self.HostKeyMap[self.name] + + def supports_chunked_transfer(self): + return self.ChunkedTransferSupport[self.name] + + +# Static utility method for getting default Provider. +def get_default(): + return Provider('aws') diff --git a/desktop/core/ext-py/boto-2.38.0/boto/pyami/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/pyami/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..303dbb66c9ab50c1b236dff1085f2edb6292e489 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/pyami/__init__.py @@ -0,0 +1,22 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/pyami/bootstrap.py b/desktop/core/ext-py/boto-2.38.0/boto/pyami/bootstrap.py new file mode 100644 index 0000000000000000000000000000000000000000..82c2822edd9a15d958c8325b1c7df94e0197ac44 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/pyami/bootstrap.py @@ -0,0 +1,134 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import os +import boto +from boto.utils import get_instance_metadata, get_instance_userdata +from boto.pyami.config import Config, BotoConfigPath +from boto.pyami.scriptbase import ScriptBase +import time + +class Bootstrap(ScriptBase): + """ + The Bootstrap class is instantiated and run as part of the PyAMI + instance initialization process. 
The methods in this class will + be run from the rc.local script of the instance and will be run + as the root user. + + The main purpose of this class is to make sure the boto distribution + on the instance is the one required. + """ + + def __init__(self): + self.working_dir = '/mnt/pyami' + self.write_metadata() + super(Bootstrap, self).__init__() + + def write_metadata(self): + fp = open(os.path.expanduser(BotoConfigPath), 'w') + fp.write('[Instance]\n') + inst_data = get_instance_metadata() + for key in inst_data: + fp.write('%s = %s\n' % (key, inst_data[key])) + user_data = get_instance_userdata() + fp.write('\n%s\n' % user_data) + fp.write('[Pyami]\n') + fp.write('working_dir = %s\n' % self.working_dir) + fp.close() + # This file has the AWS credentials, should we lock it down? + # os.chmod(BotoConfigPath, stat.S_IREAD | stat.S_IWRITE) + # now that we have written the file, read it into a pyami Config object + boto.config = Config() + boto.init_logging() + + def create_working_dir(self): + boto.log.info('Working directory: %s' % self.working_dir) + if not os.path.exists(self.working_dir): + os.mkdir(self.working_dir) + + def load_boto(self): + update = boto.config.get('Boto', 'boto_update', 'svn:HEAD') + if update.startswith('svn'): + if update.find(':') >= 0: + method, version = update.split(':') + version = '-r%s' % version + else: + version = '-rHEAD' + location = boto.config.get('Boto', 'boto_location', '/usr/local/boto') + self.run('svn update %s %s' % (version, location)) + elif update.startswith('git'): + location = boto.config.get('Boto', 'boto_location', '/usr/share/python-support/python-boto/boto') + num_remaining_attempts = 10 + while num_remaining_attempts > 0: + num_remaining_attempts -= 1 + try: + self.run('git pull', cwd=location) + num_remaining_attempts = 0 + except Exception as e: + boto.log.info('git pull attempt failed with the following exception. Trying again in a bit. 
%s', e) + time.sleep(2) + if update.find(':') >= 0: + method, version = update.split(':') + else: + version = 'master' + self.run('git checkout %s' % version, cwd=location) + else: + # first remove the symlink needed when running from subversion + self.run('rm /usr/local/lib/python2.5/site-packages/boto') + self.run('easy_install %s' % update) + + def fetch_s3_file(self, s3_file): + try: + from boto.utils import fetch_file + f = fetch_file(s3_file) + path = os.path.join(self.working_dir, s3_file.split("/")[-1]) + open(path, "w").write(f.read()) + except: + boto.log.exception('Problem Retrieving file: %s' % s3_file) + path = None + return path + + def load_packages(self): + package_str = boto.config.get('Pyami', 'packages') + if package_str: + packages = package_str.split(',') + for package in packages: + package = package.strip() + if package.startswith('s3:'): + package = self.fetch_s3_file(package) + if package: + # if the "package" is really a .py file, it doesn't have to + # be installed, just being in the working dir is enough + if not package.endswith('.py'): + self.run('easy_install -Z %s' % package, exit_on_error=False) + + def main(self): + self.create_working_dir() + self.load_boto() + self.load_packages() + self.notify('Bootstrap Completed for %s' % boto.config.get_instance('instance-id')) + +if __name__ == "__main__": + # because bootstrap starts before any logging configuration can be loaded from + # the boto config files, we will manually enable logging to /var/log/boto.log + boto.set_file_logger('bootstrap', '/var/log/boto.log') + bs = Bootstrap() + bs.main() diff --git a/desktop/core/ext-py/boto-2.38.0/boto/pyami/config.py b/desktop/core/ext-py/boto-2.38.0/boto/pyami/config.py new file mode 100644 index 0000000000000000000000000000000000000000..a2194898963d3d4bd05634d5888a69e8052658b4 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/pyami/config.py @@ -0,0 +1,225 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import os +import re +import warnings + +import boto + +from boto.compat import expanduser, ConfigParser, StringIO + + +# By default we use two locations for the boto configurations, +# /etc/boto.cfg and ~/.boto (which works on Windows and Unix). 
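+#
+# For example (illustrative values; BOTO_PATH is split on os.pathsep):
+#
+#     BOTO_CONFIG=/opt/etc/boto.cfg    ->  only that single file is loaded
+#     BOTO_PATH=/etc/boto.cfg:~/.boto  ->  each listed path is consulted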
+BotoConfigPath = '/etc/boto.cfg'
+BotoConfigLocations = [BotoConfigPath]
+UserConfigPath = os.path.join(expanduser('~'), '.boto')
+BotoConfigLocations.append(UserConfigPath)
+
+# If there's a BOTO_CONFIG variable set, we load ONLY
+# that file.
+if 'BOTO_CONFIG' in os.environ:
+    BotoConfigLocations = [expanduser(os.environ['BOTO_CONFIG'])]
+
+# If there's a BOTO_PATH variable set, we use anything there
+# as the current configuration locations, split with os.pathsep.
+elif 'BOTO_PATH' in os.environ:
+    BotoConfigLocations = []
+    for path in os.environ['BOTO_PATH'].split(os.pathsep):
+        BotoConfigLocations.append(expanduser(path))
+
+
+class Config(ConfigParser):
+
+    def __init__(self, path=None, fp=None, do_load=True):
+        # We don't use ``super`` here, because ``ConfigParser`` still uses
+        # old-style classes.
+        ConfigParser.__init__(self, {'working_dir': '/mnt/pyami',
+                                     'debug': '0'})
+        if do_load:
+            if path:
+                self.load_from_path(path)
+            elif fp:
+                self.readfp(fp)
+            else:
+                self.read(BotoConfigLocations)
+            if "AWS_CREDENTIAL_FILE" in os.environ:
+                full_path = expanduser(os.environ['AWS_CREDENTIAL_FILE'])
+                try:
+                    self.load_credential_file(full_path)
+                except IOError:
+                    warnings.warn('Unable to load AWS_CREDENTIAL_FILE (%s)' % full_path)
+
+    def load_credential_file(self, path):
+        """Load a credential file that is set up like the Java command-line utilities expect"""
+        c_data = StringIO()
+        c_data.write("[Credentials]\n")
+        for line in open(path, "r").readlines():
+            c_data.write(line.replace("AWSAccessKeyId", "aws_access_key_id").replace("AWSSecretKey", "aws_secret_access_key"))
+        c_data.seek(0)
+        self.readfp(c_data)
+
+    def load_from_path(self, path):
+        file = open(path)
+        for line in file.readlines():
+            match = re.match("^#import[\s\t]*([^\s^\t]*)[\s\t]*$", line)
+            if match:
+                extended_file = match.group(1)
+                (dir, file) = os.path.split(path)
+                self.load_from_path(os.path.join(dir, extended_file))
+        self.read(path)
+
+    def save_option(self, path, section, option, value):
+        """
+        Write the specified Section.Option to the config file specified by path.
+        Replace any previous value. If the path doesn't exist, create it.
+        Also add the option to the in-memory config.
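+
+        Example (a sketch; the path, section, and value are illustrative):
+
+            config.save_option('/etc/boto.cfg', 'Boto', 'debug', '1')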
+ """ + config = ConfigParser() + config.read(path) + if not config.has_section(section): + config.add_section(section) + config.set(section, option, value) + fp = open(path, 'w') + config.write(fp) + fp.close() + if not self.has_section(section): + self.add_section(section) + self.set(section, option, value) + + def save_user_option(self, section, option, value): + self.save_option(UserConfigPath, section, option, value) + + def save_system_option(self, section, option, value): + self.save_option(BotoConfigPath, section, option, value) + + def get_instance(self, name, default=None): + try: + val = self.get('Instance', name) + except: + val = default + return val + + def get_user(self, name, default=None): + try: + val = self.get('User', name) + except: + val = default + return val + + def getint_user(self, name, default=0): + try: + val = self.getint('User', name) + except: + val = default + return val + + def get_value(self, section, name, default=None): + return self.get(section, name, default) + + def get(self, section, name, default=None): + try: + val = ConfigParser.get(self, section, name) + except: + val = default + return val + + def getint(self, section, name, default=0): + try: + val = ConfigParser.getint(self, section, name) + except: + val = int(default) + return val + + def getfloat(self, section, name, default=0.0): + try: + val = ConfigParser.getfloat(self, section, name) + except: + val = float(default) + return val + + def getbool(self, section, name, default=False): + if self.has_option(section, name): + val = self.get(section, name) + if val.lower() == 'true': + val = True + else: + val = False + else: + val = default + return val + + def setbool(self, section, name, value): + if value: + self.set(section, name, 'true') + else: + self.set(section, name, 'false') + + def dump(self): + s = StringIO() + self.write(s) + print(s.getvalue()) + + def dump_safe(self, fp=None): + if not fp: + fp = StringIO() + for section in self.sections(): + fp.write('[%s]\n' % section) + for option in self.options(section): + if option == 'aws_secret_access_key': + fp.write('%s = xxxxxxxxxxxxxxxxxx\n' % option) + else: + fp.write('%s = %s\n' % (option, self.get(section, option))) + + def dump_to_sdb(self, domain_name, item_name): + from boto.compat import json + sdb = boto.connect_sdb() + domain = sdb.lookup(domain_name) + if not domain: + domain = sdb.create_domain(domain_name) + item = domain.new_item(item_name) + item.active = False + for section in self.sections(): + d = {} + for option in self.options(section): + d[option] = self.get(section, option) + item[section] = json.dumps(d) + item.save() + + def load_from_sdb(self, domain_name, item_name): + from boto.compat import json + sdb = boto.connect_sdb() + domain = sdb.lookup(domain_name) + item = domain.get_item(item_name) + for section in item.keys(): + if not self.has_section(section): + self.add_section(section) + d = json.loads(item[section]) + for attr_name in d.keys(): + attr_value = d[attr_name] + if attr_value is None: + attr_value = 'None' + if isinstance(attr_value, bool): + self.setbool(section, attr_name, attr_value) + else: + self.set(section, attr_name, attr_value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/pyami/copybot.cfg b/desktop/core/ext-py/boto-2.38.0/boto/pyami/copybot.cfg new file mode 100644 index 0000000000000000000000000000000000000000..cbfdc5ad195d3710c3c0c65993f5103e73abd668 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/pyami/copybot.cfg @@ -0,0 +1,60 @@ +# +# Your AWS Credentials +# 
+[Credentials]
+aws_access_key_id =
+aws_secret_access_key =
+
+#
+# If you want to use a separate set of credentials when writing
+# to the destination bucket, put them here
+#dest_aws_access_key_id =
+#dest_aws_secret_access_key =
+
+#
+# Fill out this section if you want emails from CopyBot
+# when it starts and stops
+#
+[Notification]
+#smtp_host =
+#smtp_user =
+#smtp_pass =
+#smtp_from =
+#smtp_to =
+
+#
+# If you leave this section as is, it will automatically
+# update boto from subversion upon start up.
+# If you don't want that to happen, comment this out
+#
+[Boto]
+boto_location = /usr/local/boto
+boto_update = svn:HEAD
+
+#
+# This tells the Pyami code in boto what scripts
+# to run during startup
+#
+[Pyami]
+scripts = boto.pyami.copybot.CopyBot
+
+#
+# Source bucket and Destination Bucket, obviously.
+# If the Destination bucket does not exist, it will
+# attempt to create it.
+# If exit_on_completion is false, the instance
+# will keep running after the copy operation is
+# complete, which might be handy for debugging.
+# If copy_acls is false, the ACLs will not be
+# copied with the objects to the new bucket.
+# If replace_dst is false, copybot will only store
+# the source file in the dest if that file does not
+# already exist. If it's true it will replace it
+# even if it does exist.
+#
+[CopyBot]
+src_bucket =
+dst_bucket =
+exit_on_completion = true
+copy_acls = true
+replace_dst = true
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/pyami/copybot.py b/desktop/core/ext-py/boto-2.38.0/boto/pyami/copybot.py
new file mode 100644
index 0000000000000000000000000000000000000000..09a6d444c5062cf532313559ada2b6ba8ab7c937
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/pyami/copybot.py
@@ -0,0 +1,96 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
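+#
+# Usage sketch (assumed from copybot.cfg above; bucket names are
+# illustrative): CopyBot reads its settings from the [CopyBot] section of
+# the boto config on the instance, e.g.:
+#
+#     [CopyBot]
+#     src_bucket = my-source-bucket
+#     dst_bucket = my-dest-bucket
+#     exit_on_completion = true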
+#
+import boto
+from boto.pyami.scriptbase import ScriptBase
+import os, StringIO
+
+class CopyBot(ScriptBase):
+
+    def __init__(self):
+        super(CopyBot, self).__init__()
+        self.wdir = boto.config.get('Pyami', 'working_dir')
+        self.log_file = '%s.log' % self.instance_id
+        self.log_path = os.path.join(self.wdir, self.log_file)
+        boto.set_file_logger(self.name, self.log_path)
+        self.src_name = boto.config.get(self.name, 'src_bucket')
+        self.dst_name = boto.config.get(self.name, 'dst_bucket')
+        self.replace = boto.config.getbool(self.name, 'replace_dst', True)
+        s3 = boto.connect_s3()
+        self.src = s3.lookup(self.src_name)
+        if not self.src:
+            boto.log.error('Source bucket does not exist: %s' % self.src_name)
+        dest_access_key = boto.config.get(self.name, 'dest_aws_access_key_id', None)
+        if dest_access_key:
+            dest_secret_key = boto.config.get(self.name, 'dest_aws_secret_access_key', None)
+            s3 = boto.connect_s3(dest_access_key, dest_secret_key)
+        self.dst = s3.lookup(self.dst_name)
+        if not self.dst:
+            self.dst = s3.create_bucket(self.dst_name)
+
+    def copy_bucket_acl(self):
+        if boto.config.get(self.name, 'copy_acls', True):
+            acl = self.src.get_xml_acl()
+            self.dst.set_xml_acl(acl)
+
+    def copy_key_acl(self, src, dst):
+        if boto.config.get(self.name, 'copy_acls', True):
+            acl = src.get_xml_acl()
+            dst.set_xml_acl(acl)
+
+    def copy_keys(self):
+        boto.log.info('src=%s' % self.src.name)
+        boto.log.info('dst=%s' % self.dst.name)
+        try:
+            for key in self.src:
+                if not self.replace:
+                    exists = self.dst.lookup(key.name)
+                    if exists:
+                        boto.log.info('key=%s already exists in %s, skipping' % (key.name, self.dst.name))
+                        continue
+                boto.log.info('copying %d bytes from key=%s' % (key.size, key.name))
+                prefix, base = os.path.split(key.name)
+                path = os.path.join(self.wdir, base)
+                key.get_contents_to_filename(path)
+                new_key = self.dst.new_key(key.name)
+                new_key.set_contents_from_filename(path)
+                self.copy_key_acl(key, new_key)
+                os.unlink(path)
+        except:
+            boto.log.exception('Error copying key: %s' % key.name)
+
+    def copy_log(self):
+        key = self.dst.new_key(self.log_file)
+        key.set_contents_from_filename(self.log_path)
+
+    def main(self):
+        fp = StringIO.StringIO()
+        boto.config.dump_safe(fp)
+        self.notify('%s (%s) Starting' % (self.name, self.instance_id), fp.getvalue())
+        if self.src and self.dst:
+            self.copy_keys()
+        if self.dst:
+            self.copy_log()
+        self.notify('%s (%s) Stopping' % (self.name, self.instance_id),
+                    'Copy Operation Complete')
+        if boto.config.getbool(self.name, 'exit_on_completion', True):
+            ec2 = boto.connect_ec2()
+            ec2.terminate_instances([self.instance_id])
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/pyami/helloworld.py b/desktop/core/ext-py/boto-2.38.0/boto/pyami/helloworld.py
new file mode 100644
index 0000000000000000000000000000000000000000..b9b53b60c514f5e2f10d9a10f790edc33584e5a3
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/pyami/helloworld.py
@@ -0,0 +1,27 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.pyami.scriptbase import ScriptBase
+
+class HelloWorld(ScriptBase):
+
+    def main(self):
+        self.log('Hello World!!!')
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..44abd0d24a921b8952df7325e33dffbad76f4835
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/__init__.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.pyami.scriptbase import ScriptBase
+
+
+class Installer(ScriptBase):
+    """
+    Abstract base class for installers
+    """
+
+    def add_cron(self, name, minute, hour, mday, month, wday, who, command, env=None):
+        """
+        Add an entry to the system crontab.
+        """
+        raise NotImplementedError
+
+    def add_init_script(self, file):
+        """
+        Add this file to the init.d directory
+        """
+
+    def add_env(self, key, value):
+        """
+        Add an environment variable
+        """
+        raise NotImplementedError
+
+    def stop(self, service_name):
+        """
+        Stop a service.
+        """
+        raise NotImplementedError
+
+    def start(self, service_name):
+        """
+        Start a service.
+        """
+        raise NotImplementedError
+
+    def install(self):
+        """
+        Do whatever is necessary to "install" the package.
+ """ + raise NotImplementedError diff --git a/desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..60ee658e34a9c2fa23aed4fb1cf75c8ce513ae12 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/__init__.py @@ -0,0 +1,22 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/apache.py b/desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/apache.py new file mode 100644 index 0000000000000000000000000000000000000000..febc2dfa25e1d06c6421ac8666525746cc0d48a8 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/apache.py @@ -0,0 +1,43 @@ +# Copyright (c) 2008 Chris Moyer http://coredumped.org +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
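+#
+# Installer subclasses such as the one below are normally driven from the
+# [Pyami] section of the boto config on the instance (a sketch; the module
+# path shown is illustrative):
+#
+#     [Pyami]
+#     scripts = boto.pyami.installers.ubuntu.apache.Apache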
+#
+from boto.pyami.installers.ubuntu.installer import Installer
+
+class Apache(Installer):
+    """
+    Install apache2, mod_python, and libapache2-svn
+    """
+
+    def install(self):
+        self.run("apt-get update")
+        self.run('apt-get -y install apache2', notify=True, exit_on_error=True)
+        self.run('apt-get -y install libapache2-mod-python', notify=True, exit_on_error=True)
+        self.run('a2enmod rewrite', notify=True, exit_on_error=True)
+        self.run('a2enmod ssl', notify=True, exit_on_error=True)
+        self.run('a2enmod proxy', notify=True, exit_on_error=True)
+        self.run('a2enmod proxy_ajp', notify=True, exit_on_error=True)
+
+        # Hard reboot the apache2 server to enable these modules
+        self.stop("apache2")
+        self.start("apache2")
+
+    def main(self):
+        self.install()
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/ebs.py b/desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/ebs.py
new file mode 100644
index 0000000000000000000000000000000000000000..54a479859dca5d953c9bc432838ed21486169b66
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/ebs.py
@@ -0,0 +1,238 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+"""
+Automated installer to attach, format and mount an EBS volume.
+This installer assumes that you want the volume formatted as
+an XFS file system. To drive this installer, you need the
+following section in the boto config passed to the new instance.
+You also need to install dateutil by listing python-dateutil
+in the list of packages to be installed in the Pyami section
+of your boto config file.
+
+If there is already a device mounted at the specified mount point,
+the installer assumes that it is the ephemeral drive and unmounts
+it, remounts it as /tmp and chmods it to 777.
+
+Config file section::
+
+    [EBS]
+    volume_id =
+    logical_volume_name =
+    device =
+    mount_point =
+
+"""
+import boto
+from boto.manage.volume import Volume
+from boto.exception import EC2ResponseError
+import os, time
+from boto.pyami.installers.ubuntu.installer import Installer
+from string import Template
+
+BackupScriptTemplate = """#!/usr/bin/env python
+# Backup EBS volume
+import boto
+from boto.pyami.scriptbase import ScriptBase
+import traceback
+
+class Backup(ScriptBase):
+
+    def main(self):
+        try:
+            ec2 = boto.connect_ec2()
+            self.run("/usr/sbin/xfs_freeze -f ${mount_point}", exit_on_error = True)
+            snapshot = ec2.create_snapshot('${volume_id}')
+            boto.log.info("Snapshot created: %s " % snapshot)
+        except Exception, e:
+            self.notify(subject="${instance_id} Backup Failed", body=traceback.format_exc())
+        finally:
+            self.run("/usr/sbin/xfs_freeze -u ${mount_point}")
+
+if __name__ == "__main__":
+    b = Backup()
+    b.main()
+"""
+
+BackupCleanupScript = """#!/usr/bin/env python
+import boto
+from boto.manage.volume import Volume
+
+# Cleans Backups of EBS volumes
+
+for v in Volume.all():
+    v.trim_snapshots(True)
+"""
+
+TagBasedBackupCleanupScript = """#!/usr/bin/env python
+import boto
+
+# Cleans Backups of EBS volumes
+
+ec2 = boto.connect_ec2()
+ec2.trim_snapshots()
+"""
+
+class EBSInstaller(Installer):
+    """
+    Attach, format and mount the EBS volume described in the [EBS] section.
+    """
+
+    def __init__(self, config_file=None):
+        super(EBSInstaller, self).__init__(config_file)
+        self.instance_id = boto.config.get('Instance', 'instance-id')
+        self.device = boto.config.get('EBS', 'device', '/dev/sdp')
+        self.volume_id = boto.config.get('EBS', 'volume_id')
+        self.logical_volume_name = boto.config.get('EBS', 'logical_volume_name')
+        self.mount_point = boto.config.get('EBS', 'mount_point', '/ebs')
+
+    def attach(self):
+        ec2 = boto.connect_ec2()
+        if self.logical_volume_name:
+            # if a logical volume was specified, override the specified volume_id
+            # (if there was one) with the current AWS volume for the logical volume:
+            logical_volume = next(Volume.find(name=self.logical_volume_name))
+            self.volume_id = logical_volume._volume_id
+        volume = ec2.get_all_volumes([self.volume_id])[0]
+        # wait for the volume to be available. The volume may still be being created
+        # from a snapshot.
+        while volume.update() != 'available':
+            boto.log.info('Volume %s not yet available. Current status = %s.' % (volume.id, volume.status))
+            time.sleep(5)
+        instance = ec2.get_only_instances([self.instance_id])[0]
+        attempt_attach = True
+        while attempt_attach:
+            try:
+                ec2.attach_volume(self.volume_id, self.instance_id, self.device)
+                attempt_attach = False
+            except EC2ResponseError as e:
+                if e.error_code == 'IncorrectState':
+                    # if there's an EC2ResponseError with the code set to IncorrectState, delay a bit for ec2
+                    # to realize the instance is running, then try again. Otherwise, raise the error:
+                    boto.log.info('Attempt to attach the EBS volume %s to this instance (%s) returned %s. Trying again in a bit.'
% (self.volume_id, self.instance_id, e.errors)) + time.sleep(2) + else: + raise e + boto.log.info('Attached volume %s to instance %s as device %s' % (self.volume_id, self.instance_id, self.device)) + # now wait for the volume device to appear + while not os.path.exists(self.device): + boto.log.info('%s still does not exist, waiting 2 seconds' % self.device) + time.sleep(2) + + def make_fs(self): + boto.log.info('make_fs...') + has_fs = self.run('fsck %s' % self.device) + if has_fs != 0: + self.run('mkfs -t xfs %s' % self.device) + + def create_backup_script(self): + t = Template(BackupScriptTemplate) + s = t.substitute(volume_id=self.volume_id, instance_id=self.instance_id, + mount_point=self.mount_point) + fp = open('/usr/local/bin/ebs_backup', 'w') + fp.write(s) + fp.close() + self.run('chmod +x /usr/local/bin/ebs_backup') + + def create_backup_cleanup_script(self, use_tag_based_cleanup=False): + fp = open('/usr/local/bin/ebs_backup_cleanup', 'w') + if use_tag_based_cleanup: + fp.write(TagBasedBackupCleanupScript) + else: + fp.write(BackupCleanupScript) + fp.close() + self.run('chmod +x /usr/local/bin/ebs_backup_cleanup') + + def handle_mount_point(self): + boto.log.info('handle_mount_point') + if not os.path.isdir(self.mount_point): + boto.log.info('making directory') + # mount directory doesn't exist so create it + self.run("mkdir %s" % self.mount_point) + else: + boto.log.info('directory exists already') + self.run('mount -l') + lines = self.last_command.output.split('\n') + for line in lines: + t = line.split() + if t and t[2] == self.mount_point: + # something is already mounted at the mount point + # unmount that and mount it as /tmp + if t[0] != self.device: + self.run('umount %s' % self.mount_point) + self.run('mount %s /tmp' % t[0]) + break + self.run('chmod 777 /tmp') + # Mount up our new EBS volume onto mount_point + self.run("mount %s %s" % (self.device, self.mount_point)) + self.run('xfs_growfs %s' % self.mount_point) + + def update_fstab(self): + f = open("/etc/fstab", "a") + f.write('%s\t%s\txfs\tdefaults 0 0\n' % (self.device, self.mount_point)) + f.close() + + def install(self): + # First, find and attach the volume + self.attach() + + # Install the xfs tools + self.run('apt-get -y install xfsprogs xfsdump') + + # Check to see if the filesystem was created or not + self.make_fs() + + # create the /ebs directory for mounting + self.handle_mount_point() + + # create the backup script + self.create_backup_script() + + # Set up the backup script + minute = boto.config.get('EBS', 'backup_cron_minute', '0') + hour = boto.config.get('EBS', 'backup_cron_hour', '4,16') + self.add_cron("ebs_backup", "/usr/local/bin/ebs_backup", minute=minute, hour=hour) + + # Set up the backup cleanup script + minute = boto.config.get('EBS', 'backup_cleanup_cron_minute') + hour = boto.config.get('EBS', 'backup_cleanup_cron_hour') + if (minute is not None) and (hour is not None): + # Snapshot clean up can either be done via the manage module, or via the new tag based + # snapshot code, if the snapshots have been tagged with the name of the associated + # volume. 
Check for the presence of the new configuration flag, and use the appropriate
+            # cleanup method / script:
+            use_tag_based_cleanup = boto.config.has_option('EBS', 'use_tag_based_snapshot_cleanup')
+            self.create_backup_cleanup_script(use_tag_based_cleanup)
+            self.add_cron("ebs_backup_cleanup", "/usr/local/bin/ebs_backup_cleanup", minute=minute, hour=hour)
+
+        # Set up the fstab
+        self.update_fstab()
+
+    def main(self):
+        if not os.path.exists(self.device):
+            self.install()
+        else:
+            boto.log.info("Device %s is already attached, skipping EBS Installer" % self.device)
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/installer.py b/desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/installer.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a2abd908b4df177d568065d8f099fc4f1029866
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/installer.py
@@ -0,0 +1,94 @@
+# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import boto.pyami.installers
+import os
+import os.path
+import stat
+import boto
+import random
+from pwd import getpwnam
+
+class Installer(boto.pyami.installers.Installer):
+    """
+    Base Installer class for Ubuntu-based AMIs
+    """
+    def add_cron(self, name, command, minute="*", hour="*", mday="*", month="*", wday="*", who="root", env=None):
+        """
+        Write a file to /etc/cron.d to schedule a command
+            env is a dict containing environment variables you want to set in the file
+            name will be used as the name of the file
+        """
+        if minute == 'random':
+            minute = str(random.randrange(60))
+        if hour == 'random':
+            hour = str(random.randrange(24))
+        fp = open('/etc/cron.d/%s' % name, "w")
+        if env:
+            for key, value in env.items():
+                fp.write('%s=%s\n' % (key, value))
+        fp.write('%s %s %s %s %s %s %s\n' % (minute, hour, mday, month, wday, who, command))
+        fp.close()
+
+    def add_init_script(self, file, name):
+        """
+        Add this file to the init.d directory
+        """
+        f_path = os.path.join("/etc/init.d", name)
+        f = open(f_path, "w")
+        f.write(file)
+        f.close()
+        os.chmod(f_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
+        self.run("/usr/sbin/update-rc.d %s defaults" % name)
+
+    def add_env(self, key, value):
+        """
+        Add an environment variable
+        For Ubuntu, the best place is /etc/environment. Values placed here do
+        not need to be exported.
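+
+        A minimal illustrative call (the variable name and value are made
+        up for the example)::
+
+            installer.add_env('PYAMI_HOME', '/usr/local/pyami')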
+
+        """
+        boto.log.info('Adding env variable: %s=%s' % (key, value))
+        if not os.path.exists("/etc/environment.orig"):
+            self.run('cp /etc/environment /etc/environment.orig', notify=False, exit_on_error=False)
+        fp = open('/etc/environment', 'a')
+        fp.write('\n%s="%s"' % (key, value))
+        fp.close()
+        os.environ[key] = value
+
+    def stop(self, service_name):
+        self.run('/etc/init.d/%s stop' % service_name)
+
+    def start(self, service_name):
+        self.run('/etc/init.d/%s start' % service_name)
+
+    def create_user(self, user):
+        """
+        Create a user on the local system
+        """
+        self.run("useradd -m %s" % user)
+        usr = getpwnam(user)
+        return usr
+
+    def install(self):
+        """
+        This is the only method you need to override
+        """
+        raise NotImplementedError
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/mysql.py b/desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/mysql.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b0792ba9da9ebe0485db68ed2b3c6796b100d57
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/mysql.py
@@ -0,0 +1,108 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+"""
+This installer will install mysql-server on an Ubuntu machine.
+In addition to the normal installation done by apt-get, it will
+also configure the new MySQL server to store its data files in
+a different location. By default, this is /mnt but that can be
+configured in the [MySQL] section of the boto config file passed
+to the instance.
+"""
+from boto.pyami.installers.ubuntu.installer import Installer
+import os
+import boto
+from boto.utils import ShellCommand
+from boto.compat import ConfigParser
+import time
+
+ConfigSection = """
+[MySQL]
+root_password = <will be used as the MySQL root password, default none>
+data_dir = <new data dir for MySQL, default is /mnt>
+"""
+
+class MySQL(Installer):
+
+    def install(self):
+        self.run('apt-get update')
+        self.run('apt-get -y install mysql-server', notify=True, exit_on_error=True)
+
+#    def set_root_password(self, password=None):
+#        if not password:
+#            password = boto.config.get('MySQL', 'root_password')
+#        if password:
+#            self.run('mysqladmin -u root password %s' % password)
+#        return password
+
+    def change_data_dir(self, password=None):
+        data_dir = boto.config.get('MySQL', 'data_dir', '/mnt')
+        fresh_install = False
+        is_mysql_running_command = ShellCommand('mysqladmin ping')  # exit status 0 if mysql is running
+        is_mysql_running_command.run()
+        if is_mysql_running_command.getStatus() == 0:
+            # mysql is running.
This is the state apt-get will leave it in. If it isn't running,
+            # that means mysql was already installed on the AMI and there's no need to stop it,
+            # saving 40 seconds on instance startup.
+            time.sleep(10)  # trying to stop mysql immediately after installing it fails
+            # We need to wait until mysql creates the root account before we kill it
+            # or bad things will happen
+            i = 0
+            while self.run("echo 'quit' | mysql -u root") != 0 and i < 5:
+                time.sleep(5)
+                i = i + 1
+            self.run('/etc/init.d/mysql stop')
+            self.run("pkill -9 mysql")
+
+        mysql_path = os.path.join(data_dir, 'mysql')
+        if not os.path.exists(mysql_path):
+            self.run('mkdir %s' % mysql_path)
+            fresh_install = True
+        self.run('chown -R mysql:mysql %s' % mysql_path)
+        fp = open('/etc/mysql/conf.d/use_mnt.cnf', 'w')
+        fp.write('# created by pyami\n')
+        fp.write('# use the %s volume for data\n' % data_dir)
+        fp.write('[mysqld]\n')
+        fp.write('datadir = %s\n' % mysql_path)
+        fp.write('log_bin = %s\n' % os.path.join(mysql_path, 'mysql-bin.log'))
+        fp.close()
+        if fresh_install:
+            self.run('cp -pr /var/lib/mysql/* %s/' % mysql_path)
+            self.start('mysql')
+        else:
+            # get the password ubuntu expects to use:
+            config_parser = ConfigParser()
+            config_parser.read('/etc/mysql/debian.cnf')
+            password = config_parser.get('client', 'password')
+            # start the mysql daemon, then mysql with the required grant statement piped into it:
+            self.start('mysql')
+            time.sleep(10)  # time for mysql to start
+            grant_command = "echo \"GRANT ALL PRIVILEGES ON *.* TO 'debian-sys-maint'@'localhost' IDENTIFIED BY '%s' WITH GRANT OPTION;\" | mysql" % password
+            while self.run(grant_command) != 0:
+                time.sleep(5)
+            # leave mysqld running
+
+    def main(self):
+        self.install()
+        # change_data_dir runs 'mysql -u root' which assumes there is no mysql password,
+        # and changing that is too ugly to be worth it:
+        # self.set_root_password()
+        self.change_data_dir()
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/trac.py b/desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/trac.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c51c8f7201d3134d411dac1dc54021a560f61ba
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/trac.py
@@ -0,0 +1,139 @@
+# Copyright (c) 2008 Chris Moyer http://coredumped.org
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.pyami.installers.ubuntu.installer import Installer
+import boto
+import os
+
+class Trac(Installer):
+    """
+    Install Trac and DAV-SVN
+    Sets up a Vhost pointing to [Trac]->home
+    Using the config parameter [Trac]->hostname
+    Sets up a trac environment for every directory found under [Trac]->data_dir
+
+    [Trac]
+    name = My Foo Server
+    hostname = trac.foo.com
+    home = /mnt/sites/trac
+    data_dir = /mnt/trac
+    svn_dir = /mnt/subversion
+    server_admin = root@foo.com
+    sdb_auth_domain = users
+    # Optional
+    SSLCertificateFile = /mnt/ssl/foo.crt
+    SSLCertificateKeyFile = /mnt/ssl/foo.key
+    SSLCertificateChainFile = /mnt/ssl/FooCA.crt
+
+    """
+
+    def install(self):
+        self.run('apt-get -y install trac', notify=True, exit_on_error=True)
+        self.run('apt-get -y install libapache2-svn', notify=True, exit_on_error=True)
+        self.run("a2enmod ssl")
+        self.run("a2enmod mod_python")
+        self.run("a2enmod dav_svn")
+        self.run("a2enmod rewrite")
+        # Make sure that boto.log is writable by everyone so that subversion post-commit hooks can
+        # write to it.
+        self.run("touch /var/log/boto.log")
+        self.run("chmod a+w /var/log/boto.log")
+
+    def setup_vhost(self):
+        domain = boto.config.get("Trac", "hostname").strip()
+        if domain:
+            domain_info = domain.split('.')
+            cnf = open("/etc/apache2/sites-available/%s" % domain_info[0], "w")
+            cnf.write("NameVirtualHost *:80\n")
+            if boto.config.get("Trac", "SSLCertificateFile"):
+                cnf.write("NameVirtualHost *:443\n\n")
+                cnf.write("<VirtualHost *:80>\n")
+                cnf.write("\tServerAdmin %s\n" % boto.config.get("Trac", "server_admin").strip())
+                cnf.write("\tServerName %s\n" % domain)
+                cnf.write("\tRewriteEngine On\n")
+                cnf.write("\tRewriteRule ^(.*)$ https://%s$1\n" % domain)
+                cnf.write("</VirtualHost>\n\n")
+
+                cnf.write("<VirtualHost *:443>\n")
+            else:
+                cnf.write("<VirtualHost *:80>\n")
+
+            cnf.write("\tServerAdmin %s\n" % boto.config.get("Trac", "server_admin").strip())
+            cnf.write("\tServerName %s\n" % domain)
+            cnf.write("\tDocumentRoot %s\n" % boto.config.get("Trac", "home").strip())
+
+            cnf.write("\t<Directory %s>\n" % boto.config.get("Trac", "home").strip())
+            cnf.write("\t\tOptions FollowSymLinks Indexes MultiViews\n")
+            cnf.write("\t\tAllowOverride All\n")
+            cnf.write("\t\tOrder allow,deny\n")
+            cnf.write("\t\tallow from all\n")
+            cnf.write("\t</Directory>\n")
+
+            cnf.write("\t<LocationMatch \"/login\">\n")
+            cnf.write("\t\tAuthType Basic\n")
+            cnf.write("\t\tAuthName \"%s\"\n" % boto.config.get("Trac", "name"))
+            cnf.write("\t\tRequire valid-user\n")
+            cnf.write("\t\tAuthUserFile /mnt/apache/passwd/passwords\n")
+            cnf.write("\t</LocationMatch>\n")
+
+            data_dir = boto.config.get("Trac", "data_dir")
+            for env in os.listdir(data_dir):
+                if(env[0] != "."):
+                    cnf.write("\t<Location /trac/%s>\n" % env)
+                    cnf.write("\t\tSetHandler mod_python\n")
+                    cnf.write("\t\tPythonInterpreter main_interpreter\n")
+                    cnf.write("\t\tPythonHandler trac.web.modpython_frontend\n")
+                    cnf.write("\t\tPythonOption TracEnv %s/%s\n" % (data_dir, env))
+                    cnf.write("\t\tPythonOption TracUriRoot /trac/%s\n" % env)
+                    cnf.write("\t</Location>\n")
+
+            svn_dir = boto.config.get("Trac", "svn_dir")
+            for env in os.listdir(svn_dir):
+                if(env[0] != "."):
+                    cnf.write("\t<Location /svn/%s>\n" % env)
+                    cnf.write("\t\tDAV svn\n")
+                    cnf.write("\t\tSVNPath %s/%s\n" % (svn_dir, env))
+                    cnf.write("\t</Location>\n")
+
+            cnf.write("\tErrorLog /var/log/apache2/error.log\n")
+            cnf.write("\tLogLevel warn\n")
+            cnf.write("\tCustomLog /var/log/apache2/access.log combined\n")
+            cnf.write("\tServerSignature On\n")
+            SSLCertificateFile = boto.config.get("Trac", "SSLCertificateFile")
+            if SSLCertificateFile:
+                cnf.write("\tSSLEngine On\n")
+                cnf.write("\tSSLCertificateFile %s\n" % SSLCertificateFile)
+
+                
SSLCertificateKeyFile = boto.config.get("Trac", "SSLCertificateKeyFile")
+                if SSLCertificateKeyFile:
+                    cnf.write("\tSSLCertificateKeyFile %s\n" % SSLCertificateKeyFile)
+
+                SSLCertificateChainFile = boto.config.get("Trac", "SSLCertificateChainFile")
+                if SSLCertificateChainFile:
+                    cnf.write("\tSSLCertificateChainFile %s\n" % SSLCertificateChainFile)
+            cnf.write("</VirtualHost>\n")
+            cnf.close()
+            self.run("a2ensite %s" % domain_info[0])
+            self.run("/etc/init.d/apache2 force-reload")
+
+    def main(self):
+        self.install()
+        self.setup_vhost()
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/pyami/launch_ami.py b/desktop/core/ext-py/boto-2.38.0/boto/pyami/launch_ami.py
new file mode 100755
index 0000000000000000000000000000000000000000..9037217b617a7873f8529c345e4a065b610de3e8
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/pyami/launch_ami.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import getopt
+import sys
+import imp
+import time
+import boto
+
+usage_string = """
+SYNOPSIS
+    launch_ami.py -a ami_id [-b script_bucket] [-s script_name]
+                  [-m module] [-c class_name] [-r]
+                  [-g group] [-k key_name] [-n num_instances]
+                  [-i input_queue_name] [-o output_queue_name]
+                  [-w] [extra_data]
+    Where:
+        ami_id - the id of the AMI you wish to launch
+        module - The name of the Python module containing the class you
+                 want to run when the instance is started. If you use this
+                 option the Python module must already be stored on the
+                 instance in a location that is on the Python path.
+        script_name - The name of a local Python module that you would like
+                      to have copied to S3 and then run on the instance
+                      when it is started. The specified module must be
+                      import'able (i.e. in your local Python path). It
+                      will then be copied to the specified bucket in S3
+                      (see the -b option). Once the new instance(s)
+                      start up the script will be copied from S3 and then
+                      run locally on the instance.
+        class_name - The name of the class to be instantiated within the
+                     module or script file specified.
+ script_bucket - the name of the bucket in which the script will be + stored + group - the name of the security group the instance will run in + key_name - the name of the keypair to use when launching the AMI + num_instances - how many instances of the AMI to launch (default 1) + input_queue_name - Name of SQS to read input messages from + output_queue_name - Name of SQS to write output messages to + extra_data - additional name-value pairs that will be passed as + userdata to the newly launched instance. These should + be of the form "name=value" + The -r option reloads the Python module to S3 without launching + another instance. This can be useful during debugging to allow + you to test a new version of your script without shutting down + your instance and starting up another one. + The -w option tells the script to run synchronously, meaning to + wait until the instance is actually up and running. It then prints + the IP address and internal and external DNS names before exiting. +""" + +def usage(): + print(usage_string) + sys.exit() + +def main(): + try: + opts, args = getopt.getopt(sys.argv[1:], 'a:b:c:g:hi:k:m:n:o:rs:w', + ['ami', 'bucket', 'class', 'group', 'help', + 'inputqueue', 'keypair', 'module', + 'numinstances', 'outputqueue', + 'reload', 'script_name', 'wait']) + except: + usage() + params = {'module_name': None, + 'script_name': None, + 'class_name': None, + 'script_bucket': None, + 'group': 'default', + 'keypair': None, + 'ami': None, + 'num_instances': 1, + 'input_queue_name': None, + 'output_queue_name': None} + reload = None + wait = None + for o, a in opts: + if o in ('-a', '--ami'): + params['ami'] = a + if o in ('-b', '--bucket'): + params['script_bucket'] = a + if o in ('-c', '--class'): + params['class_name'] = a + if o in ('-g', '--group'): + params['group'] = a + if o in ('-h', '--help'): + usage() + if o in ('-i', '--inputqueue'): + params['input_queue_name'] = a + if o in ('-k', '--keypair'): + params['keypair'] = a + if o in ('-m', '--module'): + params['module_name'] = a + if o in ('-n', '--num_instances'): + params['num_instances'] = int(a) + if o in ('-o', '--outputqueue'): + params['output_queue_name'] = a + if o in ('-r', '--reload'): + reload = True + if o in ('-s', '--script'): + params['script_name'] = a + if o in ('-w', '--wait'): + wait = True + + # check required fields + required = ['ami'] + for pname in required: + if not params.get(pname, None): + print('%s is required' % pname) + usage() + if params['script_name']: + # first copy the desired module file to S3 bucket + if reload: + print('Reloading module %s to S3' % params['script_name']) + else: + print('Copying module %s to S3' % params['script_name']) + l = imp.find_module(params['script_name']) + c = boto.connect_s3() + bucket = c.get_bucket(params['script_bucket']) + key = bucket.new_key(params['script_name'] + '.py') + key.set_contents_from_file(l[0]) + params['script_md5'] = key.md5 + # we have everything we need, now build userdata string + l = [] + for k, v in params.items(): + if v: + l.append('%s=%s' % (k, v)) + c = boto.connect_ec2() + l.append('aws_access_key_id=%s' % c.aws_access_key_id) + l.append('aws_secret_access_key=%s' % c.aws_secret_access_key) + for kv in args: + l.append(kv) + s = '|'.join(l) + if not reload: + rs = c.get_all_images([params['ami']]) + img = rs[0] + r = img.run(user_data=s, key_name=params['keypair'], + security_groups=[params['group']], + max_count=params.get('num_instances', 1)) + print('AMI: %s - %s (Started)' % (params['ami'], img.location)) + 
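+        # The pipe-delimited name=value string built above travels to the new
+        # instance(s) as EC2 user data; on the instance, the pyami boot code
+        # reads it back into boto's config, which is how the module/class
+        # settings and queue names above reach the started script.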
print('Reservation %s contains the following instances:' % r.id) + for i in r.instances: + print('\t%s' % i.id) + if wait: + running = False + while not running: + time.sleep(30) + [i.update() for i in r.instances] + status = [i.state for i in r.instances] + print(status) + if status.count('running') == len(r.instances): + running = True + for i in r.instances: + print('Instance: %s' % i.ami_launch_index) + print('Public DNS Name: %s' % i.public_dns_name) + print('Private DNS Name: %s' % i.private_dns_name) + +if __name__ == "__main__": + main() diff --git a/desktop/core/ext-py/boto-2.38.0/boto/pyami/scriptbase.py b/desktop/core/ext-py/boto-2.38.0/boto/pyami/scriptbase.py new file mode 100644 index 0000000000000000000000000000000000000000..d99a2b46e0978b726f27235645c686b1acdfbbce --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/pyami/scriptbase.py @@ -0,0 +1,43 @@ +import os +import sys +from boto.utils import ShellCommand, get_ts +import boto +import boto.utils + +class ScriptBase(object): + + def __init__(self, config_file=None): + self.instance_id = boto.config.get('Instance', 'instance-id', 'default') + self.name = self.__class__.__name__ + self.ts = get_ts() + if config_file: + boto.config.read(config_file) + + def notify(self, subject, body=''): + boto.utils.notify(subject, body) + + def mkdir(self, path): + if not os.path.isdir(path): + try: + os.mkdir(path) + except: + boto.log.error('Error creating directory: %s' % path) + + def umount(self, path): + if os.path.ismount(path): + self.run('umount %s' % path) + + def run(self, command, notify=True, exit_on_error=False, cwd=None): + self.last_command = ShellCommand(command, cwd=cwd) + if self.last_command.status != 0: + boto.log.error('Error running command: "%s". Output: "%s"' % (command, self.last_command.output)) + if notify: + self.notify('Error encountered', + 'Error running the following command:\n\t%s\n\nCommand output:\n\t%s' % \ + (command, self.last_command.output)) + if exit_on_error: + sys.exit(-1) + return self.last_command.status + + def main(self): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/pyami/startup.py b/desktop/core/ext-py/boto-2.38.0/boto/pyami/startup.py new file mode 100644 index 0000000000000000000000000000000000000000..4bd9dadd89e67171d1cb013902bf634ed3bf6c2d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/pyami/startup.py @@ -0,0 +1,60 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# +import sys +import boto +from boto.utils import find_class +from boto import config +from boto.pyami.scriptbase import ScriptBase + + +class Startup(ScriptBase): + + def run_scripts(self): + scripts = config.get('Pyami', 'scripts') + if scripts: + for script in scripts.split(','): + script = script.strip(" ") + try: + pos = script.rfind('.') + if pos > 0: + mod_name = script[0:pos] + cls_name = script[pos + 1:] + cls = find_class(mod_name, cls_name) + boto.log.info('Running Script: %s' % script) + s = cls() + s.main() + else: + boto.log.warning('Trouble parsing script: %s' % script) + except Exception as e: + boto.log.exception('Problem Running Script: %s. Startup process halting.' % script) + raise e + + def main(self): + self.run_scripts() + self.notify('Startup Completed for %s' % config.get('Instance', 'instance-id')) + +if __name__ == "__main__": + if not config.has_section('loggers'): + boto.set_file_logger('startup', '/var/log/boto.log') + sys.path.append(config.get('Pyami', 'working_dir')) + su = Startup() + su.main() diff --git a/desktop/core/ext-py/boto-2.38.0/boto/rds/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/rds/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8e8afa81d40c8aee34aba96387bff63f5c4ae462 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/rds/__init__.py @@ -0,0 +1,1623 @@ +# Copyright (c) 2009-2012 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import urllib +from boto.connection import AWSQueryConnection +from boto.rds.dbinstance import DBInstance +from boto.rds.dbsecuritygroup import DBSecurityGroup +from boto.rds.optiongroup import OptionGroup, OptionGroupOption +from boto.rds.parametergroup import ParameterGroup +from boto.rds.dbsnapshot import DBSnapshot +from boto.rds.event import Event +from boto.rds.regioninfo import RDSRegionInfo +from boto.rds.dbsubnetgroup import DBSubnetGroup +from boto.rds.vpcsecuritygroupmembership import VPCSecurityGroupMembership +from boto.regioninfo import get_regions +from boto.rds.logfile import LogFile, LogFileObject + + +def regions(): + """ + Get all available regions for the RDS service. + + :rtype: list + :return: A list of :class:`boto.rds.regioninfo.RDSRegionInfo` + """ + return get_regions( + 'rds', + region_cls=RDSRegionInfo, + connection_cls=RDSConnection + ) + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.rds.RDSConnection`. 
+ Any additional parameters after the region_name are passed on to + the connect method of the region object. + + :type: str + :param region_name: The name of the region to connect to. + + :rtype: :class:`boto.rds.RDSConnection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None + +#boto.set_stream_logger('rds') + + +class RDSConnection(AWSQueryConnection): + + DefaultRegionName = 'us-east-1' + DefaultRegionEndpoint = 'rds.amazonaws.com' + APIVersion = '2013-05-15' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + security_token=None, validate_certs=True, + profile_name=None): + if not region: + region = RDSRegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + self.region = region + super(RDSConnection, self).__init__(aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, + self.region.endpoint, debug, + https_connection_factory, path, + security_token, + validate_certs=validate_certs, + profile_name=profile_name) + + def _required_auth_capability(self): + return ['hmac-v4'] + + # DB Instance methods + + def get_all_dbinstances(self, instance_id=None, max_records=None, + marker=None): + """ + Retrieve all the DBInstances in your account. + + :type instance_id: str + :param instance_id: DB Instance identifier. If supplied, only + information this instance will be returned. + Otherwise, info about all DB Instances will + be returned. + + :type max_records: int + :param max_records: The maximum number of records to be returned. + If more results are available, a MoreToken will + be returned in the response that can be used to + retrieve additional records. Default is 100. + + :type marker: str + :param marker: The marker provided by a previous request. 
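+
+        A short illustrative listing (the region name is an example; any
+        valid RDS region works)::
+
+            import boto.rds
+            conn = boto.rds.connect_to_region('us-east-1')
+            for db in conn.get_all_dbinstances():
+                print('%s %s' % (db.id, db.status))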
+ + :rtype: list + :return: A list of :class:`boto.rds.dbinstance.DBInstance` + """ + params = {} + if instance_id: + params['DBInstanceIdentifier'] = instance_id + if max_records: + params['MaxRecords'] = max_records + if marker: + params['Marker'] = marker + return self.get_list('DescribeDBInstances', params, + [('DBInstance', DBInstance)]) + + def create_dbinstance(self, + id, + allocated_storage, + instance_class, + master_username, + master_password, + port=3306, + engine='MySQL5.1', + db_name=None, + param_group=None, + security_groups=None, + availability_zone=None, + preferred_maintenance_window=None, + backup_retention_period=None, + preferred_backup_window=None, + multi_az=False, + engine_version=None, + auto_minor_version_upgrade=True, + character_set_name = None, + db_subnet_group_name = None, + license_model = None, + option_group_name = None, + iops=None, + vpc_security_groups=None, + ): + # API version: 2013-09-09 + # Parameter notes: + # ================= + # id should be db_instance_identifier according to API docs but has been left + # id for backwards compatibility + # + # security_groups should be db_security_groups according to API docs but has been left + # security_groups for backwards compatibility + # + # master_password should be master_user_password according to API docs but has been left + # master_password for backwards compatibility + # + # instance_class should be db_instance_class according to API docs but has been left + # instance_class for backwards compatibility + """ + Create a new DBInstance. + + :type id: str + :param id: Unique identifier for the new instance. + Must contain 1-63 alphanumeric characters. + First character must be a letter. + May not end with a hyphen or contain two consecutive hyphens + + :type allocated_storage: int + :param allocated_storage: Initially allocated storage size, in GBs. + Valid values are depending on the engine value. + + * MySQL = 5--3072 + * oracle-se1 = 10--3072 + * oracle-se = 10--3072 + * oracle-ee = 10--3072 + * sqlserver-ee = 200--1024 + * sqlserver-se = 200--1024 + * sqlserver-ex = 30--1024 + * sqlserver-web = 30--1024 + * postgres = 5--3072 + + :type instance_class: str + :param instance_class: The compute and memory capacity of + the DBInstance. Valid values are: + + * db.t1.micro + * db.m1.small + * db.m1.medium + * db.m1.large + * db.m1.xlarge + * db.m2.xlarge + * db.m2.2xlarge + * db.m2.4xlarge + + :type engine: str + :param engine: Name of database engine. Defaults to MySQL but can be; + + * MySQL + * oracle-se1 + * oracle-se + * oracle-ee + * sqlserver-ee + * sqlserver-se + * sqlserver-ex + * sqlserver-web + * postgres + + :type master_username: str + :param master_username: Name of master user for the DBInstance. + + * MySQL must be; + - 1--16 alphanumeric characters + - first character must be a letter + - cannot be a reserved MySQL word + + * Oracle must be: + - 1--30 alphanumeric characters + - first character must be a letter + - cannot be a reserved Oracle word + + * SQL Server must be: + - 1--128 alphanumeric characters + - first character must be a letter + - cannot be a reserver SQL Server word + + :type master_password: str + :param master_password: Password of master user for the DBInstance. + + * MySQL must be 8--41 alphanumeric characters + + * Oracle must be 8--30 alphanumeric characters + + * SQL Server must be 8--128 alphanumeric characters. + + :type port: int + :param port: Port number on which database accepts connections. + Valid values [1115-65535]. 
+ + * MySQL defaults to 3306 + + * Oracle defaults to 1521 + + * SQL Server defaults to 1433 and _cannot_ be 1434, 3389, + 47001, 49152, and 49152 through 49156. + + * PostgreSQL defaults to 5432 + + :type db_name: str + :param db_name: * MySQL: + Name of a database to create when the DBInstance + is created. Default is to create no databases. + + Must contain 1--64 alphanumeric characters and cannot + be a reserved MySQL word. + + * Oracle: + The Oracle System ID (SID) of the created DB instances. + Default is ORCL. Cannot be longer than 8 characters. + + * SQL Server: + Not applicable and must be None. + + * PostgreSQL: + Name of a database to create when the DBInstance + is created. Default is to create no databases. + + Must contain 1--63 alphanumeric characters. Must + begin with a letter or an underscore. Subsequent + characters can be letters, underscores, or digits (0-9) + and cannot be a reserved PostgreSQL word. + + :type param_group: str or ParameterGroup object + :param param_group: Name of DBParameterGroup or ParameterGroup instance + to associate with this DBInstance. If no groups are + specified no parameter groups will be used. + + :type security_groups: list of str or list of DBSecurityGroup objects + :param security_groups: List of names of DBSecurityGroup to + authorize on this DBInstance. + + :type availability_zone: str + :param availability_zone: Name of the availability zone to place + DBInstance into. + + :type preferred_maintenance_window: str + :param preferred_maintenance_window: The weekly time range (in UTC) + during which maintenance can occur. + Default is Sun:05:00-Sun:09:00 + + :type backup_retention_period: int + :param backup_retention_period: The number of days for which automated + backups are retained. Setting this to + zero disables automated backups. + + :type preferred_backup_window: str + :param preferred_backup_window: The daily time range during which + automated backups are created (if + enabled). Must be in h24:mi-hh24:mi + format (UTC). + + :type multi_az: bool + :param multi_az: If True, specifies the DB Instance will be + deployed in multiple availability zones. + + For Microsoft SQL Server, must be set to false. You cannot set + the AvailabilityZone parameter if the MultiAZ parameter is + set to true. + + :type engine_version: str + :param engine_version: The version number of the database engine to use. + + * MySQL format example: 5.1.42 + + * Oracle format example: 11.2.0.2.v2 + + * SQL Server format example: 10.50.2789.0.v1 + + * PostgreSQL format example: 9.3 + + :type auto_minor_version_upgrade: bool + :param auto_minor_version_upgrade: Indicates that minor engine + upgrades will be applied + automatically to the Read Replica + during the maintenance window. + Default is True. + :type character_set_name: str + :param character_set_name: For supported engines, indicates that the DB Instance + should be associated with the specified CharacterSet. + + :type db_subnet_group_name: str + :param db_subnet_group_name: A DB Subnet Group to associate with this DB Instance. + If there is no DB Subnet Group, then it is a non-VPC DB + instance. + + :type license_model: str + :param license_model: License model information for this DB Instance. + + Valid values are; + - license-included + - bring-your-own-license + - general-public-license + + All license types are not supported on all engines. + + :type option_group_name: str + :param option_group_name: Indicates that the DB Instance should be associated + with the specified option group. 
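+
+        A minimal illustrative call, given an :class:`RDSConnection` ``conn``
+        (the identifier, size, class and credentials are placeholders)::
+
+            db = conn.create_dbinstance('mydb-instance', 10, 'db.m1.small',
+                                        'root', 'my-password')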
+ + :type iops: int + :param iops: The amount of IOPS (input/output operations per second) to Provisioned + for the DB Instance. Can be modified at a later date. + + Must scale linearly. For every 1000 IOPS provision, you must allocated + 100 GB of storage space. This scales up to 1 TB / 10 000 IOPS for MySQL + and Oracle. MSSQL is limited to 700 GB / 7 000 IOPS. + + If you specify a value, it must be at least 1000 IOPS and you must + allocate 100 GB of storage. + + :type vpc_security_groups: list of str or a VPCSecurityGroupMembership object + :param vpc_security_groups: List of VPC security group ids or a list of + VPCSecurityGroupMembership objects this DBInstance should be a member of + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The new db instance. + """ + # boto argument alignment with AWS API parameter names: + # ===================================================== + # arg => AWS parameter + # allocated_storage => AllocatedStorage + # auto_minor_version_update => AutoMinorVersionUpgrade + # availability_zone => AvailabilityZone + # backup_retention_period => BackupRetentionPeriod + # character_set_name => CharacterSetName + # db_instance_class => DBInstanceClass + # db_instance_identifier => DBInstanceIdentifier + # db_name => DBName + # db_parameter_group_name => DBParameterGroupName + # db_security_groups => DBSecurityGroups.member.N + # db_subnet_group_name => DBSubnetGroupName + # engine => Engine + # engine_version => EngineVersion + # license_model => LicenseModel + # master_username => MasterUsername + # master_user_password => MasterUserPassword + # multi_az => MultiAZ + # option_group_name => OptionGroupName + # port => Port + # preferred_backup_window => PreferredBackupWindow + # preferred_maintenance_window => PreferredMaintenanceWindow + # vpc_security_groups => VpcSecurityGroupIds.member.N + params = { + 'AllocatedStorage': allocated_storage, + 'AutoMinorVersionUpgrade': str(auto_minor_version_upgrade).lower() if auto_minor_version_upgrade else None, + 'AvailabilityZone': availability_zone, + 'BackupRetentionPeriod': backup_retention_period, + 'CharacterSetName': character_set_name, + 'DBInstanceClass': instance_class, + 'DBInstanceIdentifier': id, + 'DBName': db_name, + 'DBParameterGroupName': (param_group.name + if isinstance(param_group, ParameterGroup) + else param_group), + 'DBSubnetGroupName': db_subnet_group_name, + 'Engine': engine, + 'EngineVersion': engine_version, + 'Iops': iops, + 'LicenseModel': license_model, + 'MasterUsername': master_username, + 'MasterUserPassword': master_password, + 'MultiAZ': str(multi_az).lower() if multi_az else None, + 'OptionGroupName': option_group_name, + 'Port': port, + 'PreferredBackupWindow': preferred_backup_window, + 'PreferredMaintenanceWindow': preferred_maintenance_window, + } + if security_groups: + l = [] + for group in security_groups: + if isinstance(group, DBSecurityGroup): + l.append(group.name) + else: + l.append(group) + self.build_list_params(params, l, 'DBSecurityGroups.member') + + if vpc_security_groups: + l = [] + for vpc_grp in vpc_security_groups: + if isinstance(vpc_grp, VPCSecurityGroupMembership): + l.append(vpc_grp.vpc_group) + else: + l.append(vpc_grp) + self.build_list_params(params, l, 'VpcSecurityGroupIds.member') + + # Remove any params set to None + for k, v in params.items(): + if v is None: del(params[k]) + + return self.get_object('CreateDBInstance', params, DBInstance) + + def create_dbinstance_read_replica(self, id, source_id, + instance_class=None, + port=3306, + 
availability_zone=None, + auto_minor_version_upgrade=None): + """ + Create a new DBInstance Read Replica. + + :type id: str + :param id: Unique identifier for the new instance. + Must contain 1-63 alphanumeric characters. + First character must be a letter. + May not end with a hyphen or contain two consecutive hyphens + + :type source_id: str + :param source_id: Unique identifier for the DB Instance for which this + DB Instance will act as a Read Replica. + + :type instance_class: str + :param instance_class: The compute and memory capacity of the + DBInstance. Default is to inherit from + the source DB Instance. + + Valid values are: + + * db.m1.small + * db.m1.large + * db.m1.xlarge + * db.m2.xlarge + * db.m2.2xlarge + * db.m2.4xlarge + + :type port: int + :param port: Port number on which database accepts connections. + Default is to inherit from source DB Instance. + Valid values [1115-65535]. Defaults to 3306. + + :type availability_zone: str + :param availability_zone: Name of the availability zone to place + DBInstance into. + + :type auto_minor_version_upgrade: bool + :param auto_minor_version_upgrade: Indicates that minor engine + upgrades will be applied + automatically to the Read Replica + during the maintenance window. + Default is to inherit this value + from the source DB Instance. + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The new db instance. + """ + params = {'DBInstanceIdentifier': id, + 'SourceDBInstanceIdentifier': source_id} + if instance_class: + params['DBInstanceClass'] = instance_class + if port: + params['Port'] = port + if availability_zone: + params['AvailabilityZone'] = availability_zone + if auto_minor_version_upgrade is not None: + if auto_minor_version_upgrade is True: + params['AutoMinorVersionUpgrade'] = 'true' + else: + params['AutoMinorVersionUpgrade'] = 'false' + + return self.get_object('CreateDBInstanceReadReplica', + params, DBInstance) + + + def promote_read_replica(self, id, + backup_retention_period=None, + preferred_backup_window=None): + """ + Promote a Read Replica to a standalone DB Instance. + + :type id: str + :param id: Unique identifier for the new instance. + Must contain 1-63 alphanumeric characters. + First character must be a letter. + May not end with a hyphen or contain two consecutive hyphens + + :type backup_retention_period: int + :param backup_retention_period: The number of days for which automated + backups are retained. Setting this to + zero disables automated backups. + + :type preferred_backup_window: str + :param preferred_backup_window: The daily time range during which + automated backups are created (if + enabled). Must be in h24:mi-hh24:mi + format (UTC). + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The new db instance. + """ + params = {'DBInstanceIdentifier': id} + if backup_retention_period is not None: + params['BackupRetentionPeriod'] = backup_retention_period + if preferred_backup_window: + params['PreferredBackupWindow'] = preferred_backup_window + + return self.get_object('PromoteReadReplica', params, DBInstance) + + + def modify_dbinstance(self, id, param_group=None, security_groups=None, + preferred_maintenance_window=None, + master_password=None, allocated_storage=None, + instance_class=None, + backup_retention_period=None, + preferred_backup_window=None, + multi_az=False, + apply_immediately=False, + iops=None, + vpc_security_groups=None, + new_instance_id=None, + ): + """ + Modify an existing DBInstance. 
+ + :type id: str + :param id: Unique identifier for the new instance. + + :type param_group: str or ParameterGroup object + :param param_group: Name of DBParameterGroup or ParameterGroup instance + to associate with this DBInstance. If no groups are + specified no parameter groups will be used. + + :type security_groups: list of str or list of DBSecurityGroup objects + :param security_groups: List of names of DBSecurityGroup to authorize on + this DBInstance. + + :type preferred_maintenance_window: str + :param preferred_maintenance_window: The weekly time range (in UTC) + during which maintenance can + occur. + Default is Sun:05:00-Sun:09:00 + + :type master_password: str + :param master_password: Password of master user for the DBInstance. + Must be 4-15 alphanumeric characters. + + :type allocated_storage: int + :param allocated_storage: The new allocated storage size, in GBs. + Valid values are [5-1024] + + :type instance_class: str + :param instance_class: The compute and memory capacity of the + DBInstance. Changes will be applied at + next maintenance window unless + apply_immediately is True. + + Valid values are: + + * db.m1.small + * db.m1.large + * db.m1.xlarge + * db.m2.xlarge + * db.m2.2xlarge + * db.m2.4xlarge + + :type apply_immediately: bool + :param apply_immediately: If true, the modifications will be applied + as soon as possible rather than waiting for + the next preferred maintenance window. + + :type backup_retention_period: int + :param backup_retention_period: The number of days for which automated + backups are retained. Setting this to + zero disables automated backups. + + :type preferred_backup_window: str + :param preferred_backup_window: The daily time range during which + automated backups are created (if + enabled). Must be in h24:mi-hh24:mi + format (UTC). + + :type multi_az: bool + :param multi_az: If True, specifies the DB Instance will be + deployed in multiple availability zones. + + :type iops: int + :param iops: The amount of IOPS (input/output operations per second) to Provisioned + for the DB Instance. Can be modified at a later date. + + Must scale linearly. For every 1000 IOPS provision, you must allocated + 100 GB of storage space. This scales up to 1 TB / 10 000 IOPS for MySQL + and Oracle. MSSQL is limited to 700 GB / 7 000 IOPS. + + If you specify a value, it must be at least 1000 IOPS and you must + allocate 100 GB of storage. + + :type vpc_security_groups: list of str or a VPCSecurityGroupMembership object + :param vpc_security_groups: List of VPC security group ids or a + VPCSecurityGroupMembership object this DBInstance should be a member of + + :type new_instance_id: str + :param new_instance_id: New name to rename the DBInstance to. + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The modified db instance. 
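+
+        For example, to grow storage and apply the change immediately (the
+        identifier and size are placeholders)::
+
+            db = conn.modify_dbinstance('mydb-instance',
+                                        allocated_storage=100,
+                                        apply_immediately=True)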
+ """ + params = {'DBInstanceIdentifier': id} + if param_group: + params['DBParameterGroupName'] = (param_group.name + if isinstance(param_group, ParameterGroup) + else param_group) + if security_groups: + l = [] + for group in security_groups: + if isinstance(group, DBSecurityGroup): + l.append(group.name) + else: + l.append(group) + self.build_list_params(params, l, 'DBSecurityGroups.member') + if vpc_security_groups: + l = [] + for vpc_grp in vpc_security_groups: + if isinstance(vpc_grp, VPCSecurityGroupMembership): + l.append(vpc_grp.vpc_group) + else: + l.append(vpc_grp) + self.build_list_params(params, l, 'VpcSecurityGroupIds.member') + if preferred_maintenance_window: + params['PreferredMaintenanceWindow'] = preferred_maintenance_window + if master_password: + params['MasterUserPassword'] = master_password + if allocated_storage: + params['AllocatedStorage'] = allocated_storage + if instance_class: + params['DBInstanceClass'] = instance_class + if backup_retention_period is not None: + params['BackupRetentionPeriod'] = backup_retention_period + if preferred_backup_window: + params['PreferredBackupWindow'] = preferred_backup_window + if multi_az: + params['MultiAZ'] = 'true' + if apply_immediately: + params['ApplyImmediately'] = 'true' + if iops: + params['Iops'] = iops + if new_instance_id: + params['NewDBInstanceIdentifier'] = new_instance_id + + return self.get_object('ModifyDBInstance', params, DBInstance) + + def delete_dbinstance(self, id, skip_final_snapshot=False, + final_snapshot_id=''): + """ + Delete an existing DBInstance. + + :type id: str + :param id: Unique identifier for the new instance. + + :type skip_final_snapshot: bool + :param skip_final_snapshot: This parameter determines whether a final + db snapshot is created before the instance + is deleted. If True, no snapshot + is created. If False, a snapshot + is created before deleting the instance. + + :type final_snapshot_id: str + :param final_snapshot_id: If a final snapshot is requested, this + is the identifier used for that snapshot. + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The deleted db instance. + """ + params = {'DBInstanceIdentifier': id} + if skip_final_snapshot: + params['SkipFinalSnapshot'] = 'true' + else: + params['SkipFinalSnapshot'] = 'false' + params['FinalDBSnapshotIdentifier'] = final_snapshot_id + return self.get_object('DeleteDBInstance', params, DBInstance) + + def reboot_dbinstance(self, id): + """ + Reboot DBInstance. + + :type id: str + :param id: Unique identifier of the instance. + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The rebooting db instance. + """ + params = {'DBInstanceIdentifier': id} + return self.get_object('RebootDBInstance', params, DBInstance) + + # DBParameterGroup methods + + def get_all_dbparameter_groups(self, groupname=None, max_records=None, + marker=None): + """ + Get all parameter groups associated with your account in a region. + + :type groupname: str + :param groupname: The name of the DBParameter group to retrieve. + If not provided, all DBParameter groups will be returned. + + :type max_records: int + :param max_records: The maximum number of records to be returned. + If more results are available, a MoreToken will + be returned in the response that can be used to + retrieve additional records. Default is 100. + + :type marker: str + :param marker: The marker provided by a previous request. 
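+
+        A short illustrative listing::
+
+            for pg in conn.get_all_dbparameter_groups():
+                print(pg.name)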
+ + :rtype: list + :return: A list of :class:`boto.ec2.parametergroup.ParameterGroup` + """ + params = {} + if groupname: + params['DBParameterGroupName'] = groupname + if max_records: + params['MaxRecords'] = max_records + if marker: + params['Marker'] = marker + return self.get_list('DescribeDBParameterGroups', params, + [('DBParameterGroup', ParameterGroup)]) + + def get_all_dbparameters(self, groupname, source=None, + max_records=None, marker=None): + """ + Get all parameters associated with a ParameterGroup + + :type groupname: str + :param groupname: The name of the DBParameter group to retrieve. + + :type source: str + :param source: Specifies which parameters to return. + If not specified, all parameters will be returned. + Valid values are: user|system|engine-default + + :type max_records: int + :param max_records: The maximum number of records to be returned. + If more results are available, a MoreToken will + be returned in the response that can be used to + retrieve additional records. Default is 100. + + :type marker: str + :param marker: The marker provided by a previous request. + + :rtype: :class:`boto.ec2.parametergroup.ParameterGroup` + :return: The ParameterGroup + """ + params = {'DBParameterGroupName': groupname} + if source: + params['Source'] = source + if max_records: + params['MaxRecords'] = max_records + if marker: + params['Marker'] = marker + pg = self.get_object('DescribeDBParameters', params, ParameterGroup) + pg.name = groupname + return pg + + def create_parameter_group(self, name, engine='MySQL5.1', description=''): + """ + Create a new dbparameter group for your account. + + :type name: string + :param name: The name of the new dbparameter group + + :type engine: str + :param engine: Name of database engine. + + :type description: string + :param description: The description of the new dbparameter group + + :rtype: :class:`boto.rds.parametergroup.ParameterGroup` + :return: The newly created ParameterGroup + """ + params = {'DBParameterGroupName': name, + 'DBParameterGroupFamily': engine, + 'Description': description} + return self.get_object('CreateDBParameterGroup', params, ParameterGroup) + + def modify_parameter_group(self, name, parameters=None): + """ + Modify a ParameterGroup for your account. + + :type name: string + :param name: The name of the new ParameterGroup + + :type parameters: list of :class:`boto.rds.parametergroup.Parameter` + :param parameters: The new parameters + + :rtype: :class:`boto.rds.parametergroup.ParameterGroup` + :return: The newly created ParameterGroup + """ + params = {'DBParameterGroupName': name} + for i in range(0, len(parameters)): + parameter = parameters[i] + parameter.merge(params, i+1) + return self.get_list('ModifyDBParameterGroup', params, + ParameterGroup, verb='POST') + + def reset_parameter_group(self, name, reset_all_params=False, + parameters=None): + """ + Resets some or all of the parameters of a ParameterGroup to the + default value + + :type key_name: string + :param key_name: The name of the ParameterGroup to reset + + :type parameters: list of :class:`boto.rds.parametergroup.Parameter` + :param parameters: The parameters to reset. If not supplied, + all parameters will be reset. 
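+
+        For example, to reset every parameter in a group (the group name is
+        a placeholder; an empty ``parameters`` list is passed because the
+        ``None`` default would break the merge loop below)::
+
+            conn.reset_parameter_group('my-param-group',
+                                       reset_all_params=True,
+                                       parameters=[])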
+ """ + params = {'DBParameterGroupName': name} + if reset_all_params: + params['ResetAllParameters'] = 'true' + else: + params['ResetAllParameters'] = 'false' + for i in range(0, len(parameters)): + parameter = parameters[i] + parameter.merge(params, i+1) + return self.get_status('ResetDBParameterGroup', params) + + def delete_parameter_group(self, name): + """ + Delete a ParameterGroup from your account. + + :type key_name: string + :param key_name: The name of the ParameterGroup to delete + """ + params = {'DBParameterGroupName': name} + return self.get_status('DeleteDBParameterGroup', params) + + # DBSecurityGroup methods + + def get_all_dbsecurity_groups(self, groupname=None, max_records=None, + marker=None): + """ + Get all security groups associated with your account in a region. + + :type groupnames: list + :param groupnames: A list of the names of security groups to retrieve. + If not provided, all security groups will + be returned. + + :type max_records: int + :param max_records: The maximum number of records to be returned. + If more results are available, a MoreToken will + be returned in the response that can be used to + retrieve additional records. Default is 100. + + :type marker: str + :param marker: The marker provided by a previous request. + + :rtype: list + :return: A list of :class:`boto.rds.dbsecuritygroup.DBSecurityGroup` + """ + params = {} + if groupname: + params['DBSecurityGroupName'] = groupname + if max_records: + params['MaxRecords'] = max_records + if marker: + params['Marker'] = marker + return self.get_list('DescribeDBSecurityGroups', params, + [('DBSecurityGroup', DBSecurityGroup)]) + + def create_dbsecurity_group(self, name, description=None): + """ + Create a new security group for your account. + This will create the security group within the region you + are currently connected to. + + :type name: string + :param name: The name of the new security group + + :type description: string + :param description: The description of the new security group + + :rtype: :class:`boto.rds.dbsecuritygroup.DBSecurityGroup` + :return: The newly created DBSecurityGroup + """ + params = {'DBSecurityGroupName': name} + if description: + params['DBSecurityGroupDescription'] = description + group = self.get_object('CreateDBSecurityGroup', params, + DBSecurityGroup) + group.name = name + group.description = description + return group + + def delete_dbsecurity_group(self, name): + """ + Delete a DBSecurityGroup from your account. + + :type key_name: string + :param key_name: The name of the DBSecurityGroup to delete + """ + params = {'DBSecurityGroupName': name} + return self.get_status('DeleteDBSecurityGroup', params) + + def authorize_dbsecurity_group(self, group_name, cidr_ip=None, + ec2_security_group_name=None, + ec2_security_group_owner_id=None): + """ + Add a new rule to an existing security group. + You need to pass in either src_security_group_name and + src_security_group_owner_id OR a CIDR block but not both. + + :type group_name: string + :param group_name: The name of the security group you are adding + the rule to. + + :type ec2_security_group_name: string + :param ec2_security_group_name: The name of the EC2 security group + you are granting access to. + + :type ec2_security_group_owner_id: string + :param ec2_security_group_owner_id: The ID of the owner of the EC2 + security group you are granting + access to. + + :type cidr_ip: string + :param cidr_ip: The CIDR block you are providing access to. 
+ See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing + + :rtype: bool + :return: True if successful. + """ + params = {'DBSecurityGroupName': group_name} + if ec2_security_group_name: + params['EC2SecurityGroupName'] = ec2_security_group_name + if ec2_security_group_owner_id: + params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id + if cidr_ip: + params['CIDRIP'] = urllib.quote(cidr_ip) + return self.get_object('AuthorizeDBSecurityGroupIngress', params, + DBSecurityGroup) + + def revoke_dbsecurity_group(self, group_name, ec2_security_group_name=None, + ec2_security_group_owner_id=None, cidr_ip=None): + """ + Remove an existing rule from an existing security group. + You need to pass in either ec2_security_group_name and + ec2_security_group_owner_id OR a CIDR block. + + :type group_name: string + :param group_name: The name of the security group you are removing + the rule from. + + :type ec2_security_group_name: string + :param ec2_security_group_name: The name of the EC2 security group + from which you are removing access. + + :type ec2_security_group_owner_id: string + :param ec2_security_group_owner_id: The ID of the owner of the EC2 + security from which you are + removing access. + + :type cidr_ip: string + :param cidr_ip: The CIDR block from which you are removing access. + See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing + + :rtype: bool + :return: True if successful. + """ + params = {'DBSecurityGroupName': group_name} + if ec2_security_group_name: + params['EC2SecurityGroupName'] = ec2_security_group_name + if ec2_security_group_owner_id: + params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id + if cidr_ip: + params['CIDRIP'] = cidr_ip + return self.get_object('RevokeDBSecurityGroupIngress', params, + DBSecurityGroup) + + # For backwards compatibility. This method was improperly named + # in previous versions. I have renamed it to match the others. + revoke_security_group = revoke_dbsecurity_group + + # DBSnapshot methods + + def get_all_dbsnapshots(self, snapshot_id=None, instance_id=None, + max_records=None, marker=None): + """ + Get information about DB Snapshots. + + :type snapshot_id: str + :param snapshot_id: The unique identifier of an RDS snapshot. + If not provided, all RDS snapshots will be returned. + + :type instance_id: str + :param instance_id: The identifier of a DBInstance. If provided, + only the DBSnapshots related to that instance will + be returned. + If not provided, all RDS snapshots will be returned. + + :type max_records: int + :param max_records: The maximum number of records to be returned. + If more results are available, a MoreToken will + be returned in the response that can be used to + retrieve additional records. Default is 100. + + :type marker: str + :param marker: The marker provided by a previous request. + + :rtype: list + :return: A list of :class:`boto.rds.dbsnapshot.DBSnapshot` + """ + params = {} + if snapshot_id: + params['DBSnapshotIdentifier'] = snapshot_id + if instance_id: + params['DBInstanceIdentifier'] = instance_id + if max_records: + params['MaxRecords'] = max_records + if marker: + params['Marker'] = marker + return self.get_list('DescribeDBSnapshots', params, + [('DBSnapshot', DBSnapshot)]) + + def get_all_logs(self, dbinstance_id, max_records=None, marker=None, file_size=None, filename_contains=None, file_last_written=None): + """ + Get all log files + + :type instance_id: str + :param instance_id: The identifier of a DBInstance. 
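+
+        A short illustrative sequence (the instance id is a placeholder;
+        ``get_log_file`` below fetches a portion of a named file)::
+
+            logs = conn.get_all_logs('mydb-instance')
+            if logs:
+                portion = conn.get_log_file('mydb-instance',
+                                            logs[0].log_filename)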
+
+        :type max_records: int
+        :param max_records: Number of log file names to return.
+
+        :type marker: str
+        :param marker: The marker provided by a previous request.
+
+        :type file_size: int
+        :param file_size: Filter results to files larger than this size,
+                          in bytes.
+
+        :type filename_contains: str
+        :param filename_contains: Filter results to files whose filename
+                                  contains this string.
+
+        :type file_last_written: int
+        :param file_last_written: Filter results to files written after this
+                                  time (POSIX timestamp).
+
+        :rtype: list
+        :return: A list of :class:`boto.rds.logfile.LogFile`
+        """
+        params = {'DBInstanceIdentifier': dbinstance_id}
+
+        if file_size:
+            params['FileSize'] = file_size
+
+        if filename_contains:
+            params['FilenameContains'] = filename_contains
+
+        if file_last_written:
+            params['FileLastWritten'] = file_last_written
+
+        if marker:
+            params['Marker'] = marker
+
+        if max_records:
+            params['MaxRecords'] = max_records
+
+        return self.get_list('DescribeDBLogFiles', params,
+                             [('DescribeDBLogFilesDetails', LogFile)])
+
+    def get_log_file(self, dbinstance_id, log_file_name, marker=None,
+                     number_of_lines=None, max_records=None):
+        """
+        Download a log file from RDS.
+
+        :type dbinstance_id: str
+        :param dbinstance_id: The identifier of a DBInstance.
+
+        :type log_file_name: str
+        :param log_file_name: The name of the log file to retrieve.
+
+        :type marker: str
+        :param marker: A marker returned from a previous call to this method,
+                       or 0 to indicate the start of file.  If no marker is
+                       specified, this will fetch log lines from the end of
+                       file instead.
+
+        :type number_of_lines: int
+        :param number_of_lines: The maximum number of lines to be returned.
+        """
+
+        params = {
+            'DBInstanceIdentifier': dbinstance_id,
+            'LogFileName': log_file_name,
+        }
+
+        if marker:
+            params['Marker'] = marker
+
+        if number_of_lines:
+            params['NumberOfLines'] = number_of_lines
+
+        if max_records:
+            params['MaxRecords'] = max_records
+
+        logfile = self.get_object('DownloadDBLogFilePortion', params,
+                                  LogFileObject)
+
+        if logfile:
+            logfile.log_filename = log_file_name
+            logfile.dbinstance_id = dbinstance_id
+
+        return logfile
+
+    def create_dbsnapshot(self, snapshot_id, dbinstance_id):
+        """
+        Create a new DB snapshot.
+
+        :type snapshot_id: string
+        :param snapshot_id: The identifier for the DBSnapshot
+
+        :type dbinstance_id: string
+        :param dbinstance_id: The source identifier for the RDS instance from
+                              which the snapshot is created.
+
+        :rtype: :class:`boto.rds.dbsnapshot.DBSnapshot`
+        :return: The newly created DBSnapshot
+        """
+        params = {'DBSnapshotIdentifier': snapshot_id,
+                  'DBInstanceIdentifier': dbinstance_id}
+        return self.get_object('CreateDBSnapshot', params, DBSnapshot)
+
+    def copy_dbsnapshot(self, source_snapshot_id, target_snapshot_id):
+        """
+        Copies the specified DBSnapshot.
+
+        :type source_snapshot_id: string
+        :param source_snapshot_id: The identifier for the source DB snapshot.
+
+        :type target_snapshot_id: string
+        :param target_snapshot_id: The identifier for the copied snapshot.
+
+        :rtype: :class:`boto.rds.dbsnapshot.DBSnapshot`
+        :return: The newly created DBSnapshot.
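+
+        A minimal usage sketch (illustrative only; assumes ``conn`` is an
+        open :class:`boto.rds.RDSConnection` and that the source snapshot
+        already exists; the identifiers are placeholders)::
+
+            # 'mydb-snap' and 'mydb-snap-copy' are placeholder identifiers
+            copy = conn.copy_dbsnapshot('mydb-snap', 'mydb-snap-copy')
+            status = copy.status    # typically 'creating' at first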
+ """ + params = {'SourceDBSnapshotIdentifier': source_snapshot_id, + 'TargetDBSnapshotIdentifier': target_snapshot_id} + return self.get_object('CopyDBSnapshot', params, DBSnapshot) + + def delete_dbsnapshot(self, identifier): + """ + Delete a DBSnapshot + + :type identifier: string + :param identifier: The identifier of the DBSnapshot to delete + """ + params = {'DBSnapshotIdentifier': identifier} + return self.get_object('DeleteDBSnapshot', params, DBSnapshot) + + def restore_dbinstance_from_dbsnapshot(self, identifier, instance_id, + instance_class, port=None, + availability_zone=None, + multi_az=None, + auto_minor_version_upgrade=None, + db_subnet_group_name=None): + """ + Create a new DBInstance from a DB snapshot. + + :type identifier: string + :param identifier: The identifier for the DBSnapshot + + :type instance_id: string + :param instance_id: The source identifier for the RDS instance from + which the snapshot is created. + + :type instance_class: str + :param instance_class: The compute and memory capacity of the + DBInstance. Valid values are: + db.m1.small | db.m1.large | db.m1.xlarge | + db.m2.2xlarge | db.m2.4xlarge + + :type port: int + :param port: Port number on which database accepts connections. + Valid values [1115-65535]. Defaults to 3306. + + :type availability_zone: str + :param availability_zone: Name of the availability zone to place + DBInstance into. + + :type multi_az: bool + :param multi_az: If True, specifies the DB Instance will be + deployed in multiple availability zones. + Default is the API default. + + :type auto_minor_version_upgrade: bool + :param auto_minor_version_upgrade: Indicates that minor engine + upgrades will be applied + automatically to the Read Replica + during the maintenance window. + Default is the API default. + + :type db_subnet_group_name: str + :param db_subnet_group_name: A DB Subnet Group to associate with this DB Instance. + If there is no DB Subnet Group, then it is a non-VPC DB + instance. + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The newly created DBInstance + """ + params = {'DBSnapshotIdentifier': identifier, + 'DBInstanceIdentifier': instance_id, + 'DBInstanceClass': instance_class} + if port: + params['Port'] = port + if availability_zone: + params['AvailabilityZone'] = availability_zone + if multi_az is not None: + params['MultiAZ'] = str(multi_az).lower() + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str(auto_minor_version_upgrade).lower() + if db_subnet_group_name is not None: + params['DBSubnetGroupName'] = db_subnet_group_name + return self.get_object('RestoreDBInstanceFromDBSnapshot', + params, DBInstance) + + def restore_dbinstance_from_point_in_time(self, source_instance_id, + target_instance_id, + use_latest=False, + restore_time=None, + dbinstance_class=None, + port=None, + availability_zone=None, + db_subnet_group_name=None): + + """ + Create a new DBInstance from a point in time. + + :type source_instance_id: string + :param source_instance_id: The identifier for the source DBInstance. + + :type target_instance_id: string + :param target_instance_id: The identifier of the new DBInstance. + + :type use_latest: bool + :param use_latest: If True, the latest snapshot availabile will + be used. + + :type restore_time: datetime + :param restore_time: The date and time to restore from. Only + used if use_latest is False. + + :type instance_class: str + :param instance_class: The compute and memory capacity of the + DBInstance. 
+                                 Valid values are:
+                                 db.m1.small | db.m1.large | db.m1.xlarge |
+                                 db.m2.2xlarge | db.m2.4xlarge
+
+        :type port: int
+        :param port: Port number on which database accepts connections.
+                     Valid values [1150-65535].  Defaults to 3306.
+
+        :type availability_zone: str
+        :param availability_zone: Name of the availability zone to place
+                                  DBInstance into.
+
+        :type db_subnet_group_name: str
+        :param db_subnet_group_name: A DB Subnet Group to associate with this
+                                     DB Instance.  If there is no DB Subnet
+                                     Group, then it is a non-VPC DB instance.
+
+        :rtype: :class:`boto.rds.dbinstance.DBInstance`
+        :return: The newly created DBInstance
+        """
+        params = {'SourceDBInstanceIdentifier': source_instance_id,
+                  'TargetDBInstanceIdentifier': target_instance_id}
+        if use_latest:
+            params['UseLatestRestorableTime'] = 'true'
+        elif restore_time:
+            params['RestoreTime'] = restore_time.isoformat()
+        if dbinstance_class:
+            params['DBInstanceClass'] = dbinstance_class
+        if port:
+            params['Port'] = port
+        if availability_zone:
+            params['AvailabilityZone'] = availability_zone
+        if db_subnet_group_name is not None:
+            params['DBSubnetGroupName'] = db_subnet_group_name
+        return self.get_object('RestoreDBInstanceToPointInTime',
+                               params, DBInstance)
+
+    # Events
+
+    def get_all_events(self, source_identifier=None, source_type=None,
+                       start_time=None, end_time=None,
+                       max_records=None, marker=None):
+        """
+        Get information about events related to your DBInstances,
+        DBSecurityGroups and DBParameterGroups.
+
+        :type source_identifier: str
+        :param source_identifier: If supplied, the events returned will be
+                                  limited to those that apply to the identified
+                                  source.  The value of this parameter depends
+                                  on the value of source_type.  If neither
+                                  parameter is specified, all events in the time
+                                  span will be returned.
+
+        :type source_type: str
+        :param source_type: Specifies how the source_identifier should
+                            be interpreted.  Valid values are:
+                            db-instance | db-security-group |
+                            db-parameter-group | db-snapshot
+
+        :type start_time: datetime
+        :param start_time: The beginning of the time interval for events.
+                           If not supplied, all available events will
+                           be returned.
+
+        :type end_time: datetime
+        :param end_time: The ending of the time interval for events.
+                         If not supplied, all available events will
+                         be returned.
+
+        :type max_records: int
+        :param max_records: The maximum number of records to be returned.
+                            If more results are available, a MoreToken will
+                            be returned in the response that can be used to
+                            retrieve additional records.  Default is 100.
+
+        :type marker: str
+        :param marker: The marker provided by a previous request.
+
+        :rtype: list
+        :return: A list of :class:`boto.rds.event.Event`
+        """
+        params = {}
+        if source_identifier and source_type:
+            params['SourceIdentifier'] = source_identifier
+            params['SourceType'] = source_type
+        if start_time:
+            params['StartTime'] = start_time.isoformat()
+        if end_time:
+            params['EndTime'] = end_time.isoformat()
+        if max_records:
+            params['MaxRecords'] = max_records
+        if marker:
+            params['Marker'] = marker
+        return self.get_list('DescribeEvents', params, [('Event', Event)])
+
+    def create_db_subnet_group(self, name, desc, subnet_ids):
+        """
+        Create a new Database Subnet Group.
+
+        :type name: string
+        :param name: The identifier for the db_subnet_group
+
+        :type desc: string
+        :param desc: A description of the db_subnet_group
+
+        :type subnet_ids: list
+        :param subnet_ids: A list of the subnet identifiers to include in the
+                           db_subnet_group
+
+        :rtype: :class:`boto.rds.dbsubnetgroup.DBSubnetGroup`
+        :return: the created db_subnet_group
+        """
+
+        params = {'DBSubnetGroupName': name,
+                  'DBSubnetGroupDescription': desc}
+        self.build_list_params(params, subnet_ids, 'SubnetIds.member')
+
+        return self.get_object('CreateDBSubnetGroup', params, DBSubnetGroup)
+
+    def delete_db_subnet_group(self, name):
+        """
+        Delete a Database Subnet Group.
+
+        :type name: string
+        :param name: The identifier of the db_subnet_group to delete
+
+        :rtype: :class:`boto.rds.dbsubnetgroup.DBSubnetGroup`
+        :return: The deleted db_subnet_group.
+        """
+
+        params = {'DBSubnetGroupName': name}
+
+        return self.get_object('DeleteDBSubnetGroup', params, DBSubnetGroup)
+
+    def get_all_db_subnet_groups(self, name=None, max_records=None,
+                                 marker=None):
+        """
+        Retrieve all the DBSubnetGroups in your account.
+
+        :type name: str
+        :param name: DBSubnetGroup name.  If supplied, only information about
+                     this DBSubnetGroup will be returned.  Otherwise, info
+                     about all DBSubnetGroups will be returned.
+
+        :type max_records: int
+        :param max_records: The maximum number of records to be returned.
+                            If more results are available, a Token will be
+                            returned in the response that can be used to
+                            retrieve additional records.  Default is 100.
+
+        :type marker: str
+        :param marker: The marker provided by a previous request.
+
+        :rtype: list
+        :return: A list of :class:`boto.rds.dbsubnetgroup.DBSubnetGroup`
+        """
+        params = dict()
+        if name is not None:
+            params['DBSubnetGroupName'] = name
+        if max_records is not None:
+            params['MaxRecords'] = max_records
+        if marker is not None:
+            params['Marker'] = marker
+
+        return self.get_list('DescribeDBSubnetGroups', params,
+                             [('DBSubnetGroup', DBSubnetGroup)])
+
+    def modify_db_subnet_group(self, name, description=None, subnet_ids=None):
+        """
+        Modify a Database Subnet Group in your account.
+
+        :type name: string
+        :param name: The name of the db_subnet_group to modify
+
+        :type description: str
+        :param description: A new description for the db_subnet_group
+
+        :type subnet_ids: list
+        :param subnet_ids: A new list of subnet identifiers to include in
+                           the db_subnet_group
+
+        :rtype: :class:`boto.rds.dbsubnetgroup.DBSubnetGroup`
+        :return: The modified db_subnet_group
+        """
+        params = {'DBSubnetGroupName': name}
+        if description is not None:
+            params['DBSubnetGroupDescription'] = description
+        if subnet_ids is not None:
+            self.build_list_params(params, subnet_ids, 'SubnetIds.member')
+
+        return self.get_object('ModifyDBSubnetGroup', params, DBSubnetGroup)
+
+    def create_option_group(self, name, engine_name, major_engine_version,
+                            description=None):
+        """
+        Create a new option group for your account.
+        This will create the option group within the region you
+        are currently connected to.
+
+        :type name: string
+        :param name: The name of the new option group
+
+        :type engine_name: string
+        :param engine_name: Specifies the name of the engine that this option
+                            group should be associated with.
+
+        :type major_engine_version: string
+        :param major_engine_version: Specifies the major version of the engine
+                                     that this option group should be
+                                     associated with.
+
+        :type description: string
+        :param description: The description of the new option group
+
+        :rtype: :class:`boto.rds.optiongroup.OptionGroup`
+        :return: The newly created OptionGroup
+        """
+        params = {
+            'OptionGroupName': name,
+            'EngineName': engine_name,
+            'MajorEngineVersion': major_engine_version,
+            'OptionGroupDescription': description,
+        }
+        group = self.get_object('CreateOptionGroup', params, OptionGroup)
+        group.name = name
+        group.engine_name = engine_name
+        group.major_engine_version = major_engine_version
+        group.description = description
+        return group
+
+    def delete_option_group(self, name):
+        """
+        Delete an OptionGroup from your account.
+
+        :type name: string
+        :param name: The name of the OptionGroup to delete
+        """
+        params = {'OptionGroupName': name}
+        return self.get_status('DeleteOptionGroup', params)
+
+    def describe_option_groups(self, name=None, engine_name=None,
+                               major_engine_version=None, max_records=100,
+                               marker=None):
+        """
+        Describes the available option groups.
+
+        :type name: str
+        :param name: The name of the option group to describe.  Cannot be
+                     supplied together with engine_name or
+                     major_engine_version.
+
+        :type engine_name: str
+        :param engine_name: Filters the list of option groups to only include
+                            groups associated with a specific database engine.
+
+        :type major_engine_version: str
+        :param major_engine_version: Filters the list of option groups to only
+                                     include groups associated with a specific
+                                     database engine version.  If specified,
+                                     then engine_name must also be specified.
+
+        :type max_records: int
+        :param max_records: The maximum number of records to be returned.
+                            If more results are available, a MoreToken will
+                            be returned in the response that can be used to
+                            retrieve additional records.  Default is 100.
+
+        :type marker: str
+        :param marker: The marker provided by a previous request.
+
+        :rtype: list
+        :return: A list of :class:`boto.rds.optiongroup.OptionGroup`
+        """
+        params = {}
+        if name:
+            params['OptionGroupName'] = name
+        elif engine_name and major_engine_version:
+            params['EngineName'] = engine_name
+            params['MajorEngineVersion'] = major_engine_version
+        if max_records:
+            params['MaxRecords'] = int(max_records)
+        if marker:
+            params['Marker'] = marker
+        return self.get_list('DescribeOptionGroups', params, [
+            ('OptionGroup', OptionGroup)
+        ])
+
+    def describe_option_group_options(self, engine_name=None,
+                                      major_engine_version=None,
+                                      max_records=100, marker=None):
+        """
+        Describes the available option group options.
+
+        :type engine_name: str
+        :param engine_name: Filters the list of option groups to only include
+                            groups associated with a specific database engine.
+
+        :type major_engine_version: str
+        :param major_engine_version: Filters the list of option groups to only
+                                     include groups associated with a specific
+                                     database engine version.  If specified,
+                                     then engine_name must also be specified.
+
+        :type max_records: int
+        :param max_records: The maximum number of records to be returned.
+                            If more results are available, a MoreToken will
+                            be returned in the response that can be used to
+                            retrieve additional records.  Default is 100.
+
+        :type marker: str
+        :param marker: The marker provided by a previous request.
+ + :rtype: list + :return: A list of class:`boto.rds.optiongroup.Option` + """ + params = {} + if engine_name and major_engine_version: + params['EngineName'] = engine_name + params['MajorEngineVersion'] = major_engine_version + if max_records: + params['MaxRecords'] = int(max_records) + if marker: + params['Marker'] = marker + return self.get_list('DescribeOptionGroupOptions', params, [ + ('OptionGroupOptions', OptionGroupOption) + ]) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/rds/dbinstance.py b/desktop/core/ext-py/boto-2.38.0/boto/rds/dbinstance.py new file mode 100644 index 0000000000000000000000000000000000000000..6a6385103d3ab4fa3f4e7759af9ef993e9b9b80e --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/rds/dbinstance.py @@ -0,0 +1,416 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.rds.dbsecuritygroup import DBSecurityGroup +from boto.rds.parametergroup import ParameterGroup +from boto.rds.statusinfo import StatusInfo +from boto.rds.dbsubnetgroup import DBSubnetGroup +from boto.rds.vpcsecuritygroupmembership import VPCSecurityGroupMembership +from boto.resultset import ResultSet + + +class DBInstance(object): + """ + Represents a RDS DBInstance + + Properties reference available from the AWS documentation at + http://goo.gl/sC2Kn + + :ivar connection: connection + :ivar id: The name and identifier of the DBInstance + :ivar create_time: The date and time of creation + :ivar engine: The database engine being used + :ivar status: The status of the database in a string. e.g. "available" + :ivar allocated_storage: The size of the disk in gigabytes (int). + :ivar auto_minor_version_upgrade: Indicates that minor version patches + are applied automatically. + :ivar endpoint: A tuple that describes the hostname and port of + the instance. This is only available when the database is + in status "available". + :ivar instance_class: Contains the name of the compute and memory + capacity class of the DB Instance. + :ivar master_username: The username that is set as master username + at creation time. + :ivar parameter_groups: Provides the list of DB Parameter Groups + applied to this DB Instance. + :ivar security_groups: Provides List of DB Security Group elements + containing only DBSecurityGroup.Name and DBSecurityGroup.Status + subelements. + :ivar availability_zone: Specifies the name of the Availability Zone + the DB Instance is located in. 
+    :ivar backup_retention_period: Specifies the number of days for
+        which automatic DB Snapshots are retained.
+    :ivar preferred_backup_window: Specifies the daily time range during
+        which automated backups are created if automated backups are
+        enabled, as determined by the backup_retention_period.
+    :ivar preferred_maintenance_window: Specifies the weekly time
+        range (in UTC) during which system maintenance can occur. (string)
+    :ivar latest_restorable_time: Specifies the latest time to which
+        a database can be restored with point-in-time restore. (string)
+    :ivar multi_az: Boolean that specifies if the DB Instance is a
+        Multi-AZ deployment.
+    :ivar iops: The current number of provisioned IOPS for the DB Instance.
+        Can be None if this is a standard instance.
+    :ivar vpc_security_groups: List of VPC Security Group Membership elements
+        containing only VpcSecurityGroupMembership.VpcSecurityGroupId and
+        VpcSecurityGroupMembership.Status subelements.
+    :ivar pending_modified_values: Specifies that changes to the
+        DB Instance are pending.  This element is only included when changes
+        are pending.  Specific changes are identified by subelements.
+    :ivar read_replica_dbinstance_identifiers: List of read replicas
+        associated with this DB instance.
+    :ivar status_infos: The status of a Read Replica.  If the instance is not
+        a read replica, this will be blank.
+    :ivar character_set_name: If present, specifies the name of the character
+        set that this instance is associated with.
+    :ivar subnet_group: Specifies information on the subnet group associated
+        with the DB instance, including the name, description, and subnets
+        in the subnet group.
+    :ivar engine_version: Indicates the database engine version.
+    :ivar license_model: License model information for this DB instance.
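+
+    A minimal usage sketch (illustrative only; assumes ``conn`` is an
+    open :class:`boto.rds.RDSConnection`)::
+
+        for db in conn.get_all_dbinstances():
+            if db.status == 'available':
+                # endpoint is a (hostname, port) tuple once available
+                host, port = db.endpoint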
+ """ + + def __init__(self, connection=None, id=None): + self.connection = connection + self.id = id + self.create_time = None + self.engine = None + self.status = None + self.allocated_storage = None + self.auto_minor_version_upgrade = None + self.endpoint = None + self.instance_class = None + self.master_username = None + self.parameter_groups = [] + self.security_groups = [] + self.read_replica_dbinstance_identifiers = [] + self.availability_zone = None + self.backup_retention_period = None + self.preferred_backup_window = None + self.preferred_maintenance_window = None + self.latest_restorable_time = None + self.multi_az = False + self.iops = None + self.vpc_security_groups = None + self.pending_modified_values = None + self._in_endpoint = False + self._port = None + self._address = None + self.status_infos = None + self.character_set_name = None + self.subnet_group = None + self.engine_version = None + self.license_model = None + + def __repr__(self): + return 'DBInstance:%s' % self.id + + def startElement(self, name, attrs, connection): + if name == 'Endpoint': + self._in_endpoint = True + elif name == 'DBParameterGroups': + self.parameter_groups = ResultSet([('DBParameterGroup', + ParameterGroup)]) + return self.parameter_groups + elif name == 'DBSecurityGroups': + self.security_groups = ResultSet([('DBSecurityGroup', + DBSecurityGroup)]) + return self.security_groups + elif name == 'VpcSecurityGroups': + self.vpc_security_groups = ResultSet([('VpcSecurityGroupMembership', + VPCSecurityGroupMembership)]) + return self.vpc_security_groups + elif name == 'PendingModifiedValues': + self.pending_modified_values = PendingModifiedValues() + return self.pending_modified_values + elif name == 'ReadReplicaDBInstanceIdentifiers': + self.read_replica_dbinstance_identifiers = \ + ReadReplicaDBInstanceIdentifiers() + return self.read_replica_dbinstance_identifiers + elif name == 'StatusInfos': + self.status_infos = ResultSet([ + ('DBInstanceStatusInfo', StatusInfo) + ]) + return self.status_infos + elif name == 'DBSubnetGroup': + self.subnet_group = DBSubnetGroup() + return self.subnet_group + return None + + def endElement(self, name, value, connection): + if name == 'DBInstanceIdentifier': + self.id = value + elif name == 'DBInstanceStatus': + self.status = value + elif name == 'InstanceCreateTime': + self.create_time = value + elif name == 'Engine': + self.engine = value + elif name == 'DBInstanceStatus': + self.status = value + elif name == 'AllocatedStorage': + self.allocated_storage = int(value) + elif name == 'AutoMinorVersionUpgrade': + self.auto_minor_version_upgrade = value.lower() == 'true' + elif name == 'DBInstanceClass': + self.instance_class = value + elif name == 'MasterUsername': + self.master_username = value + elif name == 'Port': + if self._in_endpoint: + self._port = int(value) + elif name == 'Address': + if self._in_endpoint: + self._address = value + elif name == 'Endpoint': + self.endpoint = (self._address, self._port) + self._in_endpoint = False + elif name == 'AvailabilityZone': + self.availability_zone = value + elif name == 'BackupRetentionPeriod': + self.backup_retention_period = int(value) + elif name == 'LatestRestorableTime': + self.latest_restorable_time = value + elif name == 'PreferredMaintenanceWindow': + self.preferred_maintenance_window = value + elif name == 'PreferredBackupWindow': + self.preferred_backup_window = value + elif name == 'MultiAZ': + if value.lower() == 'true': + self.multi_az = True + elif name == 'Iops': + self.iops = int(value) + elif 
name == 'CharacterSetName':
+            self.character_set_name = value
+        elif name == 'EngineVersion':
+            self.engine_version = value
+        elif name == 'LicenseModel':
+            self.license_model = value
+        else:
+            setattr(self, name, value)
+
+    @property
+    def security_group(self):
+        """
+        Provide backward compatibility for previous security_group
+        attribute.
+        """
+        if len(self.security_groups) > 0:
+            return self.security_groups[-1]
+        else:
+            return None
+
+    @property
+    def parameter_group(self):
+        """
+        Provide backward compatibility for previous parameter_group
+        attribute.
+        """
+        if len(self.parameter_groups) > 0:
+            return self.parameter_groups[-1]
+        else:
+            return None
+
+    def snapshot(self, snapshot_id):
+        """
+        Create a new DB snapshot of this DBInstance.
+
+        :type snapshot_id: string
+        :param snapshot_id: The identifier for the DBSnapshot
+
+        :rtype: :class:`boto.rds.dbsnapshot.DBSnapshot`
+        :return: The newly created DBSnapshot
+        """
+        return self.connection.create_dbsnapshot(snapshot_id, self.id)
+
+    def reboot(self):
+        """
+        Reboot this DBInstance.
+
+        :rtype: :class:`boto.rds.dbinstance.DBInstance`
+        :return: The rebooting DBInstance
+        """
+        return self.connection.reboot_dbinstance(self.id)
+
+    def update(self, validate=False):
+        """
+        Update the DB instance's status information by making a call to fetch
+        the current instance attributes from the service.
+
+        :type validate: bool
+        :param validate: By default, if EC2 returns no data about the
+                         instance the update method returns quietly.  If the
+                         validate param is True, however, it will raise a
+                         ValueError exception if no data is returned from EC2.
+        """
+        rs = self.connection.get_all_dbinstances(self.id)
+        if len(rs) > 0:
+            for i in rs:
+                if i.id == self.id:
+                    self.__dict__.update(i.__dict__)
+        elif validate:
+            raise ValueError('%s is not a valid Instance ID' % self.id)
+        return self.status
+
+    def stop(self, skip_final_snapshot=False, final_snapshot_id=''):
+        """
+        Delete this DBInstance.
+
+        :type skip_final_snapshot: bool
+        :param skip_final_snapshot: This parameter determines whether
+            a final db snapshot is created before the instance is
+            deleted.  If True, no snapshot is created.  If False, a
+            snapshot is created before deleting the instance.
+
+        :type final_snapshot_id: str
+        :param final_snapshot_id: If a final snapshot is requested, this
+            is the identifier used for that snapshot.
+
+        :rtype: :class:`boto.rds.dbinstance.DBInstance`
+        :return: The deleted db instance.
+        """
+        return self.connection.delete_dbinstance(self.id,
+                                                 skip_final_snapshot,
+                                                 final_snapshot_id)
+
+    def modify(self, param_group=None, security_groups=None,
+               preferred_maintenance_window=None,
+               master_password=None, allocated_storage=None,
+               instance_class=None,
+               backup_retention_period=None,
+               preferred_backup_window=None,
+               multi_az=False,
+               iops=None,
+               vpc_security_groups=None,
+               apply_immediately=False,
+               new_instance_id=None):
+        """
+        Modify this DBInstance.
+
+        :type param_group: str
+        :param param_group: Name of DBParameterGroup to associate with
+            this DBInstance.
+
+        :type security_groups: list of str or list of DBSecurityGroup objects
+        :param security_groups: List of names of DBSecurityGroup to
+            authorize on this DBInstance.
+
+        :type preferred_maintenance_window: str
+        :param preferred_maintenance_window: The weekly time range (in
+            UTC) during which maintenance can occur.  Default is
+            Sun:05:00-Sun:09:00
+
+        :type master_password: str
+        :param master_password: Password of master user for the DBInstance.
+            Must be 4-15 alphanumeric characters.
+
+        :type allocated_storage: int
+        :param allocated_storage: The new allocated storage size, in GBs.
+            Valid values are [5-1024]
+
+        :type instance_class: str
+        :param instance_class: The compute and memory capacity of the
+            DBInstance.  Changes will be applied at next maintenance
+            window unless apply_immediately is True.
+
+            Valid values are:
+
+            * db.m1.small
+            * db.m1.large
+            * db.m1.xlarge
+            * db.m2.xlarge
+            * db.m2.2xlarge
+            * db.m2.4xlarge
+
+        :type apply_immediately: bool
+        :param apply_immediately: If true, the modifications will be
+            applied as soon as possible rather than waiting for the
+            next preferred maintenance window.
+
+        :type new_instance_id: str
+        :param new_instance_id: The new DB instance identifier.
+
+        :type backup_retention_period: int
+        :param backup_retention_period: The number of days for which
+            automated backups are retained.  Setting this to zero
+            disables automated backups.
+
+        :type preferred_backup_window: str
+        :param preferred_backup_window: The daily time range during
+            which automated backups are created (if enabled).  Must be
+            in hh24:mi-hh24:mi format (UTC).
+
+        :type multi_az: bool
+        :param multi_az: If True, specifies the DB Instance will be
+            deployed in multiple availability zones.
+
+        :type iops: int
+        :param iops: The amount of IOPS (input/output operations per
+            second) to provision for the DB Instance.  Can be
+            modified at a later date.
+
+            Must scale linearly.  For every 1000 IOPS provisioned, you
+            must allocate 100 GB of storage space.  This scales up to
+            1 TB / 10 000 IOPS for MySQL and Oracle.  MSSQL is limited
+            to 700 GB / 7 000 IOPS.
+
+            If you specify a value, it must be at least 1000 IOPS and
+            you must allocate 100 GB of storage.
+
+        :type vpc_security_groups: list
+        :param vpc_security_groups: List of VPCSecurityGroupMembership
+            that this DBInstance is a member of.
+
+        :rtype: :class:`boto.rds.dbinstance.DBInstance`
+        :return: The modified db instance.
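+
+        A minimal usage sketch (illustrative only; assumes ``db`` is a
+        DBInstance previously fetched via ``get_all_dbinstances``)::
+
+            # grow storage now rather than at the maintenance window
+            db = db.modify(allocated_storage=100, apply_immediately=True)
+            pending = db.pending_modified_values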
+ """ + return self.connection.modify_dbinstance(self.id, + param_group, + security_groups, + preferred_maintenance_window, + master_password, + allocated_storage, + instance_class, + backup_retention_period, + preferred_backup_window, + multi_az, + apply_immediately, + iops, + vpc_security_groups, + new_instance_id) + + +class PendingModifiedValues(dict): + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name != 'PendingModifiedValues': + self[name] = value + + +class ReadReplicaDBInstanceIdentifiers(list): + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'ReadReplicaDBInstanceIdentifier': + self.append(value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/rds/dbsecuritygroup.py b/desktop/core/ext-py/boto-2.38.0/boto/rds/dbsecuritygroup.py new file mode 100644 index 0000000000000000000000000000000000000000..378360667da12af06bb9e26590e8803836b6873b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/rds/dbsecuritygroup.py @@ -0,0 +1,186 @@ +# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an DBSecurityGroup +""" +from boto.ec2.securitygroup import SecurityGroup + +class DBSecurityGroup(object): + """ + Represents an RDS database security group + + Properties reference available from the AWS documentation at + http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DeleteDBSecurityGroup.html + + :ivar Status: The current status of the security group. Possible values are + [ active, ? ]. Reference documentation lacks specifics of possibilities + :ivar connection: :py:class:`boto.rds.RDSConnection` associated with the current object + :ivar description: The description of the security group + :ivar ec2_groups: List of :py:class:`EC2 Security Group + ` objects that this security + group PERMITS + :ivar ip_ranges: List of :py:class:`boto.rds.dbsecuritygroup.IPRange` + objects (containing CIDR addresses) that this security group PERMITS + :ivar name: Name of the security group + :ivar owner_id: ID of the owner of the security group. 
Can be 'None'
+    """
+    def __init__(self, connection=None, owner_id=None,
+                 name=None, description=None):
+        self.connection = connection
+        self.owner_id = owner_id
+        self.name = name
+        self.description = description
+        self.ec2_groups = []
+        self.ip_ranges = []
+
+    def __repr__(self):
+        return 'DBSecurityGroup:%s' % self.name
+
+    def startElement(self, name, attrs, connection):
+        if name == 'IPRange':
+            cidr = IPRange(self)
+            self.ip_ranges.append(cidr)
+            return cidr
+        elif name == 'EC2SecurityGroup':
+            ec2_grp = EC2SecurityGroup(self)
+            self.ec2_groups.append(ec2_grp)
+            return ec2_grp
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'OwnerId':
+            self.owner_id = value
+        elif name == 'DBSecurityGroupName':
+            self.name = value
+        elif name == 'DBSecurityGroupDescription':
+            self.description = value
+        elif name == 'IPRanges':
+            pass
+        else:
+            setattr(self, name, value)
+
+    def delete(self):
+        return self.connection.delete_dbsecurity_group(self.name)
+
+    def authorize(self, cidr_ip=None, ec2_group=None):
+        """
+        Add a new rule to this DBSecurity group.
+        You need to pass in either a CIDR block to authorize or
+        an EC2 SecurityGroup.
+
+        :type cidr_ip: string
+        :param cidr_ip: A valid CIDR IP range to authorize
+
+        :type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup`
+        :param ec2_group: An EC2 security group to authorize
+
+        :rtype: bool
+        :return: True if successful.
+        """
+        if isinstance(ec2_group, SecurityGroup):
+            group_name = ec2_group.name
+            group_owner_id = ec2_group.owner_id
+        else:
+            group_name = None
+            group_owner_id = None
+        return self.connection.authorize_dbsecurity_group(self.name,
+                                                          cidr_ip,
+                                                          group_name,
+                                                          group_owner_id)
+
+    def revoke(self, cidr_ip=None, ec2_group=None):
+        """
+        Revoke access to a CIDR range or EC2 SecurityGroup.
+        You need to pass in either a CIDR block or
+        an EC2 SecurityGroup from which to revoke access.
+
+        :type cidr_ip: string
+        :param cidr_ip: A valid CIDR IP range to revoke
+
+        :type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup`
+        :param ec2_group: An EC2 security group to revoke
+
+        :rtype: bool
+        :return: True if successful.
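+
+        A minimal usage sketch (illustrative only; assumes ``group`` is a
+        DBSecurityGroup that currently allows the CIDR range below)::
+
+            # 203.0.113.0/24 is a placeholder (TEST-NET-3) range
+            group.revoke(cidr_ip='203.0.113.0/24')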
+ """ + if isinstance(ec2_group, SecurityGroup): + group_name = ec2_group.name + group_owner_id = ec2_group.owner_id + return self.connection.revoke_dbsecurity_group( + self.name, + ec2_security_group_name=group_name, + ec2_security_group_owner_id=group_owner_id) + + # Revoking by CIDR IP range + return self.connection.revoke_dbsecurity_group( + self.name, cidr_ip=cidr_ip) + +class IPRange(object): + """ + Describes a CIDR address range for use in a DBSecurityGroup + + :ivar cidr_ip: IP Address range + """ + + def __init__(self, parent=None): + self.parent = parent + self.cidr_ip = None + self.status = None + + def __repr__(self): + return 'IPRange:%s' % self.cidr_ip + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'CIDRIP': + self.cidr_ip = value + elif name == 'Status': + self.status = value + else: + setattr(self, name, value) + +class EC2SecurityGroup(object): + """ + Describes an EC2 security group for use in a DBSecurityGroup + """ + + def __init__(self, parent=None): + self.parent = parent + self.name = None + self.owner_id = None + + def __repr__(self): + return 'EC2SecurityGroup:%s' % self.name + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'EC2SecurityGroupName': + self.name = value + elif name == 'EC2SecurityGroupOwnerId': + self.owner_id = value + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/rds/dbsnapshot.py b/desktop/core/ext-py/boto-2.38.0/boto/rds/dbsnapshot.py new file mode 100644 index 0000000000000000000000000000000000000000..16d8125be7f464f1e893da94bb206858ac3649fd --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/rds/dbsnapshot.py @@ -0,0 +1,138 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+
+class DBSnapshot(object):
+    """
+    Represents an RDS DB Snapshot
+
+    Properties reference available from the AWS documentation at
+    http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DBSnapshot.html
+
+    :ivar engine_version: Specifies the version of the database engine
+    :ivar license_model: License model information for the restored DB instance
+    :ivar allocated_storage: Specifies the allocated storage size in gigabytes (GB)
+    :ivar availability_zone: Specifies the name of the Availability Zone the
+        DB Instance was located in at the time of the DB Snapshot
+    :ivar connection: boto.rds.RDSConnection associated with the current object
+    :ivar engine: Specifies the name of the database engine
+    :ivar id: Specifies the identifier for the DB Snapshot (DBSnapshotIdentifier)
+    :ivar instance_create_time: Specifies the time (UTC) when the snapshot was taken
+    :ivar instance_id: Specifies the DBInstanceIdentifier of the DB Instance
+        this DB Snapshot was created from (DBInstanceIdentifier)
+    :ivar master_username: Provides the master username for the DB Instance
+    :ivar port: Specifies the port that the database engine was listening on
+        at the time of the snapshot
+    :ivar snapshot_create_time: Provides the time (UTC) when the snapshot was taken
+    :ivar status: Specifies the status of this DB Snapshot.  Possible values
+        are [ available, backing-up, creating, deleted, deleting, failed,
+        modifying, rebooting, resetting-master-credentials ]
+    :ivar iops: Specifies the Provisioned IOPS (I/O operations per second)
+        value of the DB instance at the time of the snapshot.
+    :ivar option_group_name: Provides the option group name for the DB snapshot.
+    :ivar percent_progress: The percentage of the estimated data that has
+        been transferred.
+    :ivar snapshot_type: Provides the type of the DB snapshot.
+    :ivar source_region: The region that the DB snapshot was created in or
+        copied from.
+    :ivar vpc_id: Provides the Vpc Id associated with the DB snapshot.
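+
+    A minimal usage sketch (illustrative only; assumes ``conn`` is an open
+    :class:`boto.rds.RDSConnection`; 'mydb' is a placeholder instance id)::
+
+        snaps = conn.get_all_dbsnapshots(instance_id='mydb')
+        available = [s for s in snaps if s.status == 'available']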
+ """ + + def __init__(self, connection=None, id=None): + self.connection = connection + self.id = id + self.engine = None + self.engine_version = None + self.snapshot_create_time = None + self.instance_create_time = None + self.port = None + self.status = None + self.availability_zone = None + self.master_username = None + self.allocated_storage = None + self.instance_id = None + self.availability_zone = None + self.license_model = None + self.iops = None + self.option_group_name = None + self.percent_progress = None + self.snapshot_type = None + self.source_region = None + self.vpc_id = None + + def __repr__(self): + return 'DBSnapshot:%s' % self.id + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Engine': + self.engine = value + elif name == 'EngineVersion': + self.engine_version = value + elif name == 'InstanceCreateTime': + self.instance_create_time = value + elif name == 'SnapshotCreateTime': + self.snapshot_create_time = value + elif name == 'DBInstanceIdentifier': + self.instance_id = value + elif name == 'DBSnapshotIdentifier': + self.id = value + elif name == 'Port': + self.port = int(value) + elif name == 'Status': + self.status = value + elif name == 'AvailabilityZone': + self.availability_zone = value + elif name == 'MasterUsername': + self.master_username = value + elif name == 'AllocatedStorage': + self.allocated_storage = int(value) + elif name == 'SnapshotTime': + self.time = value + elif name == 'LicenseModel': + self.license_model = value + elif name == 'Iops': + self.iops = int(value) + elif name == 'OptionGroupName': + self.option_group_name = value + elif name == 'PercentProgress': + self.percent_progress = int(value) + elif name == 'SnapshotType': + self.snapshot_type = value + elif name == 'SourceRegion': + self.source_region = value + elif name == 'VpcId': + self.vpc_id = value + else: + setattr(self, name, value) + + def update(self, validate=False): + """ + Update the DB snapshot's status information by making a call to fetch + the current snapshot attributes from the service. + + :type validate: bool + :param validate: By default, if EC2 returns no data about the + instance the update method returns quietly. If + the validate param is True, however, it will + raise a ValueError exception if no data is + returned from EC2. 
+ """ + rs = self.connection.get_all_dbsnapshots(self.id) + if len(rs) > 0: + for i in rs: + if i.id == self.id: + self.__dict__.update(i.__dict__) + elif validate: + raise ValueError('%s is not a valid Snapshot ID' % self.id) + return self.status diff --git a/desktop/core/ext-py/boto-2.38.0/boto/rds/dbsubnetgroup.py b/desktop/core/ext-py/boto-2.38.0/boto/rds/dbsubnetgroup.py new file mode 100644 index 0000000000000000000000000000000000000000..4f6bde892458212d3c99069f10d7d2ac10541563 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/rds/dbsubnetgroup.py @@ -0,0 +1,69 @@ +# Copyright (c) 2013 Franc Carter - franc.carter@gmail.com +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an DBSubnetGroup +""" + +class DBSubnetGroup(object): + """ + Represents an RDS database subnet group + + Properties reference available from the AWS documentation at http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DeleteDBSubnetGroup.html + + :ivar status: The current status of the subnet group. Possibile values are [ active, ? ]. 
Reference documentation lacks specifics of possibilities + :ivar connection: boto.rds.RDSConnection associated with the current object + :ivar description: The description of the subnet group + :ivar subnet_ids: List of subnet identifiers in the group + :ivar name: Name of the subnet group + :ivar vpc_id: The ID of the VPC the subnets are inside + """ + def __init__(self, connection=None, name=None, description=None, subnet_ids=None): + self.connection = connection + self.name = name + self.description = description + if subnet_ids is not None: + self.subnet_ids = subnet_ids + else: + self.subnet_ids = [] + self.vpc_id = None + self.status = None + + def __repr__(self): + return 'DBSubnetGroup:%s' % self.name + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'SubnetIdentifier': + self.subnet_ids.append(value) + elif name == 'DBSubnetGroupName': + self.name = value + elif name == 'DBSubnetGroupDescription': + self.description = value + elif name == 'VpcId': + self.vpc_id = value + elif name == 'SubnetGroupStatus': + self.status = value + else: + setattr(self, name, value) + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/rds/event.py b/desktop/core/ext-py/boto-2.38.0/boto/rds/event.py new file mode 100644 index 0000000000000000000000000000000000000000..a91f8f08a50c9efa9a10021cc814429710348ec6 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/rds/event.py @@ -0,0 +1,49 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +class Event(object): + + def __init__(self, connection=None): + self.connection = connection + self.message = None + self.source_identifier = None + self.source_type = None + self.engine = None + self.date = None + + def __repr__(self): + return '"%s"' % self.message + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'SourceIdentifier': + self.source_identifier = value + elif name == 'SourceType': + self.source_type = value + elif name == 'Message': + self.message = value + elif name == 'Date': + self.date = value + else: + setattr(self, name, value) + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/rds/logfile.py b/desktop/core/ext-py/boto-2.38.0/boto/rds/logfile.py new file mode 100644 index 0000000000000000000000000000000000000000..dd80a6ff82d097680e5046ecbdc0aa1b08192b4b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/rds/logfile.py @@ -0,0 +1,68 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2014 Jumping Qu http://newrice.blogspot.com/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +class LogFile(object): + + def __init__(self, connection=None): + self.connection = connection + self.size = None + self.log_filename = None + self.last_written = None + + def __repr__(self): + #return '(%s, %s, %s)' % (self.logfilename, self.size, self.lastwritten) + return '%s' % (self.log_filename) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'LastWritten': + self.last_written = value + elif name == 'LogFileName': + self.log_filename = value + elif name == 'Size': + self.size = value + else: + setattr(self, name, value) + + +class LogFileObject(object): + def __init__(self, connection=None): + self.connection = connection + self.log_filename = None + + def __repr__(self): + return "LogFileObject: %s/%s" % (self.dbinstance_id, self.log_filename) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'LogFileData': + self.data = value + elif name == 'AdditionalDataPending': + self.additional_data_pending = value + elif name == 'Marker': + self.marker = value + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/rds/optiongroup.py b/desktop/core/ext-py/boto-2.38.0/boto/rds/optiongroup.py new file mode 100644 index 0000000000000000000000000000000000000000..8968b6cad65a3b2e79c673f740c628c0f72ba83a --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/rds/optiongroup.py @@ -0,0 +1,404 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an OptionGroup +""" + +from boto.rds.dbsecuritygroup import DBSecurityGroup +from boto.resultset import ResultSet + + +class OptionGroup(object): + """ + Represents an RDS option group + + Properties reference available from the AWS documentation at + http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_OptionGroup.html + + :ivar connection: :py:class:`boto.rds.RDSConnection` associated with the + current object + :ivar name: Name of the option group + :ivar description: The description of the option group + :ivar engine_name: The name of the database engine to use + :ivar major_engine_version: The major version number of the engine to use + :ivar allow_both_vpc_and_nonvpc: Indicates whether this option group can be + applied to both VPC and non-VPC instances. + The value ``True`` indicates the option + group can be applied to both VPC and + non-VPC instances. 
+ :ivar vpc_id: If AllowsVpcAndNonVpcInstanceMemberships is 'false', this + field is blank. If AllowsVpcAndNonVpcInstanceMemberships is + ``True`` and this field is blank, then this option group can + be applied to both VPC and non-VPC instances. If this field + contains a value, then this option group can only be applied + to instances that are in the VPC indicated by this field. + :ivar options: The list of :py:class:`boto.rds.optiongroup.Option` objects + associated with the group + """ + def __init__(self, connection=None, name=None, engine_name=None, + major_engine_version=None, description=None, + allow_both_vpc_and_nonvpc=False, vpc_id=None): + self.name = name + self.engine_name = engine_name + self.major_engine_version = major_engine_version + self.description = description + self.allow_both_vpc_and_nonvpc = allow_both_vpc_and_nonvpc + self.vpc_id = vpc_id + self.options = [] + + def __repr__(self): + return 'OptionGroup:%s' % self.name + + def startElement(self, name, attrs, connection): + if name == 'Options': + self.options = ResultSet([ + ('Options', Option) + ]) + else: + return None + + def endElement(self, name, value, connection): + if name == 'OptionGroupName': + self.name = value + elif name == 'EngineName': + self.engine_name = value + elif name == 'MajorEngineVersion': + self.major_engine_version = value + elif name == 'OptionGroupDescription': + self.description = value + elif name == 'AllowsVpcAndNonVpcInstanceMemberships': + if value.lower() == 'true': + self.allow_both_vpc_and_nonvpc = True + else: + self.allow_both_vpc_and_nonvpc = False + elif name == 'VpcId': + self.vpc_id = value + else: + setattr(self, name, value) + + def delete(self): + return self.connection.delete_option_group(self.name) + + +class Option(object): + """ + Describes a Option for use in an OptionGroup + + :ivar name: The name of the option + :ivar description: The description of the option. + :ivar permanent: Indicate if this option is permanent. + :ivar persistent: Indicate if this option is persistent. + :ivar port: If required, the port configured for this option to use. + :ivar settings: The option settings for this option. + :ivar db_security_groups: If the option requires access to a port, then + this DB Security Group allows access to the port. + :ivar vpc_security_groups: If the option requires access to a port, then + this VPC Security Group allows access to the + port. 
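+
+    A minimal usage sketch (illustrative only; assumes ``group`` is an
+    OptionGroup returned by ``describe_option_groups``)::
+
+        # names of every option attached to the group
+        names = [opt.name for opt in group.options]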
+ """ + def __init__(self, name=None, description=None, permanent=False, + persistent=False, port=None, settings=None, + db_security_groups=None, vpc_security_groups=None): + self.name = name + self.description = description + self.permanent = permanent + self.persistent = persistent + self.port = port + self.settings = settings + self.db_security_groups = db_security_groups + self.vpc_security_groups = vpc_security_groups + + if self.settings is None: + self.settings = [] + + if self.db_security_groups is None: + self.db_security_groups = [] + + if self.vpc_security_groups is None: + self.vpc_security_groups = [] + + def __repr__(self): + return 'Option:%s' % self.name + + def startElement(self, name, attrs, connection): + if name == 'OptionSettings': + self.settings = ResultSet([ + ('OptionSettings', OptionSetting) + ]) + elif name == 'DBSecurityGroupMemberships': + self.db_security_groups = ResultSet([ + ('DBSecurityGroupMemberships', DBSecurityGroup) + ]) + elif name == 'VpcSecurityGroupMemberships': + self.vpc_security_groups = ResultSet([ + ('VpcSecurityGroupMemberships', VpcSecurityGroup) + ]) + else: + return None + + def endElement(self, name, value, connection): + if name == 'OptionName': + self.name = value + elif name == 'OptionDescription': + self.description = value + elif name == 'Permanent': + if value.lower() == 'true': + self.permenant = True + else: + self.permenant = False + elif name == 'Persistent': + if value.lower() == 'true': + self.persistent = True + else: + self.persistent = False + elif name == 'Port': + self.port = int(value) + else: + setattr(self, name, value) + + +class OptionSetting(object): + """ + Describes a OptionSetting for use in an Option + + :ivar name: The name of the option that has settings that you can set. + :ivar description: The description of the option setting. + :ivar value: The current value of the option setting. + :ivar default_value: The default value of the option setting. + :ivar allowed_values: The allowed values of the option setting. + :ivar data_type: The data type of the option setting. + :ivar apply_type: The DB engine specific parameter type. + :ivar is_modifiable: A Boolean value that, when true, indicates the option + setting can be modified from the default. + :ivar is_collection: Indicates if the option setting is part of a + collection. 
+ """ + + def __init__(self, name=None, description=None, value=None, + default_value=False, allowed_values=None, data_type=None, + apply_type=None, is_modifiable=False, is_collection=False): + self.name = name + self.description = description + self.value = value + self.default_value = default_value + self.allowed_values = allowed_values + self.data_type = data_type + self.apply_type = apply_type + self.is_modifiable = is_modifiable + self.is_collection = is_collection + + def __repr__(self): + return 'OptionSetting:%s' % self.name + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Name': + self.name = value + elif name == 'Description': + self.description = value + elif name == 'Value': + self.value = value + elif name == 'DefaultValue': + self.default_value = value + elif name == 'AllowedValues': + self.allowed_values = value + elif name == 'DataType': + self.data_type = value + elif name == 'ApplyType': + self.apply_type = value + elif name == 'IsModifiable': + if value.lower() == 'true': + self.is_modifiable = True + else: + self.is_modifiable = False + elif name == 'IsCollection': + if value.lower() == 'true': + self.is_collection = True + else: + self.is_collection = False + else: + setattr(self, name, value) + + +class VpcSecurityGroup(object): + """ + Describes a VPC security group for use in a OptionGroup + """ + def __init__(self, vpc_id=None, status=None): + self.vpc_id = vpc_id + self.status = status + + def __repr__(self): + return 'VpcSecurityGroup:%s' % self.vpc_id + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'VpcSecurityGroupId': + self.vpc_id = value + elif name == 'Status': + self.status = value + else: + setattr(self, name, value) + + +class OptionGroupOption(object): + """ + Describes a OptionGroupOption for use in an OptionGroup + + :ivar name: The name of the option + :ivar description: The description of the option. + :ivar engine_name: Engine name that this option can be applied to. + :ivar major_engine_version: Indicates the major engine version that the + option is available for. + :ivar min_minor_engine_version: The minimum required engine version for the + option to be applied. + :ivar permanent: Indicate if this option is permanent. + :ivar persistent: Indicate if this option is persistent. + :ivar port_required: Specifies whether the option requires a port. + :ivar default_port: If the option requires a port, specifies the default + port for the option. + :ivar settings: The option settings for this option. + :ivar depends_on: List of all options that are prerequisites for this + option. 
+ """ + def __init__(self, name=None, description=None, engine_name=None, + major_engine_version=None, min_minor_engine_version=None, + permanent=False, persistent=False, port_required=False, + default_port=None, settings=None, depends_on=None): + self.name = name + self.description = description + self.engine_name = engine_name + self.major_engine_version = major_engine_version + self.min_minor_engine_version = min_minor_engine_version + self.permanent = permanent + self.persistent = persistent + self.port_required = port_required + self.default_port = default_port + self.settings = settings + self.depends_on = depends_on + + if self.settings is None: + self.settings = [] + + if self.depends_on is None: + self.depends_on = [] + + def __repr__(self): + return 'OptionGroupOption:%s' % self.name + + def startElement(self, name, attrs, connection): + if name == 'OptionGroupOptionSettings': + self.settings = ResultSet([ + ('OptionGroupOptionSettings', OptionGroupOptionSetting) + ]) + elif name == 'OptionsDependedOn': + self.depends_on = [] + else: + return None + + def endElement(self, name, value, connection): + if name == 'Name': + self.name = value + elif name == 'Description': + self.description = value + elif name == 'EngineName': + self.engine_name = value + elif name == 'MajorEngineVersion': + self.major_engine_version = value + elif name == 'MinimumRequiredMinorEngineVersion': + self.min_minor_engine_version = value + elif name == 'Permanent': + if value.lower() == 'true': + self.permenant = True + else: + self.permenant = False + elif name == 'Persistent': + if value.lower() == 'true': + self.persistent = True + else: + self.persistent = False + elif name == 'PortRequired': + if value.lower() == 'true': + self.port_required = True + else: + self.port_required = False + elif name == 'DefaultPort': + self.default_port = int(value) + else: + setattr(self, name, value) + + +class OptionGroupOptionSetting(object): + """ + Describes a OptionGroupOptionSetting for use in an OptionGroupOption. + + :ivar name: The name of the option that has settings that you can set. + :ivar description: The description of the option setting. + :ivar value: The current value of the option setting. + :ivar default_value: The default value of the option setting. + :ivar allowed_values: The allowed values of the option setting. + :ivar data_type: The data type of the option setting. + :ivar apply_type: The DB engine specific parameter type. + :ivar is_modifiable: A Boolean value that, when true, indicates the option + setting can be modified from the default. + :ivar is_collection: Indicates if the option setting is part of a + collection. 
+ """ + + def __init__(self, name=None, description=None, default_value=False, + allowed_values=None, apply_type=None, is_modifiable=False): + self.name = name + self.description = description + self.default_value = default_value + self.allowed_values = allowed_values + self.apply_type = apply_type + self.is_modifiable = is_modifiable + + def __repr__(self): + return 'OptionGroupOptionSetting:%s' % self.name + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'SettingName': + self.name = value + elif name == 'SettingDescription': + self.description = value + elif name == 'DefaultValue': + self.default_value = value + elif name == 'AllowedValues': + self.allowed_values = value + elif name == 'ApplyType': + self.apply_type = value + elif name == 'IsModifiable': + if value.lower() == 'true': + self.is_modifiable = True + else: + self.is_modifiable = False + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/rds/parametergroup.py b/desktop/core/ext-py/boto-2.38.0/boto/rds/parametergroup.py new file mode 100644 index 0000000000000000000000000000000000000000..ade3b807e7a9c2cd6a03530e3065dc1cbb5fe8ad --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/rds/parametergroup.py @@ -0,0 +1,201 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+
+class ParameterGroup(dict):
+
+    def __init__(self, connection=None):
+        dict.__init__(self)
+        self.connection = connection
+        self.name = None
+        self.description = None
+        self.engine = None
+        self._current_param = None
+
+    def __repr__(self):
+        return 'ParameterGroup:%s' % self.name
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Parameter':
+            if self._current_param:
+                self[self._current_param.name] = self._current_param
+            self._current_param = Parameter(self)
+            return self._current_param
+
+    def endElement(self, name, value, connection):
+        if name == 'DBParameterGroupName':
+            self.name = value
+        elif name == 'Description':
+            self.description = value
+        elif name == 'Engine':
+            self.engine = value
+        else:
+            setattr(self, name, value)
+
+    def modifiable(self):
+        mod = []
+        for key in self:
+            p = self[key]
+            if p.is_modifiable:
+                mod.append(p)
+        return mod
+
+    def get_params(self):
+        pg = self.connection.get_all_dbparameters(self.name)
+        self.update(pg)
+
+    def add_param(self, name, value, apply_method):
+        param = Parameter()
+        param.name = name
+        param.value = value
+        param.apply_method = apply_method
+        # A ParameterGroup is a dict keyed by parameter name (there is no
+        # self.params list), so store the new Parameter under its name.
+        self[name] = param
+
+
+class Parameter(object):
+    """
+    Represents an RDS Parameter
+    """
+
+    ValidTypes = {'integer': int,
+                  'string': str,
+                  'boolean': bool}
+    ValidSources = ['user', 'system', 'engine-default']
+    ValidApplyTypes = ['static', 'dynamic']
+    ValidApplyMethods = ['immediate', 'pending-reboot']
+
+    def __init__(self, group=None, name=None):
+        self.group = group
+        self.name = name
+        self._value = None
+        self.type = 'string'
+        self.source = None
+        self.is_modifiable = True
+        self.description = None
+        self.apply_method = None
+        self.allowed_values = None
+
+    def __repr__(self):
+        return 'Parameter:%s' % self.name
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name == 'ParameterName':
+            self.name = value
+        elif name == 'ParameterValue':
+            self._value = value
+        elif name == 'DataType':
+            if value in self.ValidTypes:
+                self.type = value
+        elif name == 'Source':
+            if value in self.ValidSources:
+                self.source = value
+        elif name == 'IsModifiable':
+            if value.lower() == 'true':
+                self.is_modifiable = True
+            else:
+                self.is_modifiable = False
+        elif name == 'Description':
+            self.description = value
+        elif name == 'ApplyType':
+            if value in self.ValidApplyTypes:
+                self.apply_type = value
+        elif name == 'AllowedValues':
+            self.allowed_values = value
+        else:
+            setattr(self, name, value)
+
+    def merge(self, d, i):
+        prefix = 'Parameters.member.%d.' % i
+        if self.name:
+            d[prefix+'ParameterName'] = self.name
+        if self._value is not None:
+            d[prefix+'ParameterValue'] = self._value
+        # Guard on apply_method, which is what gets serialized below;
+        # apply_type may not be set on locally constructed Parameters.
+        if self.apply_method:
+            d[prefix+'ApplyMethod'] = self.apply_method
+
+    def _set_string_value(self, value):
+        if not isinstance(value, basestring):
+            raise ValueError('value must be of type str')
+        if self.allowed_values:
+            choices = self.allowed_values.split(',')
+            if value not in choices:
+                raise ValueError('value must be in %s' % self.allowed_values)
+        self._value = value
+
+    def _set_integer_value(self, value):
+        if isinstance(value, basestring):
+            value = int(value)
+        if isinstance(value, int) or isinstance(value, long):
+            if self.allowed_values:
+                min, max = self.allowed_values.split('-')
+                if value < int(min) or value > int(max):
+                    raise ValueError('range is %s' % self.allowed_values)
+            self._value = value
+        else:
+            raise ValueError('value must be integer')
+
+    def _set_boolean_value(self, value):
+        if isinstance(value, bool):
+            self._value = value
+        elif isinstance(value, basestring):
+            if value.lower() == 'true':
+                self._value = True
+            else:
+                self._value = False
+        else:
+            raise ValueError('value must be boolean')
+
+    def set_value(self, value):
+        if self.type == 'string':
+            self._set_string_value(value)
+        elif self.type == 'integer':
+            self._set_integer_value(value)
+        elif self.type == 'boolean':
+            self._set_boolean_value(value)
+        else:
+            raise TypeError('unknown type (%s)' % self.type)
+
+    def get_value(self):
+        if self._value is None:
+            return self._value
+        if self.type == 'string':
+            return self._value
+        elif self.type == 'integer':
+            if not isinstance(self._value, int) and not isinstance(self._value, long):
+                self._set_integer_value(self._value)
+            return self._value
+        elif self.type == 'boolean':
+            if not isinstance(self._value, bool):
+                self._set_boolean_value(self._value)
+            return self._value
+        else:
+            raise TypeError('unknown type (%s)' % self.type)
+
+    value = property(get_value, set_value, 'The value of the parameter')
+
+    def apply(self, immediate=False):
+        if immediate:
+            self.apply_method = 'immediate'
+        else:
+            self.apply_method = 'pending-reboot'
+        self.group.connection.modify_parameter_group(self.group.name, [self])
+
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/rds/regioninfo.py b/desktop/core/ext-py/boto-2.38.0/boto/rds/regioninfo.py
new file mode 100644
index 0000000000000000000000000000000000000000..5019aca90f761d0b28607046ca74d375fdb0a649
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/rds/regioninfo.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.regioninfo import RegionInfo + +class RDSRegionInfo(RegionInfo): + + def __init__(self, connection=None, name=None, endpoint=None, + connection_cls=None): + from boto.rds import RDSConnection + super(RDSRegionInfo, self).__init__(connection, name, endpoint, + RDSConnection) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/rds/statusinfo.py b/desktop/core/ext-py/boto-2.38.0/boto/rds/statusinfo.py new file mode 100644 index 0000000000000000000000000000000000000000..d4ff9b08deef8d2195ded3075763211e82a42642 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/rds/statusinfo.py @@ -0,0 +1,54 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +class StatusInfo(object): + """ + Describes a status message. 
+ """ + + def __init__(self, status_type=None, normal=None, status=None, message=None): + self.status_type = status_type + self.normal = normal + self.status = status + self.message = message + + def __repr__(self): + return 'StatusInfo:%s' % self.message + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'StatusType': + self.status_type = value + elif name == 'Normal': + if value.lower() == 'true': + self.normal = True + else: + self.normal = False + elif name == 'Status': + self.status = value + elif name == 'Message': + self.message = value + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/rds/vpcsecuritygroupmembership.py b/desktop/core/ext-py/boto-2.38.0/boto/rds/vpcsecuritygroupmembership.py new file mode 100644 index 0000000000000000000000000000000000000000..e0092e9c2fb2aba3ee7b73b9e40893d5ee97d38b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/rds/vpcsecuritygroupmembership.py @@ -0,0 +1,85 @@ +# Copyright (c) 2013 Anthony Tonns http://www.corsis.com/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents a VPCSecurityGroupMembership +""" + + +class VPCSecurityGroupMembership(object): + """ + Represents VPC Security Group that this RDS database is a member of + + Properties reference available from the AWS documentation at + http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/\ + API_VpcSecurityGroupMembership.html + + Example:: + pri = "sg-abcdefgh" + sec = "sg-hgfedcba" + + # Create with list of str + db = c.create_dbinstance(... vpc_security_groups=[pri], ... ) + + # Modify with list of str + db.modify(... vpc_security_groups=[pri,sec], ... ) + + # Create with objects + memberships = [] + membership = VPCSecurityGroupMembership() + membership.vpc_group = pri + memberships.append(membership) + + db = c.create_dbinstance(... vpc_security_groups=memberships, ... ) + + # Modify with objects + memberships = d.vpc_security_groups + membership = VPCSecurityGroupMembership() + membership.vpc_group = sec + memberships.append(membership) + + db.modify(... vpc_security_groups=memberships, ... 
) + + :ivar connection: :py:class:`boto.rds.RDSConnection` associated with the + current object + :ivar vpc_group: This id of the VPC security group + :ivar status: Status of the VPC security group membership + ` objects that this RDS Instance + is a member of + """ + def __init__(self, connection=None, status=None, vpc_group=None): + self.connection = connection + self.status = status + self.vpc_group = vpc_group + + def __repr__(self): + return 'VPCSecurityGroupMembership:%s' % self.vpc_group + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'VpcSecurityGroupId': + self.vpc_group = value + elif name == 'Status': + self.status = value + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/rds2/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/rds2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..023a0baa957f90e661aed412c0bc9573654d5fdb --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/rds2/__init__.py @@ -0,0 +1,53 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import get_regions + + +def regions(): + """ + Get all available regions for the RDS service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.rds2.layer1 import RDSConnection + return get_regions('rds', connection_cls=RDSConnection) + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.rds2.layer1.RDSConnection`. + Any additional parameters after the region_name are passed on to + the connect method of the region object. + + :type: str + :param region_name: The name of the region to connect to. + + :rtype: :class:`boto.rds2.layer1.RDSConnection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/rds2/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/rds2/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..be610b0171209d51fa4630d6caa25f340c00d29a --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/rds2/exceptions.py @@ -0,0 +1,234 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import JSONResponseError + + +class InvalidSubnet(JSONResponseError): + pass + + +class DBParameterGroupQuotaExceeded(JSONResponseError): + pass + + +class DBSubnetGroupAlreadyExists(JSONResponseError): + pass + + +class DBSubnetGroupQuotaExceeded(JSONResponseError): + pass + + +class InstanceQuotaExceeded(JSONResponseError): + pass + + +class InvalidRestore(JSONResponseError): + pass + + +class InvalidDBParameterGroupState(JSONResponseError): + pass + + +class AuthorizationQuotaExceeded(JSONResponseError): + pass + + +class DBSecurityGroupAlreadyExists(JSONResponseError): + pass + + +class InsufficientDBInstanceCapacity(JSONResponseError): + pass + + +class ReservedDBInstanceQuotaExceeded(JSONResponseError): + pass + + +class DBSecurityGroupNotFound(JSONResponseError): + pass + + +class DBInstanceAlreadyExists(JSONResponseError): + pass + + +class ReservedDBInstanceNotFound(JSONResponseError): + pass + + +class DBSubnetGroupDoesNotCoverEnoughAZs(JSONResponseError): + pass + + +class InvalidDBSecurityGroupState(JSONResponseError): + pass + + +class InvalidVPCNetworkState(JSONResponseError): + pass + + +class ReservedDBInstancesOfferingNotFound(JSONResponseError): + pass + + +class SNSTopicArnNotFound(JSONResponseError): + pass + + +class SNSNoAuthorization(JSONResponseError): + pass + + +class SnapshotQuotaExceeded(JSONResponseError): + pass + + +class OptionGroupQuotaExceeded(JSONResponseError): + pass + + +class DBParameterGroupNotFound(JSONResponseError): + pass + + +class SNSInvalidTopic(JSONResponseError): + pass + + +class InvalidDBSubnetGroupState(JSONResponseError): + pass + + +class DBSubnetGroupNotFound(JSONResponseError): + pass + + +class InvalidOptionGroupState(JSONResponseError): + pass + + +class SourceNotFound(JSONResponseError): + pass + + +class SubscriptionCategoryNotFound(JSONResponseError): + pass + + +class EventSubscriptionQuotaExceeded(JSONResponseError): + pass + + +class DBSecurityGroupNotSupported(JSONResponseError): + pass + + +class InvalidEventSubscriptionState(JSONResponseError): + pass + + +class InvalidDBSubnetState(JSONResponseError): + pass + + +class InvalidDBSnapshotState(JSONResponseError): + pass + + +class SubscriptionAlreadyExist(JSONResponseError): + pass + + +class DBSecurityGroupQuotaExceeded(JSONResponseError): + pass + + +class ProvisionedIopsNotAvailableInAZ(JSONResponseError): + pass + + +class AuthorizationNotFound(JSONResponseError): + pass + + +class 
OptionGroupAlreadyExists(JSONResponseError): + pass + + +class SubscriptionNotFound(JSONResponseError): + pass + + +class DBUpgradeDependencyFailure(JSONResponseError): + pass + + +class PointInTimeRestoreNotEnabled(JSONResponseError): + pass + + +class AuthorizationAlreadyExists(JSONResponseError): + pass + + +class DBSubnetQuotaExceeded(JSONResponseError): + pass + + +class OptionGroupNotFound(JSONResponseError): + pass + + +class DBParameterGroupAlreadyExists(JSONResponseError): + pass + + +class DBInstanceNotFound(JSONResponseError): + pass + + +class ReservedDBInstanceAlreadyExists(JSONResponseError): + pass + + +class InvalidDBInstanceState(JSONResponseError): + pass + + +class DBSnapshotNotFound(JSONResponseError): + pass + + +class DBSnapshotAlreadyExists(JSONResponseError): + pass + + +class StorageQuotaExceeded(JSONResponseError): + pass + + +class SubnetAlreadyInUse(JSONResponseError): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/rds2/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/rds2/layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..5615f1107db93270538336b2454ae73436d31f4f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/rds2/layer1.py @@ -0,0 +1,3770 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.rds2 import exceptions +from boto.compat import json + + +class RDSConnection(AWSQueryConnection): + """ + Amazon Relational Database Service + Amazon Relational Database Service (Amazon RDS) is a web service + that makes it easier to set up, operate, and scale a relational + database in the cloud. It provides cost-efficient, resizable + capacity for an industry-standard relational database and manages + common database administration tasks, freeing up developers to + focus on what makes their applications and businesses unique. + + Amazon RDS gives you access to the capabilities of a familiar + MySQL or Oracle database server. This means the code, + applications, and tools you already use today with your existing + MySQL or Oracle databases work with Amazon RDS without + modification. Amazon RDS automatically backs up your database and + maintains the database software that powers your DB instance. 
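+
+    A minimal way to obtain this connection (illustrative; the region name
+    is assumed, and ``connect_to_region`` is defined in ``boto.rds2``)::
+
+        import boto.rds2
+        conn = boto.rds2.connect_to_region('us-east-1')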
+ Amazon RDS is flexible: you can scale your database instance's + compute resources and storage capacity to meet your application's + demand. As with all Amazon Web Services, there are no up-front + investments, and you pay only for the resources you use. + + This is the Amazon RDS API Reference . It contains a comprehensive + description of all Amazon RDS Query APIs and data types. Note that + this API is asynchronous and some actions may require polling to + determine when an action has been applied. See the parameter + description to determine if a change is applied immediately or on + the next instance reboot or during the maintenance window. For + more information on Amazon RDS concepts and usage scenarios, go to + the `Amazon RDS User Guide`_. + """ + APIVersion = "2013-09-09" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "rds.us-east-1.amazonaws.com" + ResponseError = JSONResponseError + + _faults = { + "InvalidSubnet": exceptions.InvalidSubnet, + "DBParameterGroupQuotaExceeded": exceptions.DBParameterGroupQuotaExceeded, + "DBSubnetGroupAlreadyExists": exceptions.DBSubnetGroupAlreadyExists, + "DBSubnetGroupQuotaExceeded": exceptions.DBSubnetGroupQuotaExceeded, + "InstanceQuotaExceeded": exceptions.InstanceQuotaExceeded, + "InvalidRestore": exceptions.InvalidRestore, + "InvalidDBParameterGroupState": exceptions.InvalidDBParameterGroupState, + "AuthorizationQuotaExceeded": exceptions.AuthorizationQuotaExceeded, + "DBSecurityGroupAlreadyExists": exceptions.DBSecurityGroupAlreadyExists, + "InsufficientDBInstanceCapacity": exceptions.InsufficientDBInstanceCapacity, + "ReservedDBInstanceQuotaExceeded": exceptions.ReservedDBInstanceQuotaExceeded, + "DBSecurityGroupNotFound": exceptions.DBSecurityGroupNotFound, + "DBInstanceAlreadyExists": exceptions.DBInstanceAlreadyExists, + "ReservedDBInstanceNotFound": exceptions.ReservedDBInstanceNotFound, + "DBSubnetGroupDoesNotCoverEnoughAZs": exceptions.DBSubnetGroupDoesNotCoverEnoughAZs, + "InvalidDBSecurityGroupState": exceptions.InvalidDBSecurityGroupState, + "InvalidVPCNetworkState": exceptions.InvalidVPCNetworkState, + "ReservedDBInstancesOfferingNotFound": exceptions.ReservedDBInstancesOfferingNotFound, + "SNSTopicArnNotFound": exceptions.SNSTopicArnNotFound, + "SNSNoAuthorization": exceptions.SNSNoAuthorization, + "SnapshotQuotaExceeded": exceptions.SnapshotQuotaExceeded, + "OptionGroupQuotaExceeded": exceptions.OptionGroupQuotaExceeded, + "DBParameterGroupNotFound": exceptions.DBParameterGroupNotFound, + "SNSInvalidTopic": exceptions.SNSInvalidTopic, + "InvalidDBSubnetGroupState": exceptions.InvalidDBSubnetGroupState, + "DBSubnetGroupNotFound": exceptions.DBSubnetGroupNotFound, + "InvalidOptionGroupState": exceptions.InvalidOptionGroupState, + "SourceNotFound": exceptions.SourceNotFound, + "SubscriptionCategoryNotFound": exceptions.SubscriptionCategoryNotFound, + "EventSubscriptionQuotaExceeded": exceptions.EventSubscriptionQuotaExceeded, + "DBSecurityGroupNotSupported": exceptions.DBSecurityGroupNotSupported, + "InvalidEventSubscriptionState": exceptions.InvalidEventSubscriptionState, + "InvalidDBSubnetState": exceptions.InvalidDBSubnetState, + "InvalidDBSnapshotState": exceptions.InvalidDBSnapshotState, + "SubscriptionAlreadyExist": exceptions.SubscriptionAlreadyExist, + "DBSecurityGroupQuotaExceeded": exceptions.DBSecurityGroupQuotaExceeded, + "ProvisionedIopsNotAvailableInAZ": exceptions.ProvisionedIopsNotAvailableInAZ, + "AuthorizationNotFound": exceptions.AuthorizationNotFound, + "OptionGroupAlreadyExists": 
exceptions.OptionGroupAlreadyExists, + "SubscriptionNotFound": exceptions.SubscriptionNotFound, + "DBUpgradeDependencyFailure": exceptions.DBUpgradeDependencyFailure, + "PointInTimeRestoreNotEnabled": exceptions.PointInTimeRestoreNotEnabled, + "AuthorizationAlreadyExists": exceptions.AuthorizationAlreadyExists, + "DBSubnetQuotaExceeded": exceptions.DBSubnetQuotaExceeded, + "OptionGroupNotFound": exceptions.OptionGroupNotFound, + "DBParameterGroupAlreadyExists": exceptions.DBParameterGroupAlreadyExists, + "DBInstanceNotFound": exceptions.DBInstanceNotFound, + "ReservedDBInstanceAlreadyExists": exceptions.ReservedDBInstanceAlreadyExists, + "InvalidDBInstanceState": exceptions.InvalidDBInstanceState, + "DBSnapshotNotFound": exceptions.DBSnapshotNotFound, + "DBSnapshotAlreadyExists": exceptions.DBSnapshotAlreadyExists, + "StorageQuotaExceeded": exceptions.StorageQuotaExceeded, + "SubnetAlreadyInUse": exceptions.SubnetAlreadyInUse, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs: + kwargs['host'] = region.endpoint + + super(RDSConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def add_source_identifier_to_subscription(self, subscription_name, + source_identifier): + """ + Adds a source identifier to an existing RDS event notification + subscription. + + :type subscription_name: string + :param subscription_name: The name of the RDS event notification + subscription you want to add a source identifier to. + + :type source_identifier: string + :param source_identifier: + The identifier of the event source to be added. An identifier must + begin with a letter and must contain only ASCII letters, digits, + and hyphens; it cannot end with a hyphen or contain two consecutive + hyphens. + + Constraints: + + + + If the source type is a DB instance, then a `DBInstanceIdentifier` + must be supplied. + + If the source type is a DB security group, a `DBSecurityGroupName` + must be supplied. + + If the source type is a DB parameter group, a `DBParameterGroupName` + must be supplied. + + If the source type is a DB snapshot, a `DBSnapshotIdentifier` must be + supplied. + + """ + params = { + 'SubscriptionName': subscription_name, + 'SourceIdentifier': source_identifier, + } + return self._make_request( + action='AddSourceIdentifierToSubscription', + verb='POST', + path='/', params=params) + + def add_tags_to_resource(self, resource_name, tags): + """ + Adds metadata tags to an Amazon RDS resource. These tags can + also be used with cost allocation reporting to track cost + associated with Amazon RDS resources, or used in Condition + statement in IAM policy for Amazon RDS. + + For an overview on tagging Amazon RDS resources, see `Tagging + Amazon RDS Resources`_. + + :type resource_name: string + :param resource_name: The Amazon RDS resource the tags will be added + to. This value is an Amazon Resource Name (ARN). For information + about creating an ARN, see ` Constructing an RDS Amazon Resource + Name (ARN)`_. + + :type tags: list + :param tags: The tags to be assigned to the Amazon RDS resource. 
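+
+        Each tag may be given as a ``(key, value)`` tuple, matching the
+        ``('Key', 'Value')`` member spec passed to
+        ``build_complex_list_params`` below. For example (the ARN and tag
+        values are illustrative)::
+
+            conn.add_tags_to_resource(
+                'arn:aws:rds:us-east-1:123456789012:db:mydb',
+                [('environment', 'production'), ('owner', 'data-team')])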
+ + """ + params = {'ResourceName': resource_name, } + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='AddTagsToResource', + verb='POST', + path='/', params=params) + + def authorize_db_security_group_ingress(self, db_security_group_name, + cidrip=None, + ec2_security_group_name=None, + ec2_security_group_id=None, + ec2_security_group_owner_id=None): + """ + Enables ingress to a DBSecurityGroup using one of two forms of + authorization. First, EC2 or VPC security groups can be added + to the DBSecurityGroup if the application using the database + is running on EC2 or VPC instances. Second, IP ranges are + available if the application accessing your database is + running on the Internet. Required parameters for this API are + one of CIDR range, EC2SecurityGroupId for VPC, or + (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or + EC2SecurityGroupId for non-VPC). + You cannot authorize ingress from an EC2 security group in one + Region to an Amazon RDS DB instance in another. You cannot + authorize ingress from a VPC security group in one VPC to an + Amazon RDS DB instance in another. + For an overview of CIDR ranges, go to the `Wikipedia + Tutorial`_. + + :type db_security_group_name: string + :param db_security_group_name: The name of the DB security group to add + authorization to. + + :type cidrip: string + :param cidrip: The IP range to authorize. + + :type ec2_security_group_name: string + :param ec2_security_group_name: Name of the EC2 security group to + authorize. For VPC DB security groups, `EC2SecurityGroupId` must be + provided. Otherwise, EC2SecurityGroupOwnerId and either + `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided. + + :type ec2_security_group_id: string + :param ec2_security_group_id: Id of the EC2 security group to + authorize. For VPC DB security groups, `EC2SecurityGroupId` must be + provided. Otherwise, EC2SecurityGroupOwnerId and either + `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided. + + :type ec2_security_group_owner_id: string + :param ec2_security_group_owner_id: AWS Account Number of the owner of + the EC2 security group specified in the EC2SecurityGroupName + parameter. The AWS Access Key ID is not an acceptable value. For + VPC DB security groups, `EC2SecurityGroupId` must be provided. + Otherwise, EC2SecurityGroupOwnerId and either + `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided. + + """ + params = {'DBSecurityGroupName': db_security_group_name, } + if cidrip is not None: + params['CIDRIP'] = cidrip + if ec2_security_group_name is not None: + params['EC2SecurityGroupName'] = ec2_security_group_name + if ec2_security_group_id is not None: + params['EC2SecurityGroupId'] = ec2_security_group_id + if ec2_security_group_owner_id is not None: + params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id + return self._make_request( + action='AuthorizeDBSecurityGroupIngress', + verb='POST', + path='/', params=params) + + def copy_db_snapshot(self, source_db_snapshot_identifier, + target_db_snapshot_identifier, tags=None): + """ + Copies the specified DBSnapshot. The source DBSnapshot must be + in the "available" state. + + :type source_db_snapshot_identifier: string + :param source_db_snapshot_identifier: The identifier for the source DB + snapshot. + Constraints: + + + + Must be the identifier for a valid system snapshot in the "available" + state. 
+ + + Example: `rds:mydb-2012-04-02-00-01` + + :type target_db_snapshot_identifier: string + :param target_db_snapshot_identifier: The identifier for the copied + snapshot. + Constraints: + + + + Cannot be null, empty, or blank + + Must contain from 1 to 255 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + + Example: `my-db-snapshot` + + :type tags: list + :param tags: A list of tags. + + """ + params = { + 'SourceDBSnapshotIdentifier': source_db_snapshot_identifier, + 'TargetDBSnapshotIdentifier': target_db_snapshot_identifier, + } + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CopyDBSnapshot', + verb='POST', + path='/', params=params) + + def create_db_instance(self, db_instance_identifier, allocated_storage, + db_instance_class, engine, master_username, + master_user_password, db_name=None, + db_security_groups=None, + vpc_security_group_ids=None, + availability_zone=None, db_subnet_group_name=None, + preferred_maintenance_window=None, + db_parameter_group_name=None, + backup_retention_period=None, + preferred_backup_window=None, port=None, + multi_az=None, engine_version=None, + auto_minor_version_upgrade=None, + license_model=None, iops=None, + option_group_name=None, character_set_name=None, + publicly_accessible=None, tags=None): + """ + Creates a new DB instance. + + :type db_name: string + :param db_name: The meaning of this parameter differs according to the + database engine you use. + **MySQL** + + The name of the database to create when the DB instance is created. If + this parameter is not specified, no database is created in the DB + instance. + + Constraints: + + + + Must contain 1 to 64 alphanumeric characters + + Cannot be a word reserved by the specified database engine + + + Type: String + + **Oracle** + + The Oracle System ID (SID) of the created DB instance. + + Default: `ORCL` + + Constraints: + + + + Cannot be longer than 8 characters + + + **SQL Server** + + Not applicable. Must be null. + + :type db_instance_identifier: string + :param db_instance_identifier: The DB instance identifier. This + parameter is stored as a lowercase string. + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 + for SQL Server). + + First character must be a letter. + + Cannot end with a hyphen or contain two consecutive hyphens. + + + Example: `mydbinstance` + + :type allocated_storage: integer + :param allocated_storage: The amount of storage (in gigabytes) to be + initially allocated for the database instance. + **MySQL** + + Constraints: Must be an integer from 5 to 1024. + + Type: Integer + + **Oracle** + + Constraints: Must be an integer from 10 to 1024. + + **SQL Server** + + Constraints: Must be an integer from 200 to 1024 (Standard Edition and + Enterprise Edition) or from 30 to 1024 (Express Edition and Web + Edition) + + :type db_instance_class: string + :param db_instance_class: The compute and memory capacity of the DB + instance. + Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | + db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge` + + :type engine: string + :param engine: The name of the database engine to be used for this + instance. 
+ Valid Values: `MySQL` | `oracle-se1` | `oracle-se` | `oracle-ee` | + `sqlserver-ee` | `sqlserver-se` | `sqlserver-ex` | `sqlserver-web` + + :type master_username: string + :param master_username: + The name of master user for the client DB instance. + + **MySQL** + + Constraints: + + + + Must be 1 to 16 alphanumeric characters. + + First character must be a letter. + + Cannot be a reserved word for the chosen database engine. + + + Type: String + + **Oracle** + + Constraints: + + + + Must be 1 to 30 alphanumeric characters. + + First character must be a letter. + + Cannot be a reserved word for the chosen database engine. + + + **SQL Server** + + Constraints: + + + + Must be 1 to 128 alphanumeric characters. + + First character must be a letter. + + Cannot be a reserved word for the chosen database engine. + + :type master_user_password: string + :param master_user_password: The password for the master database user. + Can be any printable ASCII character except "/", '"', or "@". + Type: String + + **MySQL** + + Constraints: Must contain from 8 to 41 characters. + + **Oracle** + + Constraints: Must contain from 8 to 30 characters. + + **SQL Server** + + Constraints: Must contain from 8 to 128 characters. + + :type db_security_groups: list + :param db_security_groups: A list of DB security groups to associate + with this DB instance. + Default: The default DB security group for the database engine. + + :type vpc_security_group_ids: list + :param vpc_security_group_ids: A list of EC2 VPC security groups to + associate with this DB instance. + Default: The default EC2 VPC security group for the DB subnet group's + VPC. + + :type availability_zone: string + :param availability_zone: The EC2 Availability Zone that the database + instance will be created in. + Default: A random, system-chosen Availability Zone in the endpoint's + region. + + Example: `us-east-1d` + + Constraint: The AvailabilityZone parameter cannot be specified if the + MultiAZ parameter is set to `True`. The specified Availability Zone + must be in the same region as the current endpoint. + + :type db_subnet_group_name: string + :param db_subnet_group_name: A DB subnet group to associate with this + DB instance. + If there is no DB subnet group, then it is a non-VPC DB instance. + + :type preferred_maintenance_window: string + :param preferred_maintenance_window: The weekly time range (in UTC) + during which system maintenance can occur. + Format: `ddd:hh24:mi-ddd:hh24:mi` + + Default: A 30-minute window selected at random from an 8-hour block of + time per region, occurring on a random day of the week. To see the + time blocks available, see ` Adjusting the Preferred Maintenance + Window`_ in the Amazon RDS User Guide. + + Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun + + Constraints: Minimum 30-minute window. + + :type db_parameter_group_name: string + :param db_parameter_group_name: + The name of the DB parameter group to associate with this DB instance. + If this argument is omitted, the default DBParameterGroup for the + specified engine will be used. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type backup_retention_period: integer + :param backup_retention_period: + The number of days for which automated backups are retained. Setting + this parameter to a positive number enables backups. Setting this + parameter to 0 disables automated backups. 
+ + Default: 1 + + Constraints: + + + + Must be a value from 0 to 8 + + Cannot be set to 0 if the DB instance is a master instance with read + replicas + + :type preferred_backup_window: string + :param preferred_backup_window: The daily time range during which + automated backups are created if automated backups are enabled, + using the `BackupRetentionPeriod` parameter. + Default: A 30-minute window selected at random from an 8-hour block of + time per region. See the Amazon RDS User Guide for the time blocks + for each region from which the default backup windows are assigned. + + Constraints: Must be in the format `hh24:mi-hh24:mi`. Times should be + Universal Time Coordinated (UTC). Must not conflict with the + preferred maintenance window. Must be at least 30 minutes. + + :type port: integer + :param port: The port number on which the database accepts connections. + **MySQL** + + Default: `3306` + + Valid Values: `1150-65535` + + Type: Integer + + **Oracle** + + Default: `1521` + + Valid Values: `1150-65535` + + **SQL Server** + + Default: `1433` + + Valid Values: `1150-65535` except for `1434` and `3389`. + + :type multi_az: boolean + :param multi_az: Specifies if the DB instance is a Multi-AZ deployment. + You cannot set the AvailabilityZone parameter if the MultiAZ + parameter is set to true. + + :type engine_version: string + :param engine_version: The version number of the database engine to + use. + **MySQL** + + Example: `5.1.42` + + Type: String + + **Oracle** + + Example: `11.2.0.2.v2` + + Type: String + + **SQL Server** + + Example: `10.50.2789.0.v1` + + :type auto_minor_version_upgrade: boolean + :param auto_minor_version_upgrade: Indicates that minor engine upgrades + will be applied automatically to the DB instance during the + maintenance window. + Default: `True` + + :type license_model: string + :param license_model: License model information for this DB instance. + Valid values: `license-included` | `bring-your-own-license` | `general- + public-license` + + :type iops: integer + :param iops: The amount of Provisioned IOPS (input/output operations + per second) to be initially allocated for the DB instance. + Constraints: Must be an integer greater than 1000. + + :type option_group_name: string + :param option_group_name: Indicates that the DB instance should be + associated with the specified option group. + Permanent options, such as the TDE option for Oracle Advanced Security + TDE, cannot be removed from an option group, and that option group + cannot be removed from a DB instance once it is associated with a + DB instance + + :type character_set_name: string + :param character_set_name: For supported engines, indicates that the DB + instance should be associated with the specified CharacterSet. + + :type publicly_accessible: boolean + :param publicly_accessible: Specifies the accessibility options for the + DB instance. A value of true specifies an Internet-facing instance + with a publicly resolvable DNS name, which resolves to a public IP + address. A value of false specifies an internal instance with a DNS + name that resolves to a private IP address. + Default: The default behavior varies depending on whether a VPC has + been requested or not. The following list shows the default + behavior in each case. + + + + **Default VPC:**true + + **VPC:**false + + + If no DB subnet group has been specified as part of the request and the + PubliclyAccessible value has not been set, the DB instance will be + publicly accessible. 
If a specific DB subnet group has been + specified as part of the request and the PubliclyAccessible value + has not been set, the DB instance will be private. + + :type tags: list + :param tags: A list of tags. + + """ + params = { + 'DBInstanceIdentifier': db_instance_identifier, + 'AllocatedStorage': allocated_storage, + 'DBInstanceClass': db_instance_class, + 'Engine': engine, + 'MasterUsername': master_username, + 'MasterUserPassword': master_user_password, + } + if db_name is not None: + params['DBName'] = db_name + if db_security_groups is not None: + self.build_list_params(params, + db_security_groups, + 'DBSecurityGroups.member') + if vpc_security_group_ids is not None: + self.build_list_params(params, + vpc_security_group_ids, + 'VpcSecurityGroupIds.member') + if availability_zone is not None: + params['AvailabilityZone'] = availability_zone + if db_subnet_group_name is not None: + params['DBSubnetGroupName'] = db_subnet_group_name + if preferred_maintenance_window is not None: + params['PreferredMaintenanceWindow'] = preferred_maintenance_window + if db_parameter_group_name is not None: + params['DBParameterGroupName'] = db_parameter_group_name + if backup_retention_period is not None: + params['BackupRetentionPeriod'] = backup_retention_period + if preferred_backup_window is not None: + params['PreferredBackupWindow'] = preferred_backup_window + if port is not None: + params['Port'] = port + if multi_az is not None: + params['MultiAZ'] = str( + multi_az).lower() + if engine_version is not None: + params['EngineVersion'] = engine_version + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str( + auto_minor_version_upgrade).lower() + if license_model is not None: + params['LicenseModel'] = license_model + if iops is not None: + params['Iops'] = iops + if option_group_name is not None: + params['OptionGroupName'] = option_group_name + if character_set_name is not None: + params['CharacterSetName'] = character_set_name + if publicly_accessible is not None: + params['PubliclyAccessible'] = str( + publicly_accessible).lower() + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CreateDBInstance', + verb='POST', + path='/', params=params) + + def create_db_instance_read_replica(self, db_instance_identifier, + source_db_instance_identifier, + db_instance_class=None, + availability_zone=None, port=None, + auto_minor_version_upgrade=None, + iops=None, option_group_name=None, + publicly_accessible=None, tags=None): + """ + Creates a DB instance that acts as a read replica of a source + DB instance. + + All read replica DB instances are created as Single-AZ + deployments with backups disabled. All other DB instance + attributes (including DB security groups and DB parameter + groups) are inherited from the source DB instance, except as + specified below. + + The source DB instance must have backup retention enabled. + + :type db_instance_identifier: string + :param db_instance_identifier: The DB instance identifier of the read + replica. This is the unique key that identifies a DB instance. This + parameter is stored as a lowercase string. + + :type source_db_instance_identifier: string + :param source_db_instance_identifier: The identifier of the DB instance + that will act as the source for the read replica. Each DB instance + can have up to five read replicas. 
+ Constraints: Must be the identifier of an existing DB instance that is + not already a read replica DB instance. + + :type db_instance_class: string + :param db_instance_class: The compute and memory capacity of the read + replica. + Valid Values: `db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge + | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge` + + Default: Inherits from the source DB instance. + + :type availability_zone: string + :param availability_zone: The Amazon EC2 Availability Zone that the + read replica will be created in. + Default: A random, system-chosen Availability Zone in the endpoint's + region. + + Example: `us-east-1d` + + :type port: integer + :param port: The port number that the DB instance uses for connections. + Default: Inherits from the source DB instance + + Valid Values: `1150-65535` + + :type auto_minor_version_upgrade: boolean + :param auto_minor_version_upgrade: Indicates that minor engine upgrades + will be applied automatically to the read replica during the + maintenance window. + Default: Inherits from the source DB instance + + :type iops: integer + :param iops: The amount of Provisioned IOPS (input/output operations + per second) to be initially allocated for the DB instance. + + :type option_group_name: string + :param option_group_name: The option group the DB instance will be + associated with. If omitted, the default option group for the + engine specified will be used. + + :type publicly_accessible: boolean + :param publicly_accessible: Specifies the accessibility options for the + DB instance. A value of true specifies an Internet-facing instance + with a publicly resolvable DNS name, which resolves to a public IP + address. A value of false specifies an internal instance with a DNS + name that resolves to a private IP address. + Default: The default behavior varies depending on whether a VPC has + been requested or not. The following list shows the default + behavior in each case. + + + + **Default VPC:**true + + **VPC:**false + + + If no DB subnet group has been specified as part of the request and the + PubliclyAccessible value has not been set, the DB instance will be + publicly accessible. If a specific DB subnet group has been + specified as part of the request and the PubliclyAccessible value + has not been set, the DB instance will be private. + + :type tags: list + :param tags: A list of tags. + + """ + params = { + 'DBInstanceIdentifier': db_instance_identifier, + 'SourceDBInstanceIdentifier': source_db_instance_identifier, + } + if db_instance_class is not None: + params['DBInstanceClass'] = db_instance_class + if availability_zone is not None: + params['AvailabilityZone'] = availability_zone + if port is not None: + params['Port'] = port + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str( + auto_minor_version_upgrade).lower() + if iops is not None: + params['Iops'] = iops + if option_group_name is not None: + params['OptionGroupName'] = option_group_name + if publicly_accessible is not None: + params['PubliclyAccessible'] = str( + publicly_accessible).lower() + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CreateDBInstanceReadReplica', + verb='POST', + path='/', params=params) + + def create_db_parameter_group(self, db_parameter_group_name, + db_parameter_group_family, description, + tags=None): + """ + Creates a new DB parameter group. 
+
+ A DB parameter group is initially created with the default
+ parameters for the database engine used by the DB instance. To
+ provide custom values for any of the parameters, you must
+ modify the group after creating it using
+ ModifyDBParameterGroup. Once you've created a DB parameter
+ group, you need to associate it with your DB instance using
+ ModifyDBInstance. When you associate a new DB parameter group
+ with a running DB instance, you need to reboot the DB instance
+ for the new DB parameter group and associated settings to take
+ effect.
+
+ :type db_parameter_group_name: string
+ :param db_parameter_group_name:
+ The name of the DB parameter group.
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+
+ This value is stored as a lower-case string.
+
+ :type db_parameter_group_family: string
+ :param db_parameter_group_family: The DB parameter group family name. A
+ DB parameter group can be associated with one and only one DB
+ parameter group family, and can be applied only to a DB instance
+ running a database engine and engine version compatible with that
+ DB parameter group family.
+
+ :type description: string
+ :param description: The description for the DB parameter group.
+
+ :type tags: list
+ :param tags: A list of tags.
+
+ """
+ params = {
+ 'DBParameterGroupName': db_parameter_group_name,
+ 'DBParameterGroupFamily': db_parameter_group_family,
+ 'Description': description,
+ }
+ if tags is not None:
+ self.build_complex_list_params(
+ params, tags,
+ 'Tags.member',
+ ('Key', 'Value'))
+ return self._make_request(
+ action='CreateDBParameterGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def create_db_security_group(self, db_security_group_name,
+ db_security_group_description, tags=None):
+ """
+ Creates a new DB security group. DB security groups control
+ access to a DB instance.
+
+ :type db_security_group_name: string
+ :param db_security_group_name: The name for the DB security group. This
+ value is stored as a lowercase string.
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+ + Must not be "Default"
+ + May not contain spaces
+
+
+ Example: `mysecuritygroup`
+
+ :type db_security_group_description: string
+ :param db_security_group_description: The description for the DB
+ security group.
+
+ :type tags: list
+ :param tags: A list of tags.
+
+ """
+ params = {
+ 'DBSecurityGroupName': db_security_group_name,
+ 'DBSecurityGroupDescription': db_security_group_description,
+ }
+ if tags is not None:
+ self.build_complex_list_params(
+ params, tags,
+ 'Tags.member',
+ ('Key', 'Value'))
+ return self._make_request(
+ action='CreateDBSecurityGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def create_db_snapshot(self, db_snapshot_identifier,
+ db_instance_identifier, tags=None):
+ """
+ Creates a DBSnapshot. The source DBInstance must be in
+ "available" state.
+
+ :type db_snapshot_identifier: string
+ :param db_snapshot_identifier: The identifier for the DB snapshot.
+ Constraints:
+
+
+ + Cannot be null, empty, or blank
+ + Must contain from 1 to 255 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+
+ Example: `my-snapshot-id`
+
+ :type db_instance_identifier: string
+ :param db_instance_identifier:
+ The DB instance identifier. This is the unique key that identifies a DB
+ instance. This parameter isn't case sensitive.
+
+ Constraints:
+
+
+ + Must contain from 1 to 63 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type tags: list
+ :param tags: A list of tags.
+
+ """
+ params = {
+ 'DBSnapshotIdentifier': db_snapshot_identifier,
+ 'DBInstanceIdentifier': db_instance_identifier,
+ }
+ if tags is not None:
+ self.build_complex_list_params(
+ params, tags,
+ 'Tags.member',
+ ('Key', 'Value'))
+ return self._make_request(
+ action='CreateDBSnapshot',
+ verb='POST',
+ path='/', params=params)
+
+ def create_db_subnet_group(self, db_subnet_group_name,
+ db_subnet_group_description, subnet_ids,
+ tags=None):
+ """
+ Creates a new DB subnet group. DB subnet groups must contain
+ at least one subnet in at least two AZs in the region.
+
+ :type db_subnet_group_name: string
+ :param db_subnet_group_name: The name for the DB subnet group. This
+ value is stored as a lowercase string.
+ Constraints: Must contain no more than 255 alphanumeric characters or
+ hyphens. Must not be "Default".
+
+ Example: `mySubnetgroup`
+
+ :type db_subnet_group_description: string
+ :param db_subnet_group_description: The description for the DB subnet
+ group.
+
+ :type subnet_ids: list
+ :param subnet_ids: The EC2 Subnet IDs for the DB subnet group.
+
+ :type tags: list
+ :param tags: A list of tags.
+
+ """
+ params = {
+ 'DBSubnetGroupName': db_subnet_group_name,
+ 'DBSubnetGroupDescription': db_subnet_group_description,
+ }
+ self.build_list_params(params,
+ subnet_ids,
+ 'SubnetIds.member')
+ if tags is not None:
+ self.build_complex_list_params(
+ params, tags,
+ 'Tags.member',
+ ('Key', 'Value'))
+ return self._make_request(
+ action='CreateDBSubnetGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def create_event_subscription(self, subscription_name, sns_topic_arn,
+ source_type=None, event_categories=None,
+ source_ids=None, enabled=None, tags=None):
+ """
+ Creates an RDS event notification subscription. This action
+ requires a topic ARN (Amazon Resource Name) created by either
+ the RDS console, the SNS console, or the SNS API. To obtain an
+ ARN with SNS, you must create a topic in Amazon SNS and
+ subscribe to the topic. The ARN is displayed in the SNS
+ console.
+
+ You can specify the type of source (SourceType) you want to be
+ notified of, provide a list of RDS sources (SourceIds) that
+ trigger the events, and provide a list of event categories
+ (EventCategories) for events you want to be notified of. For
+ example, you can specify SourceType = db-instance, SourceIds =
+ mydbinstance1, mydbinstance2 and EventCategories =
+ Availability, Backup.
+
+ If you specify both the SourceType and SourceIds, such as
+ SourceType = db-instance and SourceIdentifier = myDBInstance1,
+ you will be notified of all the db-instance events for the
+ specified source. If you specify a SourceType but do not
+ specify a SourceIdentifier, you will receive notice of the
+ events for that source type for all your RDS sources. If you
+ do not specify either the SourceType or the SourceIdentifier,
+ you will be notified of events generated from all RDS sources
+ belonging to your customer account.
+
+ :type subscription_name: string
+ :param subscription_name: The name of the subscription.
+ Constraints: The name must be less than 255 characters.
+
+ :type sns_topic_arn: string
+ :param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic
+ created for event notification. The ARN is created by Amazon SNS
+ when you create a topic and subscribe to it.
+
+ :type source_type: string
+ :param source_type: The type of source that will be generating the
+ events. For example, if you want to be notified of events generated
+ by a DB instance, you would set this parameter to db-instance. If
+ this value is not specified, all events are returned.
+ Valid values: db-instance | db-parameter-group | db-security-group |
+ db-snapshot
+
+ :type event_categories: list
+ :param event_categories: A list of event categories for a SourceType
+ that you want to subscribe to. You can see a list of the categories
+ for a given SourceType in the `Events`_ topic in the Amazon RDS
+ User Guide or by using the **DescribeEventCategories** action.
+
+ :type source_ids: list
+ :param source_ids:
+ The list of identifiers of the event sources for which events will be
+ returned. If not specified, then all sources are included in the
+ response. An identifier must begin with a letter and must contain
+ only ASCII letters, digits, and hyphens; it cannot end with a
+ hyphen or contain two consecutive hyphens.
+
+ Constraints:
+
+
+ + If SourceIds are supplied, SourceType must also be provided.
+ + If the source type is a DB instance, then a `DBInstanceIdentifier`
+ must be supplied.
+ + If the source type is a DB security group, a `DBSecurityGroupName`
+ must be supplied.
+ + If the source type is a DB parameter group, a `DBParameterGroupName`
+ must be supplied.
+ + If the source type is a DB snapshot, a `DBSnapshotIdentifier` must be
+ supplied.
+
+ :type enabled: boolean
+ :param enabled: A Boolean value; set to **true** to activate the
+ subscription, set to **false** to create the subscription but not
+ activate it.
+
+ :type tags: list
+ :param tags: A list of tags.
+
+ """
+ params = {
+ 'SubscriptionName': subscription_name,
+ 'SnsTopicArn': sns_topic_arn,
+ }
+ if source_type is not None:
+ params['SourceType'] = source_type
+ if event_categories is not None:
+ self.build_list_params(params,
+ event_categories,
+ 'EventCategories.member')
+ if source_ids is not None:
+ self.build_list_params(params,
+ source_ids,
+ 'SourceIds.member')
+ if enabled is not None:
+ params['Enabled'] = str(
+ enabled).lower()
+ if tags is not None:
+ self.build_complex_list_params(
+ params, tags,
+ 'Tags.member',
+ ('Key', 'Value'))
+ return self._make_request(
+ action='CreateEventSubscription',
+ verb='POST',
+ path='/', params=params)
+
+ def create_option_group(self, option_group_name, engine_name,
+ major_engine_version, option_group_description,
+ tags=None):
+ """
+ Creates a new option group. You can create up to 20 option
+ groups.
+
+ :type option_group_name: string
+ :param option_group_name: Specifies the name of the option group to be
+ created.
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+
+ Example: `myoptiongroup`
+
+ :type engine_name: string
+ :param engine_name: Specifies the name of the engine that this option
+ group should be associated with.
+
+ :type major_engine_version: string
+ :param major_engine_version: Specifies the major version of the engine
+ that this option group should be associated with.
+
+ :type option_group_description: string
+ :param option_group_description: The description of the option group.
+
+ :type tags: list
+ :param tags: A list of tags.
+
+ """
+ params = {
+ 'OptionGroupName': option_group_name,
+ 'EngineName': engine_name,
+ 'MajorEngineVersion': major_engine_version,
+ 'OptionGroupDescription': option_group_description,
+ }
+ if tags is not None:
+ self.build_complex_list_params(
+ params, tags,
+ 'Tags.member',
+ ('Key', 'Value'))
+ return self._make_request(
+ action='CreateOptionGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_db_instance(self, db_instance_identifier,
+ skip_final_snapshot=None,
+ final_db_snapshot_identifier=None):
+ """
+ The DeleteDBInstance action deletes a previously provisioned
+ DB instance. A successful response from the web service
+ indicates the request was received correctly. When you delete
+ a DB instance, all automated backups for that instance are
+ deleted and cannot be recovered. Manual DB snapshots of the DB
+ instance to be deleted are not deleted.
+
+ If a final DB snapshot is requested, the status of the RDS
+ instance will be "deleting" until the DB snapshot is created.
+ The API action `DescribeDBInstances` is used to monitor the
+ status of this operation. The action cannot be canceled or
+ reverted once submitted.
+
+ :type db_instance_identifier: string
+ :param db_instance_identifier:
+ The DB instance identifier for the DB instance to be deleted. This
+ parameter isn't case sensitive.
+
+ Constraints:
+
+
+ + Must contain from 1 to 63 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type skip_final_snapshot: boolean
+ :param skip_final_snapshot: Determines whether a final DB snapshot is
+ created before the DB instance is deleted. If `True` is specified,
+ no DBSnapshot is created. If `False` is specified, a DB snapshot is
+ created before the DB instance is deleted.
+ The FinalDBSnapshotIdentifier parameter must be specified if
+ SkipFinalSnapshot is `False`.
+
+ Default: `False`
+
+ :type final_db_snapshot_identifier: string
+ :param final_db_snapshot_identifier:
+ The DBSnapshotIdentifier of the new DBSnapshot created when
+ SkipFinalSnapshot is set to `False`.
+
+ Specifying this parameter and also setting the SkipFinalSnapshot
+ parameter to `True` results in an error.
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ """
+ params = {'DBInstanceIdentifier': db_instance_identifier, }
+ if skip_final_snapshot is not None:
+ params['SkipFinalSnapshot'] = str(
+ skip_final_snapshot).lower()
+ if final_db_snapshot_identifier is not None:
+ params['FinalDBSnapshotIdentifier'] = final_db_snapshot_identifier
+ return self._make_request(
+ action='DeleteDBInstance',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_db_parameter_group(self, db_parameter_group_name):
+ """
+ Deletes a specified DBParameterGroup. The DB parameter group
+ to be deleted cannot be associated with any DB instances.
+
+ :type db_parameter_group_name: string
+ :param db_parameter_group_name:
+ The name of the DB parameter group.
+
+ Constraints:
+
+
+ + Must be the name of an existing DB parameter group
+ + You cannot delete a default DB parameter group
+ + Cannot be associated with any DB instances
+
+ """
+ params = {'DBParameterGroupName': db_parameter_group_name, }
+ return self._make_request(
+ action='DeleteDBParameterGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_db_security_group(self, db_security_group_name):
+ """
+ Deletes a DB security group.
+ The specified DB security group must not be associated with
+ any DB instances.
+
+ :type db_security_group_name: string
+ :param db_security_group_name:
+ The name of the DB security group to delete.
+
+ You cannot delete the default DB security group.
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+ + Must not be "Default"
+ + May not contain spaces
+
+ """
+ params = {'DBSecurityGroupName': db_security_group_name, }
+ return self._make_request(
+ action='DeleteDBSecurityGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_db_snapshot(self, db_snapshot_identifier):
+ """
+ Deletes a DBSnapshot.
+ The DBSnapshot must be in the `available` state to be deleted.
+
+ :type db_snapshot_identifier: string
+ :param db_snapshot_identifier: The DBSnapshot identifier.
+ Constraints: Must be the name of an existing DB snapshot in the
+ `available` state.
+
+ """
+ params = {'DBSnapshotIdentifier': db_snapshot_identifier, }
+ return self._make_request(
+ action='DeleteDBSnapshot',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_db_subnet_group(self, db_subnet_group_name):
+ """
+ Deletes a DB subnet group.
+ The specified database subnet group must not be associated
+ with any DB instances.
+
+ :type db_subnet_group_name: string
+ :param db_subnet_group_name:
+ The name of the database subnet group to delete.
+
+ You cannot delete the default subnet group.
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ """
+ params = {'DBSubnetGroupName': db_subnet_group_name, }
+ return self._make_request(
+ action='DeleteDBSubnetGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_event_subscription(self, subscription_name):
+ """
+ Deletes an RDS event notification subscription.
+
+ :type subscription_name: string
+ :param subscription_name: The name of the RDS event notification
+ subscription you want to delete.
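+
+ Example (an illustrative sketch only, not part of the AWS
+ documentation; the subscription name is hypothetical, and the
+ connection is assumed to come from `boto.rds2.connect_to_region`)::
+
+     import boto.rds2
+
+     conn = boto.rds2.connect_to_region('us-east-1')
+     conn.delete_event_subscription('my-subscription')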
+ + """ + params = {'SubscriptionName': subscription_name, } + return self._make_request( + action='DeleteEventSubscription', + verb='POST', + path='/', params=params) + + def delete_option_group(self, option_group_name): + """ + Deletes an existing option group. + + :type option_group_name: string + :param option_group_name: + The name of the option group to be deleted. + + You cannot delete default option groups. + + """ + params = {'OptionGroupName': option_group_name, } + return self._make_request( + action='DeleteOptionGroup', + verb='POST', + path='/', params=params) + + def describe_db_engine_versions(self, engine=None, engine_version=None, + db_parameter_group_family=None, + max_records=None, marker=None, + default_only=None, + list_supported_character_sets=None): + """ + Returns a list of the available DB engines. + + :type engine: string + :param engine: The database engine to return. + + :type engine_version: string + :param engine_version: The database engine version to return. + Example: `5.1.49` + + :type db_parameter_group_family: string + :param db_parameter_group_family: + The name of a specific DB parameter group family to return details for. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more than the `MaxRecords` value is available, a + pagination token called a marker is included in the response so + that the following results can be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + request. If this parameter is specified, the response includes only + records beyond the marker, up to the value specified by + `MaxRecords`. + + :type default_only: boolean + :param default_only: Indicates that only the default version of the + specified engine or engine and major version combination is + returned. + + :type list_supported_character_sets: boolean + :param list_supported_character_sets: If this parameter is specified, + and if the requested engine supports the CharacterSetName parameter + for CreateDBInstance, the response includes a list of supported + character sets for each engine version. + + """ + params = {} + if engine is not None: + params['Engine'] = engine + if engine_version is not None: + params['EngineVersion'] = engine_version + if db_parameter_group_family is not None: + params['DBParameterGroupFamily'] = db_parameter_group_family + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + if default_only is not None: + params['DefaultOnly'] = str( + default_only).lower() + if list_supported_character_sets is not None: + params['ListSupportedCharacterSets'] = str( + list_supported_character_sets).lower() + return self._make_request( + action='DescribeDBEngineVersions', + verb='POST', + path='/', params=params) + + def describe_db_instances(self, db_instance_identifier=None, + filters=None, max_records=None, marker=None): + """ + Returns information about provisioned RDS instances. This API + supports pagination. + + :type db_instance_identifier: string + :param db_instance_identifier: + The user-supplied instance identifier. If this parameter is specified, + information from only the specific DB instance is returned. This + parameter isn't case sensitive. 
+ + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type filters: list + :param filters: + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + DescribeDBInstances request. If this parameter is specified, the + response includes only records beyond the marker, up to the value + specified by `MaxRecords` . + + """ + params = {} + if db_instance_identifier is not None: + params['DBInstanceIdentifier'] = db_instance_identifier + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBInstances', + verb='POST', + path='/', params=params) + + def describe_db_log_files(self, db_instance_identifier, + filename_contains=None, file_last_written=None, + file_size=None, max_records=None, marker=None): + """ + Returns a list of DB log files for the DB instance. + + :type db_instance_identifier: string + :param db_instance_identifier: + The customer-assigned name of the DB instance that contains the log + files you want to list. + + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type filename_contains: string + :param filename_contains: Filters the available log files for log file + names that contain the specified string. + + :type file_last_written: long + :param file_last_written: Filters the available log files for files + written since the specified date, in POSIX timestamp format. + + :type file_size: long + :param file_size: Filters the available log files for files larger than + the specified size. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified MaxRecords + value, a pagination token called a marker is included in the + response so that the remaining results can be retrieved. + + :type marker: string + :param marker: The pagination token provided in the previous request. + If this parameter is specified the response includes only records + beyond the marker, up to MaxRecords. + + """ + params = {'DBInstanceIdentifier': db_instance_identifier, } + if filename_contains is not None: + params['FilenameContains'] = filename_contains + if file_last_written is not None: + params['FileLastWritten'] = file_last_written + if file_size is not None: + params['FileSize'] = file_size + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBLogFiles', + verb='POST', + path='/', params=params) + + def describe_db_parameter_groups(self, db_parameter_group_name=None, + filters=None, max_records=None, + marker=None): + """ + Returns a list of `DBParameterGroup` descriptions. 
If a + `DBParameterGroupName` is specified, the list will contain + only the description of the specified DB parameter group. + + :type db_parameter_group_name: string + :param db_parameter_group_name: + The name of a specific DB parameter group to return details for. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type filters: list + :param filters: + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + `DescribeDBParameterGroups` request. If this parameter is + specified, the response includes only records beyond the marker, up + to the value specified by `MaxRecords`. + + """ + params = {} + if db_parameter_group_name is not None: + params['DBParameterGroupName'] = db_parameter_group_name + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBParameterGroups', + verb='POST', + path='/', params=params) + + def describe_db_parameters(self, db_parameter_group_name, source=None, + max_records=None, marker=None): + """ + Returns the detailed parameter list for a particular DB + parameter group. + + :type db_parameter_group_name: string + :param db_parameter_group_name: + The name of a specific DB parameter group to return details for. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type source: string + :param source: The parameter types to return. + Default: All parameter types returned + + Valid Values: `user | system | engine-default` + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + `DescribeDBParameters` request. If this parameter is specified, the + response includes only records beyond the marker, up to the value + specified by `MaxRecords`. + + """ + params = {'DBParameterGroupName': db_parameter_group_name, } + if source is not None: + params['Source'] = source + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBParameters', + verb='POST', + path='/', params=params) + + def describe_db_security_groups(self, db_security_group_name=None, + filters=None, max_records=None, + marker=None): + """ + Returns a list of `DBSecurityGroup` descriptions. If a + `DBSecurityGroupName` is specified, the list will contain only + the descriptions of the specified DB security group. 
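+
+ For example, a minimal sketch (the group name below is hypothetical,
+ and the connection is assumed to come from
+ `boto.rds2.connect_to_region`)::
+
+     import boto.rds2
+
+     conn = boto.rds2.connect_to_region('us-east-1')
+     # All security groups for the account:
+     conn.describe_db_security_groups()
+     # Only the named group:
+     conn.describe_db_security_groups(
+         db_security_group_name='mysecuritygroup')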
+
+ :type db_security_group_name: string
+ :param db_security_group_name: The name of the DB security group to
+ return details for.
+
+ :type filters: list
+ :param filters:
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a pagination token called a marker is included in the
+ response so that the remaining results may be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ DescribeDBSecurityGroups request. If this parameter is specified,
+ the response includes only records beyond the marker, up to the
+ value specified by `MaxRecords`.
+
+ """
+ params = {}
+ if db_security_group_name is not None:
+ params['DBSecurityGroupName'] = db_security_group_name
+ if filters is not None:
+ self.build_complex_list_params(
+ params, filters,
+ 'Filters.member',
+ ('FilterName', 'FilterValue'))
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeDBSecurityGroups',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_db_snapshots(self, db_instance_identifier=None,
+ db_snapshot_identifier=None,
+ snapshot_type=None, filters=None,
+ max_records=None, marker=None):
+ """
+ Returns information about DB snapshots. This API supports
+ pagination.
+
+ :type db_instance_identifier: string
+ :param db_instance_identifier:
+ A DB instance identifier to retrieve the list of DB snapshots for.
+ Cannot be used in conjunction with `DBSnapshotIdentifier`. This
+ parameter is not case sensitive.
+
+ Constraints:
+
+
+ + Must contain from 1 to 63 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type db_snapshot_identifier: string
+ :param db_snapshot_identifier:
+ A specific DB snapshot identifier to describe. Cannot be used in
+ conjunction with `DBInstanceIdentifier`. This value is stored as a
+ lowercase string.
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+ + If this is the identifier of an automated snapshot, the
+ `SnapshotType` parameter must also be specified.
+
+ :type snapshot_type: string
+ :param snapshot_type: The type of snapshots that will be returned.
+ Values can be "automated" or "manual." If not specified, the
+ returned results will include all snapshot types.
+
+ :type filters: list
+ :param filters:
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a pagination token called a marker is included in the
+ response so that the remaining results may be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ `DescribeDBSnapshots` request. If this parameter is specified, the
+ response includes only records beyond the marker, up to the value
+ specified by `MaxRecords`.
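+
+ A pagination sketch (hypothetical, not from the AWS docs; the
+ instance identifier is made up, and `some_marker` stands for a
+ marker value read out of the previous response)::
+
+     import boto.rds2
+
+     conn = boto.rds2.connect_to_region('us-east-1')
+     # First page of up to 20 snapshot records for one instance.
+     page = conn.describe_db_snapshots(
+         db_instance_identifier='mydbinstance', max_records=20)
+     # If the response carries a marker, pass it back to get the
+     # next page:
+     # conn.describe_db_snapshots(
+     #     db_instance_identifier='mydbinstance',
+     #     max_records=20, marker=some_marker)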
+ + """ + params = {} + if db_instance_identifier is not None: + params['DBInstanceIdentifier'] = db_instance_identifier + if db_snapshot_identifier is not None: + params['DBSnapshotIdentifier'] = db_snapshot_identifier + if snapshot_type is not None: + params['SnapshotType'] = snapshot_type + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBSnapshots', + verb='POST', + path='/', params=params) + + def describe_db_subnet_groups(self, db_subnet_group_name=None, + filters=None, max_records=None, + marker=None): + """ + Returns a list of DBSubnetGroup descriptions. If a + DBSubnetGroupName is specified, the list will contain only the + descriptions of the specified DBSubnetGroup. + + For an overview of CIDR ranges, go to the `Wikipedia + Tutorial`_. + + :type db_subnet_group_name: string + :param db_subnet_group_name: The name of the DB subnet group to return + details for. + + :type filters: list + :param filters: + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + DescribeDBSubnetGroups request. If this parameter is specified, the + response includes only records beyond the marker, up to the value + specified by `MaxRecords`. + + """ + params = {} + if db_subnet_group_name is not None: + params['DBSubnetGroupName'] = db_subnet_group_name + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBSubnetGroups', + verb='POST', + path='/', params=params) + + def describe_engine_default_parameters(self, db_parameter_group_family, + max_records=None, marker=None): + """ + Returns the default engine and system parameter information + for the specified database engine. + + :type db_parameter_group_family: string + :param db_parameter_group_family: The name of the DB parameter group + family. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + `DescribeEngineDefaultParameters` request. If this parameter is + specified, the response includes only records beyond the marker, up + to the value specified by `MaxRecords`. 
+ + """ + params = { + 'DBParameterGroupFamily': db_parameter_group_family, + } + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeEngineDefaultParameters', + verb='POST', + path='/', params=params) + + def describe_event_categories(self, source_type=None): + """ + Displays a list of categories for all event source types, or, + if specified, for a specified source type. You can see a list + of the event categories and source types in the ` Events`_ + topic in the Amazon RDS User Guide. + + :type source_type: string + :param source_type: The type of source that will be generating the + events. + Valid values: db-instance | db-parameter-group | db-security-group | + db-snapshot + + """ + params = {} + if source_type is not None: + params['SourceType'] = source_type + return self._make_request( + action='DescribeEventCategories', + verb='POST', + path='/', params=params) + + def describe_event_subscriptions(self, subscription_name=None, + filters=None, max_records=None, + marker=None): + """ + Lists all the subscription descriptions for a customer + account. The description for a subscription includes + SubscriptionName, SNSTopicARN, CustomerID, SourceType, + SourceID, CreationTime, and Status. + + If you specify a SubscriptionName, lists the description for + that subscription. + + :type subscription_name: string + :param subscription_name: The name of the RDS event notification + subscription you want to describe. + + :type filters: list + :param filters: + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results can be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + DescribeOrderableDBInstanceOptions request. If this parameter is + specified, the response includes only records beyond the marker, up + to the value specified by `MaxRecords` . + + """ + params = {} + if subscription_name is not None: + params['SubscriptionName'] = subscription_name + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeEventSubscriptions', + verb='POST', + path='/', params=params) + + def describe_events(self, source_identifier=None, source_type=None, + start_time=None, end_time=None, duration=None, + event_categories=None, max_records=None, marker=None): + """ + Returns events related to DB instances, DB security groups, DB + snapshots, and DB parameter groups for the past 14 days. + Events specific to a particular DB instance, DB security + group, database snapshot, or DB parameter group can be + obtained by providing the name as a parameter. By default, the + past hour of events are returned. + + :type source_identifier: string + :param source_identifier: + The identifier of the event source for which events will be returned. + If not specified, then all sources are included in the response. + + Constraints: + + + + If SourceIdentifier is supplied, SourceType must also be provided. 
+ + If the source type is `DBInstance`, then a `DBInstanceIdentifier`
+ must be supplied.
+ + If the source type is `DBSecurityGroup`, a `DBSecurityGroupName` must
+ be supplied.
+ + If the source type is `DBParameterGroup`, a `DBParameterGroupName`
+ must be supplied.
+ + If the source type is `DBSnapshot`, a `DBSnapshotIdentifier` must be
+ supplied.
+ + Cannot end with a hyphen or contain two consecutive hyphens.
+
+ :type source_type: string
+ :param source_type: The event source to retrieve events for. If no
+ value is specified, all events are returned.
+
+ :type start_time: timestamp
+ :param start_time: The beginning of the time interval to retrieve
+ events for, specified in ISO 8601 format. For more information
+ about ISO 8601, go to the `ISO8601 Wikipedia page.`_
+ Example: 2009-07-08T18:00Z
+
+ :type end_time: timestamp
+ :param end_time: The end of the time interval for which to retrieve
+ events, specified in ISO 8601 format. For more information about
+ ISO 8601, go to the `ISO8601 Wikipedia page.`_
+ Example: 2009-07-08T18:00Z
+
+ :type duration: integer
+ :param duration: The number of minutes to retrieve events for.
+ Default: 60
+
+ :type event_categories: list
+ :param event_categories: A list of event categories that trigger
+ notifications for an event notification subscription.
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a pagination token called a marker is included in the
+ response so that the remaining results may be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ DescribeEvents request. If this parameter is specified, the
+ response includes only records beyond the marker, up to the value
+ specified by `MaxRecords`.
+
+ """
+ params = {}
+ if source_identifier is not None:
+ params['SourceIdentifier'] = source_identifier
+ if source_type is not None:
+ params['SourceType'] = source_type
+ if start_time is not None:
+ params['StartTime'] = start_time
+ if end_time is not None:
+ params['EndTime'] = end_time
+ if duration is not None:
+ params['Duration'] = duration
+ if event_categories is not None:
+ self.build_list_params(params,
+ event_categories,
+ 'EventCategories.member')
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeEvents',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_option_group_options(self, engine_name,
+ major_engine_version=None,
+ max_records=None, marker=None):
+ """
+ Describes all available options.
+
+ :type engine_name: string
+ :param engine_name: A required parameter. Options available for the
+ given engine name will be described.
+
+ :type major_engine_version: string
+ :param major_engine_version: If specified, filters the results to
+ include only options for the specified major engine version.
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a pagination token called a marker is included in the
+ response so that the remaining results can be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ request.
If this parameter is specified, the response includes only + records beyond the marker, up to the value specified by + `MaxRecords`. + + """ + params = {'EngineName': engine_name, } + if major_engine_version is not None: + params['MajorEngineVersion'] = major_engine_version + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeOptionGroupOptions', + verb='POST', + path='/', params=params) + + def describe_option_groups(self, option_group_name=None, filters=None, + marker=None, max_records=None, + engine_name=None, major_engine_version=None): + """ + Describes the available option groups. + + :type option_group_name: string + :param option_group_name: The name of the option group to describe. + Cannot be supplied together with EngineName or MajorEngineVersion. + + :type filters: list + :param filters: + + :type marker: string + :param marker: An optional pagination token provided by a previous + DescribeOptionGroups request. If this parameter is specified, the + response includes only records beyond the marker, up to the value + specified by `MaxRecords`. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results can be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type engine_name: string + :param engine_name: Filters the list of option groups to only include + groups associated with a specific database engine. + + :type major_engine_version: string + :param major_engine_version: Filters the list of option groups to only + include groups associated with a specific database engine version. + If specified, then EngineName must also be specified. + + """ + params = {} + if option_group_name is not None: + params['OptionGroupName'] = option_group_name + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if marker is not None: + params['Marker'] = marker + if max_records is not None: + params['MaxRecords'] = max_records + if engine_name is not None: + params['EngineName'] = engine_name + if major_engine_version is not None: + params['MajorEngineVersion'] = major_engine_version + return self._make_request( + action='DescribeOptionGroups', + verb='POST', + path='/', params=params) + + def describe_orderable_db_instance_options(self, engine, + engine_version=None, + db_instance_class=None, + license_model=None, vpc=None, + max_records=None, marker=None): + """ + Returns a list of orderable DB instance options for the + specified engine. + + :type engine: string + :param engine: The name of the engine to retrieve DB instance options + for. + + :type engine_version: string + :param engine_version: The engine version filter value. Specify this + parameter to show only the available offerings matching the + specified engine version. + + :type db_instance_class: string + :param db_instance_class: The DB instance class filter value. Specify + this parameter to show only the available offerings matching the + specified DB instance class. + + :type license_model: string + :param license_model: The license model filter value. Specify this + parameter to show only the available offerings matching the + specified license model. 
+
+ :type vpc: boolean
+ :param vpc: The VPC filter value. Specify this parameter to show only
+ the available VPC or non-VPC offerings.
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a pagination token called a marker is included in the
+ response so that the remaining results can be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ DescribeOrderableDBInstanceOptions request. If this parameter is
+ specified, the response includes only records beyond the marker, up
+ to the value specified by `MaxRecords`.
+
+ """
+ params = {'Engine': engine, }
+ if engine_version is not None:
+ params['EngineVersion'] = engine_version
+ if db_instance_class is not None:
+ params['DBInstanceClass'] = db_instance_class
+ if license_model is not None:
+ params['LicenseModel'] = license_model
+ if vpc is not None:
+ params['Vpc'] = str(
+ vpc).lower()
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeOrderableDBInstanceOptions',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_reserved_db_instances(self, reserved_db_instance_id=None,
+ reserved_db_instances_offering_id=None,
+ db_instance_class=None, duration=None,
+ product_description=None,
+ offering_type=None, multi_az=None,
+ filters=None, max_records=None,
+ marker=None):
+ """
+ Returns information about reserved DB instances for this
+ account, or about a specified reserved DB instance.
+
+ :type reserved_db_instance_id: string
+ :param reserved_db_instance_id: The reserved DB instance identifier
+ filter value. Specify this parameter to show only the reservation
+ that matches the specified reservation ID.
+
+ :type reserved_db_instances_offering_id: string
+ :param reserved_db_instances_offering_id: The offering identifier
+ filter value. Specify this parameter to show only purchased
+ reservations matching the specified offering identifier.
+
+ :type db_instance_class: string
+ :param db_instance_class: The DB instance class filter value. Specify
+ this parameter to show only those reservations matching the
+ specified DB instance class.
+
+ :type duration: string
+ :param duration: The duration filter value, specified in years or
+ seconds. Specify this parameter to show only reservations for this
+ duration.
+ Valid Values: `1 | 3 | 31536000 | 94608000`
+
+ :type product_description: string
+ :param product_description: The product description filter value.
+ Specify this parameter to show only those reservations matching the
+ specified product description.
+
+ :type offering_type: string
+ :param offering_type: The offering type filter value. Specify this
+ parameter to show only the available offerings matching the
+ specified offering type.
+ Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy
+ Utilization"`
+
+ :type multi_az: boolean
+ :param multi_az: The Multi-AZ filter value. Specify this parameter to
+ show only those reservations matching the specified Multi-AZ
+ parameter.
+
+ :type filters: list
+ :param filters:
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response.
If more than the `MaxRecords` value is available, a + pagination token called a marker is included in the response so + that the following results can be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + request. If this parameter is specified, the response includes only + records beyond the marker, up to the value specified by + `MaxRecords`. + + """ + params = {} + if reserved_db_instance_id is not None: + params['ReservedDBInstanceId'] = reserved_db_instance_id + if reserved_db_instances_offering_id is not None: + params['ReservedDBInstancesOfferingId'] = reserved_db_instances_offering_id + if db_instance_class is not None: + params['DBInstanceClass'] = db_instance_class + if duration is not None: + params['Duration'] = duration + if product_description is not None: + params['ProductDescription'] = product_description + if offering_type is not None: + params['OfferingType'] = offering_type + if multi_az is not None: + params['MultiAZ'] = str( + multi_az).lower() + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeReservedDBInstances', + verb='POST', + path='/', params=params) + + def describe_reserved_db_instances_offerings(self, + reserved_db_instances_offering_id=None, + db_instance_class=None, + duration=None, + product_description=None, + offering_type=None, + multi_az=None, + max_records=None, + marker=None): + """ + Lists available reserved DB instance offerings. + + :type reserved_db_instances_offering_id: string + :param reserved_db_instances_offering_id: The offering identifier + filter value. Specify this parameter to show only the available + offering that matches the specified reservation identifier. + Example: `438012d3-4052-4cc7-b2e3-8d3372e0e706` + + :type db_instance_class: string + :param db_instance_class: The DB instance class filter value. Specify + this parameter to show only the available offerings matching the + specified DB instance class. + + :type duration: string + :param duration: Duration filter value, specified in years or seconds. + Specify this parameter to show only reservations for this duration. + Valid Values: `1 | 3 | 31536000 | 94608000` + + :type product_description: string + :param product_description: Product description filter value. Specify + this parameter to show only the available offerings matching the + specified product description. + + :type offering_type: string + :param offering_type: The offering type filter value. Specify this + parameter to show only the available offerings matching the + specified offering type. + Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy + Utilization" ` + + :type multi_az: boolean + :param multi_az: The Multi-AZ filter value. Specify this parameter to + show only the available offerings matching the specified Multi-AZ + parameter. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more than the `MaxRecords` value is available, a + pagination token called a marker is included in the response so + that the following results can be retrieved. 
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ request. If this parameter is specified, the response includes only
+ records beyond the marker, up to the value specified by
+ `MaxRecords`.
+
+ """
+ params = {}
+ if reserved_db_instances_offering_id is not None:
+ params['ReservedDBInstancesOfferingId'] = reserved_db_instances_offering_id
+ if db_instance_class is not None:
+ params['DBInstanceClass'] = db_instance_class
+ if duration is not None:
+ params['Duration'] = duration
+ if product_description is not None:
+ params['ProductDescription'] = product_description
+ if offering_type is not None:
+ params['OfferingType'] = offering_type
+ if multi_az is not None:
+ params['MultiAZ'] = str(
+ multi_az).lower()
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeReservedDBInstancesOfferings',
+ verb='POST',
+ path='/', params=params)
+
+ def download_db_log_file_portion(self, db_instance_identifier,
+ log_file_name, marker=None,
+ number_of_lines=None):
+ """
+ Downloads all or a portion of the specified log file.
+
+ :type db_instance_identifier: string
+ :param db_instance_identifier:
+ The customer-assigned name of the DB instance that contains the log
+ files you want to list.
+
+ Constraints:
+
+
+ + Must contain from 1 to 63 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type log_file_name: string
+ :param log_file_name: The name of the log file to be downloaded.
+
+ :type marker: string
+ :param marker: The pagination token provided in the previous request.
+ If this parameter is specified, the response includes only records
+ beyond the marker, up to MaxRecords.
+
+ :type number_of_lines: integer
+ :param number_of_lines: The number of lines remaining to be downloaded.
+
+ """
+ params = {
+ 'DBInstanceIdentifier': db_instance_identifier,
+ 'LogFileName': log_file_name,
+ }
+ if marker is not None:
+ params['Marker'] = marker
+ if number_of_lines is not None:
+ params['NumberOfLines'] = number_of_lines
+ return self._make_request(
+ action='DownloadDBLogFilePortion',
+ verb='POST',
+ path='/', params=params)
+
+ def list_tags_for_resource(self, resource_name):
+ """
+ Lists all tags on an Amazon RDS resource.
+
+ For an overview on tagging an Amazon RDS resource, see
+ `Tagging Amazon RDS Resources`_.
+
+ :type resource_name: string
+ :param resource_name: The Amazon RDS resource with tags to be listed.
+ This value is an Amazon Resource Name (ARN). For information about
+ creating an ARN, see `Constructing an RDS Amazon Resource Name
+ (ARN)`_.
+
+ """
+ params = {'ResourceName': resource_name, }
+ return self._make_request(
+ action='ListTagsForResource',
+ verb='POST',
+ path='/', params=params)
+
+ def modify_db_instance(self, db_instance_identifier,
+ allocated_storage=None, db_instance_class=None,
+ db_security_groups=None,
+ vpc_security_group_ids=None,
+ apply_immediately=None, master_user_password=None,
+ db_parameter_group_name=None,
+ backup_retention_period=None,
+ preferred_backup_window=None,
+ preferred_maintenance_window=None, multi_az=None,
+ engine_version=None,
+ allow_major_version_upgrade=None,
+ auto_minor_version_upgrade=None, iops=None,
+ option_group_name=None,
+ new_db_instance_identifier=None):
+ """
+ Modify settings for a DB instance.
You can change one or more + database configuration parameters by specifying these + parameters and the new values in the request. + + :type db_instance_identifier: string + :param db_instance_identifier: + The DB instance identifier. This value is stored as a lowercase string. + + Constraints: + + + + Must be the identifier for an existing DB instance + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type allocated_storage: integer + :param allocated_storage: The new storage capacity of the RDS instance. + Changing this parameter does not result in an outage and the change + is applied during the next maintenance window unless the + `ApplyImmediately` parameter is set to `True` for this request. + **MySQL** + + Default: Uses existing setting + + Valid Values: 5-1024 + + Constraints: Value supplied must be at least 10% greater than the + current value. Values that are not at least 10% greater than the + existing value are rounded up so that they are 10% greater than the + current value. + + Type: Integer + + **Oracle** + + Default: Uses existing setting + + Valid Values: 10-1024 + + Constraints: Value supplied must be at least 10% greater than the + current value. Values that are not at least 10% greater than the + existing value are rounded up so that they are 10% greater than the + current value. + + **SQL Server** + + Cannot be modified. + + If you choose to migrate your DB instance from using standard storage + to using Provisioned IOPS, or from using Provisioned IOPS to using + standard storage, the process can take time. The duration of the + migration depends on several factors such as database load, storage + size, storage type (standard or Provisioned IOPS), amount of IOPS + provisioned (if any), and the number of prior scale storage + operations. Typical migration times are under 24 hours, but the + process can take up to several days in some cases. During the + migration, the DB instance will be available for use, but may + experience performance degradation. While the migration takes + place, nightly backups for the instance will be suspended. No other + Amazon RDS operations can take place for the instance, including + modifying the instance, rebooting the instance, deleting the + instance, creating a read replica for the instance, and creating a + DB snapshot of the instance. + + :type db_instance_class: string + :param db_instance_class: The new compute and memory capacity of the DB + instance. To determine the instance classes that are available for + a particular DB engine, use the DescribeOrderableDBInstanceOptions + action. + Passing a value for this parameter causes an outage during the change + and is applied during the next maintenance window, unless the + `ApplyImmediately` parameter is specified as `True` for this + request. + + Default: Uses existing setting + + Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | + db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge` + + :type db_security_groups: list + :param db_security_groups: + A list of DB security groups to authorize on this DB instance. Changing + this parameter does not result in an outage and the change is + asynchronously applied as soon as possible. 
+ + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type vpc_security_group_ids: list + :param vpc_security_group_ids: + A list of EC2 VPC security groups to authorize on this DB instance. + This change is asynchronously applied as soon as possible. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type apply_immediately: boolean + :param apply_immediately: Specifies whether or not the modifications in + this request and any pending modifications are asynchronously + applied as soon as possible, regardless of the + `PreferredMaintenanceWindow` setting for the DB instance. + If this parameter is passed as `False`, changes to the DB instance are + applied on the next call to RebootDBInstance, the next maintenance + reboot, or the next failure reboot, whichever occurs first. See + each parameter to determine when a change is applied. + + Default: `False` + + :type master_user_password: string + :param master_user_password: + The new password for the DB instance master user. Can be any printable + ASCII character except "/", '"', or "@". + + Changing this parameter does not result in an outage and the change is + asynchronously applied as soon as possible. Between the time of the + request and the completion of the request, the `MasterUserPassword` + element exists in the `PendingModifiedValues` element of the + operation response. + + Default: Uses existing setting + + Constraints: Must be 8 to 41 alphanumeric characters (MySQL), 8 to 30 + alphanumeric characters (Oracle), or 8 to 128 alphanumeric + characters (SQL Server). + + Amazon RDS API actions never return the password, so this action + provides a way to regain access to a master instance user if the + password is lost. + + :type db_parameter_group_name: string + :param db_parameter_group_name: The name of the DB parameter group to + apply to this DB instance. Changing this parameter does not result + in an outage and the change is applied during the next maintenance + window unless the `ApplyImmediately` parameter is set to `True` for + this request. + Default: Uses existing setting + + Constraints: The DB parameter group must be in the same DB parameter + group family as this DB instance. + + :type backup_retention_period: integer + :param backup_retention_period: + The number of days to retain automated backups. Setting this parameter + to a positive number enables backups. Setting this parameter to 0 + disables automated backups. + + Changing this parameter can result in an outage if you change from 0 to + a non-zero value or from a non-zero value to 0. These changes are + applied during the next maintenance window unless the + `ApplyImmediately` parameter is set to `True` for this request. If + you change the parameter from one non-zero value to another non- + zero value, the change is asynchronously applied as soon as + possible. + + Default: Uses existing setting + + Constraints: + + + + Must be a value from 0 to 8 + + Cannot be set to 0 if the DB instance is a master instance with read + replicas or if the DB instance is a read replica + + :type preferred_backup_window: string + :param preferred_backup_window: + The daily time range during which automated backups are created if + automated backups are enabled, as determined by the + `BackupRetentionPeriod`. 
Changing this parameter does not result in + an outage and the change is asynchronously applied as soon as + possible. + + Constraints: + + + + Must be in the format hh24:mi-hh24:mi + + Times should be Universal Time Coordinated (UTC) + + Must not conflict with the preferred maintenance window + + Must be at least 30 minutes + + :type preferred_maintenance_window: string + :param preferred_maintenance_window: The weekly time range (in UTC) + during which system maintenance can occur, which may result in an + outage. Changing this parameter does not result in an outage, + except in the following situation, and the change is asynchronously + applied as soon as possible. If there are pending actions that + cause a reboot, and the maintenance window is changed to include + the current time, then changing this parameter will cause a reboot + of the DB instance. If moving this window to the current time, + there must be at least 30 minutes between the current time and end + of the window to ensure pending changes are applied. + Default: Uses existing setting + + Format: ddd:hh24:mi-ddd:hh24:mi + + Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun + + Constraints: Must be at least 30 minutes + + :type multi_az: boolean + :param multi_az: Specifies if the DB instance is a Multi-AZ deployment. + Changing this parameter does not result in an outage and the change + is applied during the next maintenance window unless the + `ApplyImmediately` parameter is set to `True` for this request. + Constraints: Cannot be specified if the DB instance is a read replica. + + :type engine_version: string + :param engine_version: The version number of the database engine to + upgrade to. Changing this parameter results in an outage and the + change is applied during the next maintenance window unless the + `ApplyImmediately` parameter is set to `True` for this request. + For major version upgrades, if a non-default DB parameter group is + currently in use, a new DB parameter group in the DB parameter + group family for the new engine version must be specified. The new + DB parameter group can be the default for that DB parameter group + family. + + Example: `5.1.42` + + :type allow_major_version_upgrade: boolean + :param allow_major_version_upgrade: Indicates that major version + upgrades are allowed. Changing this parameter does not result in an + outage and the change is asynchronously applied as soon as + possible. + Constraints: This parameter must be set to true when specifying a value + for the EngineVersion parameter that is a different major version + than the DB instance's current version. + + :type auto_minor_version_upgrade: boolean + :param auto_minor_version_upgrade: Indicates that minor version + upgrades will be applied automatically to the DB instance during + the maintenance window. Changing this parameter does not result in + an outage except in the following case and the change is + asynchronously applied as soon as possible. An outage will result + if this parameter is set to `True` during the maintenance window, + and a newer minor version is available, and RDS has enabled auto + patching for that engine version. + + :type iops: integer + :param iops: The new Provisioned IOPS (I/O operations per second) value + for the RDS instance. Changing this parameter does not result in an + outage and the change is applied during the next maintenance window + unless the `ApplyImmediately` parameter is set to `True` for this + request. 
+ Default: Uses existing setting + + Constraints: Value supplied must be at least 10% greater than the + current value. Values that are not at least 10% greater than the + existing value are rounded up so that they are 10% greater than the + current value. + + Type: Integer + + If you choose to migrate your DB instance from using standard storage + to using Provisioned IOPS, or from using Provisioned IOPS to using + standard storage, the process can take time. The duration of the + migration depends on several factors such as database load, storage + size, storage type (standard or Provisioned IOPS), amount of IOPS + provisioned (if any), and the number of prior scale storage + operations. Typical migration times are under 24 hours, but the + process can take up to several days in some cases. During the + migration, the DB instance will be available for use, but may + experience performance degradation. While the migration takes + place, nightly backups for the instance will be suspended. No other + Amazon RDS operations can take place for the instance, including + modifying the instance, rebooting the instance, deleting the + instance, creating a read replica for the instance, and creating a + DB snapshot of the instance. + + :type option_group_name: string + :param option_group_name: Indicates that the DB instance should be + associated with the specified option group. Changing this parameter + does not result in an outage except in the following case and the + change is applied during the next maintenance window unless the + `ApplyImmediately` parameter is set to `True` for this request. If + the parameter change results in an option group that enables OEM, + this change can cause a brief (sub-second) period during which new + connections are rejected but existing connections are not + interrupted. + Permanent options, such as the TDE option for Oracle Advanced Security + TDE, cannot be removed from an option group, and that option group + cannot be removed from a DB instance once it is associated with a + DB instance + + :type new_db_instance_identifier: string + :param new_db_instance_identifier: + The new DB instance identifier for the DB instance when renaming a DB + Instance. This value is stored as a lowercase string. 
+ + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + """ + params = {'DBInstanceIdentifier': db_instance_identifier, } + if allocated_storage is not None: + params['AllocatedStorage'] = allocated_storage + if db_instance_class is not None: + params['DBInstanceClass'] = db_instance_class + if db_security_groups is not None: + self.build_list_params(params, + db_security_groups, + 'DBSecurityGroups.member') + if vpc_security_group_ids is not None: + self.build_list_params(params, + vpc_security_group_ids, + 'VpcSecurityGroupIds.member') + if apply_immediately is not None: + params['ApplyImmediately'] = str( + apply_immediately).lower() + if master_user_password is not None: + params['MasterUserPassword'] = master_user_password + if db_parameter_group_name is not None: + params['DBParameterGroupName'] = db_parameter_group_name + if backup_retention_period is not None: + params['BackupRetentionPeriod'] = backup_retention_period + if preferred_backup_window is not None: + params['PreferredBackupWindow'] = preferred_backup_window + if preferred_maintenance_window is not None: + params['PreferredMaintenanceWindow'] = preferred_maintenance_window + if multi_az is not None: + params['MultiAZ'] = str( + multi_az).lower() + if engine_version is not None: + params['EngineVersion'] = engine_version + if allow_major_version_upgrade is not None: + params['AllowMajorVersionUpgrade'] = str( + allow_major_version_upgrade).lower() + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str( + auto_minor_version_upgrade).lower() + if iops is not None: + params['Iops'] = iops + if option_group_name is not None: + params['OptionGroupName'] = option_group_name + if new_db_instance_identifier is not None: + params['NewDBInstanceIdentifier'] = new_db_instance_identifier + return self._make_request( + action='ModifyDBInstance', + verb='POST', + path='/', params=params) + + def modify_db_parameter_group(self, db_parameter_group_name, parameters): + """ + Modifies the parameters of a DB parameter group. To modify + more than one parameter, submit a list of the following: + `ParameterName`, `ParameterValue`, and `ApplyMethod`. A + maximum of 20 parameters can be modified in a single request. + + The `apply-immediate` method can be used only for dynamic + parameters; the `pending-reboot` method can be used with MySQL + and Oracle DB instances for either dynamic or static + parameters. For Microsoft SQL Server DB instances, the + `pending-reboot` method can be used only for static + parameters. + + :type db_parameter_group_name: string + :param db_parameter_group_name: + The name of the DB parameter group. + + Constraints: + + + + Must be the name of an existing DB parameter group + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type parameters: list + :param parameters: + An array of parameter names, values, and the apply method for the + parameter update. At least one parameter name, value, and apply + method must be supplied; subsequent arguments are optional. A + maximum of 20 parameters may be modified in a single request. + + Valid Values (for the application method): `immediate | pending-reboot` + + You can use the immediate value with dynamic parameters only. 
You can
+        use the pending-reboot value for both dynamic and static
+        parameters, and changes are applied when DB instance reboots.
+
+        """
+        params = {'DBParameterGroupName': db_parameter_group_name, }
+        self.build_complex_list_params(
+            params, parameters,
+            'Parameters.member',
+            ('ParameterName', 'ParameterValue', 'Description', 'Source', 'ApplyType', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion', 'ApplyMethod'))
+        return self._make_request(
+            action='ModifyDBParameterGroup',
+            verb='POST',
+            path='/', params=params)
+
+    def modify_db_subnet_group(self, db_subnet_group_name, subnet_ids,
+                               db_subnet_group_description=None):
+        """
+        Modifies an existing DB subnet group. DB subnet groups must
+        contain at least one subnet in at least two AZs in the region.
+
+        :type db_subnet_group_name: string
+        :param db_subnet_group_name: The name for the DB subnet group. This
+            value is stored as a lowercase string.
+        Constraints: Must contain no more than 255 alphanumeric characters or
+            hyphens. Must not be "Default".
+
+        Example: `mySubnetgroup`
+
+        :type db_subnet_group_description: string
+        :param db_subnet_group_description: The description for the DB subnet
+            group.
+
+        :type subnet_ids: list
+        :param subnet_ids: The EC2 subnet IDs for the DB subnet group.
+
+        """
+        params = {'DBSubnetGroupName': db_subnet_group_name, }
+        self.build_list_params(params,
+                               subnet_ids,
+                               'SubnetIds.member')
+        if db_subnet_group_description is not None:
+            params['DBSubnetGroupDescription'] = db_subnet_group_description
+        return self._make_request(
+            action='ModifyDBSubnetGroup',
+            verb='POST',
+            path='/', params=params)
+
+    def modify_event_subscription(self, subscription_name,
+                                  sns_topic_arn=None, source_type=None,
+                                  event_categories=None, enabled=None):
+        """
+        Modifies an existing RDS event notification subscription. Note
+        that you cannot modify the source identifiers using this call;
+        to change source identifiers for a subscription, use the
+        AddSourceIdentifierToSubscription and
+        RemoveSourceIdentifierFromSubscription calls.
+
+        You can see a list of the event categories for a given
+        SourceType in the `Events`_ topic in the Amazon RDS User Guide
+        or by using the **DescribeEventCategories** action.
+
+        :type subscription_name: string
+        :param subscription_name: The name of the RDS event notification
+            subscription.
+
+        :type sns_topic_arn: string
+        :param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic
+            created for event notification. The ARN is created by Amazon SNS
+            when you create a topic and subscribe to it.
+
+        :type source_type: string
+        :param source_type: The type of source that will be generating the
+            events. For example, if you want to be notified of events generated
+            by a DB instance, you would set this parameter to db-instance. If
+            this value is not specified, all events are returned.
+        Valid values: db-instance | db-parameter-group | db-security-group |
+            db-snapshot
+
+        :type event_categories: list
+        :param event_categories: A list of event categories for a SourceType
+            that you want to subscribe to. You can see a list of the categories
+            for a given SourceType in the `Events`_ topic in the Amazon RDS
+            User Guide or by using the **DescribeEventCategories** action.
+
+        :type enabled: boolean
+        :param enabled: A Boolean value; set to **true** to activate the
+            subscription.
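+
+        For illustration, a minimal sketch of a call that switches a
+        subscription to DB instance events, assuming `conn` is an
+        instance of this connection class (the subscription name and
+        categories below are hypothetical)::
+
+            conn.modify_event_subscription(
+                'my-subscription',          # hypothetical subscription name
+                source_type='db-instance',
+                event_categories=['availability', 'failure'],
+                enabled=True)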
+ + """ + params = {'SubscriptionName': subscription_name, } + if sns_topic_arn is not None: + params['SnsTopicArn'] = sns_topic_arn + if source_type is not None: + params['SourceType'] = source_type + if event_categories is not None: + self.build_list_params(params, + event_categories, + 'EventCategories.member') + if enabled is not None: + params['Enabled'] = str( + enabled).lower() + return self._make_request( + action='ModifyEventSubscription', + verb='POST', + path='/', params=params) + + def modify_option_group(self, option_group_name, options_to_include=None, + options_to_remove=None, apply_immediately=None): + """ + Modifies an existing option group. + + :type option_group_name: string + :param option_group_name: The name of the option group to be modified. + Permanent options, such as the TDE option for Oracle Advanced Security + TDE, cannot be removed from an option group, and that option group + cannot be removed from a DB instance once it is associated with a + DB instance + + :type options_to_include: list + :param options_to_include: Options in this list are added to the option + group or, if already present, the specified configuration is used + to update the existing configuration. + + :type options_to_remove: list + :param options_to_remove: Options in this list are removed from the + option group. + + :type apply_immediately: boolean + :param apply_immediately: Indicates whether the changes should be + applied immediately, or during the next maintenance window for each + instance associated with the option group. + + """ + params = {'OptionGroupName': option_group_name, } + if options_to_include is not None: + self.build_complex_list_params( + params, options_to_include, + 'OptionsToInclude.member', + ('OptionName', 'Port', 'DBSecurityGroupMemberships', 'VpcSecurityGroupMemberships', 'OptionSettings')) + if options_to_remove is not None: + self.build_list_params(params, + options_to_remove, + 'OptionsToRemove.member') + if apply_immediately is not None: + params['ApplyImmediately'] = str( + apply_immediately).lower() + return self._make_request( + action='ModifyOptionGroup', + verb='POST', + path='/', params=params) + + def promote_read_replica(self, db_instance_identifier, + backup_retention_period=None, + preferred_backup_window=None): + """ + Promotes a read replica DB instance to a standalone DB + instance. + + :type db_instance_identifier: string + :param db_instance_identifier: The DB instance identifier. This value + is stored as a lowercase string. + Constraints: + + + + Must be the identifier for an existing read replica DB instance + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + + Example: mydbinstance + + :type backup_retention_period: integer + :param backup_retention_period: + The number of days to retain automated backups. Setting this parameter + to a positive number enables backups. Setting this parameter to 0 + disables automated backups. + + Default: 1 + + Constraints: + + + + Must be a value from 0 to 8 + + :type preferred_backup_window: string + :param preferred_backup_window: The daily time range during which + automated backups are created if automated backups are enabled, + using the `BackupRetentionPeriod` parameter. + Default: A 30-minute window selected at random from an 8-hour block of + time per region. See the Amazon RDS User Guide for the time blocks + for each region from which the default backup windows are assigned. 
+ + Constraints: Must be in the format `hh24:mi-hh24:mi`. Times should be + Universal Time Coordinated (UTC). Must not conflict with the + preferred maintenance window. Must be at least 30 minutes. + + """ + params = {'DBInstanceIdentifier': db_instance_identifier, } + if backup_retention_period is not None: + params['BackupRetentionPeriod'] = backup_retention_period + if preferred_backup_window is not None: + params['PreferredBackupWindow'] = preferred_backup_window + return self._make_request( + action='PromoteReadReplica', + verb='POST', + path='/', params=params) + + def purchase_reserved_db_instances_offering(self, + reserved_db_instances_offering_id, + reserved_db_instance_id=None, + db_instance_count=None, + tags=None): + """ + Purchases a reserved DB instance offering. + + :type reserved_db_instances_offering_id: string + :param reserved_db_instances_offering_id: The ID of the Reserved DB + instance offering to purchase. + Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706 + + :type reserved_db_instance_id: string + :param reserved_db_instance_id: Customer-specified identifier to track + this reservation. + Example: myreservationID + + :type db_instance_count: integer + :param db_instance_count: The number of instances to reserve. + Default: `1` + + :type tags: list + :param tags: A list of tags. + + """ + params = { + 'ReservedDBInstancesOfferingId': reserved_db_instances_offering_id, + } + if reserved_db_instance_id is not None: + params['ReservedDBInstanceId'] = reserved_db_instance_id + if db_instance_count is not None: + params['DBInstanceCount'] = db_instance_count + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='PurchaseReservedDBInstancesOffering', + verb='POST', + path='/', params=params) + + def reboot_db_instance(self, db_instance_identifier, force_failover=None): + """ + Rebooting a DB instance restarts the database engine service. + A reboot also applies to the DB instance any modifications to + the associated DB parameter group that were pending. Rebooting + a DB instance results in a momentary outage of the instance, + during which the DB instance status is set to rebooting. If + the RDS instance is configured for MultiAZ, it is possible + that the reboot will be conducted through a failover. An + Amazon RDS event is created when the reboot is completed. + + If your DB instance is deployed in multiple Availability + Zones, you can force a failover from one AZ to the other + during the reboot. You might force a failover to test the + availability of your DB instance deployment or to restore + operations to the original AZ after a failover occurs. + + The time required to reboot is a function of the specific + database engine's crash recovery process. To improve the + reboot time, we recommend that you reduce database activities + as much as possible during the reboot process to reduce + rollback activity for in-transit transactions. + + :type db_instance_identifier: string + :param db_instance_identifier: + The DB instance identifier. This parameter is stored as a lowercase + string. + + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type force_failover: boolean + :param force_failover: When `True`, the reboot will be conducted + through a MultiAZ failover. 
+ Constraint: You cannot specify `True` if the instance is not configured + for MultiAZ. + + """ + params = {'DBInstanceIdentifier': db_instance_identifier, } + if force_failover is not None: + params['ForceFailover'] = str( + force_failover).lower() + return self._make_request( + action='RebootDBInstance', + verb='POST', + path='/', params=params) + + def remove_source_identifier_from_subscription(self, subscription_name, + source_identifier): + """ + Removes a source identifier from an existing RDS event + notification subscription. + + :type subscription_name: string + :param subscription_name: The name of the RDS event notification + subscription you want to remove a source identifier from. + + :type source_identifier: string + :param source_identifier: The source identifier to be removed from the + subscription, such as the **DB instance identifier** for a DB + instance or the name of a security group. + + """ + params = { + 'SubscriptionName': subscription_name, + 'SourceIdentifier': source_identifier, + } + return self._make_request( + action='RemoveSourceIdentifierFromSubscription', + verb='POST', + path='/', params=params) + + def remove_tags_from_resource(self, resource_name, tag_keys): + """ + Removes metadata tags from an Amazon RDS resource. + + For an overview on tagging an Amazon RDS resource, see + `Tagging Amazon RDS Resources`_. + + :type resource_name: string + :param resource_name: The Amazon RDS resource the tags will be removed + from. This value is an Amazon Resource Name (ARN). For information + about creating an ARN, see ` Constructing an RDS Amazon Resource + Name (ARN)`_. + + :type tag_keys: list + :param tag_keys: The tag key (name) of the tag to be removed. + + """ + params = {'ResourceName': resource_name, } + self.build_list_params(params, + tag_keys, + 'TagKeys.member') + return self._make_request( + action='RemoveTagsFromResource', + verb='POST', + path='/', params=params) + + def reset_db_parameter_group(self, db_parameter_group_name, + reset_all_parameters=None, parameters=None): + """ + Modifies the parameters of a DB parameter group to the + engine/system default value. To reset specific parameters + submit a list of the following: `ParameterName` and + `ApplyMethod`. To reset the entire DB parameter group, specify + the `DBParameterGroup` name and `ResetAllParameters` + parameters. When resetting the entire group, dynamic + parameters are updated immediately and static parameters are + set to `pending-reboot` to take effect on the next DB instance + restart or `RebootDBInstance` request. + + :type db_parameter_group_name: string + :param db_parameter_group_name: + The name of the DB parameter group. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type reset_all_parameters: boolean + :param reset_all_parameters: Specifies whether ( `True`) or not ( + `False`) to reset all parameters in the DB parameter group to + default values. + Default: `True` + + :type parameters: list + :param parameters: An array of parameter names, values, and the apply + method for the parameter update. At least one parameter name, + value, and apply method must be supplied; subsequent arguments are + optional. A maximum of 20 parameters may be modified in a single + request. + **MySQL** + + Valid Values (for Apply method): `immediate` | `pending-reboot` + + You can use the immediate value with dynamic parameters only. 
You can
+        use the `pending-reboot` value for both dynamic and static
+        parameters, and changes are applied when DB instance reboots.
+
+        **Oracle**
+
+        Valid Values (for Apply method): `pending-reboot`
+
+        """
+        params = {'DBParameterGroupName': db_parameter_group_name, }
+        if reset_all_parameters is not None:
+            params['ResetAllParameters'] = str(
+                reset_all_parameters).lower()
+        if parameters is not None:
+            self.build_complex_list_params(
+                params, parameters,
+                'Parameters.member',
+                ('ParameterName', 'ParameterValue', 'Description', 'Source', 'ApplyType', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion', 'ApplyMethod'))
+        return self._make_request(
+            action='ResetDBParameterGroup',
+            verb='POST',
+            path='/', params=params)
+
+    def restore_db_instance_from_db_snapshot(self, db_instance_identifier,
+                                             db_snapshot_identifier,
+                                             db_instance_class=None,
+                                             port=None,
+                                             availability_zone=None,
+                                             db_subnet_group_name=None,
+                                             multi_az=None,
+                                             publicly_accessible=None,
+                                             auto_minor_version_upgrade=None,
+                                             license_model=None,
+                                             db_name=None, engine=None,
+                                             iops=None,
+                                             option_group_name=None,
+                                             tags=None):
+        """
+        Creates a new DB instance from a DB snapshot. The target
+        database is created from the source database restore point
+        with the same configuration as the original source database,
+        except that the new RDS instance is created with the default
+        security group.
+
+        :type db_instance_identifier: string
+        :param db_instance_identifier:
+            Name of the DB instance to create from the DB snapshot. This
+            parameter isn't case sensitive.
+
+        Constraints:
+
+
+        + Must contain from 1 to 63 alphanumeric characters or hyphens
+        + First character must be a letter
+        + Cannot end with a hyphen or contain two consecutive hyphens
+
+        :type db_snapshot_identifier: string
+        :param db_snapshot_identifier: The identifier for the DB snapshot to
+            restore from.
+        Constraints:
+
+
+        + Must contain from 1 to 255 alphanumeric characters or hyphens
+        + First character must be a letter
+        + Cannot end with a hyphen or contain two consecutive hyphens
+
+
+        Example: `my-snapshot-id`
+
+        :type db_instance_class: string
+        :param db_instance_class: The compute and memory capacity of the Amazon
+            RDS DB instance.
+        Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
+            db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge`
+
+        :type port: integer
+        :param port: The port number on which the database accepts connections.
+        Default: The same port as the original DB instance
+
+        Constraints: Value must be `1150-65535`
+
+        :type availability_zone: string
+        :param availability_zone: The EC2 Availability Zone that the database
+            instance will be created in.
+        Default: A random, system-chosen Availability Zone.
+
+        Constraint: You cannot specify the AvailabilityZone parameter if the
+            MultiAZ parameter is set to `True`.
+
+        Example: `us-east-1a`
+
+        :type db_subnet_group_name: string
+        :param db_subnet_group_name: The DB subnet group name to use for the
+            new instance.
+
+        :type multi_az: boolean
+        :param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
+        Constraint: You cannot specify the AvailabilityZone parameter if the
+            MultiAZ parameter is set to `True`.
+
+        :type publicly_accessible: boolean
+        :param publicly_accessible: Specifies the accessibility options for the
+            DB instance. A value of true specifies an Internet-facing instance
+            with a publicly resolvable DNS name, which resolves to a public IP
+            address.
A value of false specifies an internal instance with a DNS + name that resolves to a private IP address. + Default: The default behavior varies depending on whether a VPC has + been requested or not. The following list shows the default + behavior in each case. + + + + **Default VPC:**true + + **VPC:**false + + + If no DB subnet group has been specified as part of the request and the + PubliclyAccessible value has not been set, the DB instance will be + publicly accessible. If a specific DB subnet group has been + specified as part of the request and the PubliclyAccessible value + has not been set, the DB instance will be private. + + :type auto_minor_version_upgrade: boolean + :param auto_minor_version_upgrade: Indicates that minor version + upgrades will be applied automatically to the DB instance during + the maintenance window. + + :type license_model: string + :param license_model: License model information for the restored DB + instance. + Default: Same as source. + + Valid values: `license-included` | `bring-your-own-license` | `general- + public-license` + + :type db_name: string + :param db_name: + The database name for the restored DB instance. + + + This parameter doesn't apply to the MySQL engine. + + :type engine: string + :param engine: The database engine to use for the new instance. + Default: The same as source + + Constraint: Must be compatible with the engine of the source + + Example: `oracle-ee` + + :type iops: integer + :param iops: Specifies the amount of provisioned IOPS for the DB + instance, expressed in I/O operations per second. If this parameter + is not specified, the IOPS value will be taken from the backup. If + this parameter is set to 0, the new instance will be converted to a + non-PIOPS instance, which will take additional time, though your DB + instance will be available for connections before the conversion + starts. + Constraints: Must be an integer greater than 1000. + + :type option_group_name: string + :param option_group_name: The name of the option group to be used for + the restored DB instance. + Permanent options, such as the TDE option for Oracle Advanced Security + TDE, cannot be removed from an option group, and that option group + cannot be removed from a DB instance once it is associated with a + DB instance + + :type tags: list + :param tags: A list of tags. 
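+
+        A minimal illustrative call, assuming `conn` is an instance of
+        this connection class; both identifiers below are hypothetical::
+
+            conn.restore_db_instance_from_db_snapshot(
+                'restored-db',       # name for the new DB instance
+                'my-snapshot-id',    # DB snapshot to restore from
+                db_instance_class='db.m1.small')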
+ + """ + params = { + 'DBInstanceIdentifier': db_instance_identifier, + 'DBSnapshotIdentifier': db_snapshot_identifier, + } + if db_instance_class is not None: + params['DBInstanceClass'] = db_instance_class + if port is not None: + params['Port'] = port + if availability_zone is not None: + params['AvailabilityZone'] = availability_zone + if db_subnet_group_name is not None: + params['DBSubnetGroupName'] = db_subnet_group_name + if multi_az is not None: + params['MultiAZ'] = str( + multi_az).lower() + if publicly_accessible is not None: + params['PubliclyAccessible'] = str( + publicly_accessible).lower() + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str( + auto_minor_version_upgrade).lower() + if license_model is not None: + params['LicenseModel'] = license_model + if db_name is not None: + params['DBName'] = db_name + if engine is not None: + params['Engine'] = engine + if iops is not None: + params['Iops'] = iops + if option_group_name is not None: + params['OptionGroupName'] = option_group_name + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='RestoreDBInstanceFromDBSnapshot', + verb='POST', + path='/', params=params) + + def restore_db_instance_to_point_in_time(self, + source_db_instance_identifier, + target_db_instance_identifier, + restore_time=None, + use_latest_restorable_time=None, + db_instance_class=None, + port=None, + availability_zone=None, + db_subnet_group_name=None, + multi_az=None, + publicly_accessible=None, + auto_minor_version_upgrade=None, + license_model=None, + db_name=None, engine=None, + iops=None, + option_group_name=None, + tags=None): + """ + Restores a DB instance to an arbitrary point-in-time. Users + can restore to any point in time before the + latestRestorableTime for up to backupRetentionPeriod days. The + target database is created from the source database with the + same configuration as the original database except that the DB + instance is created with the default DB security group. + + :type source_db_instance_identifier: string + :param source_db_instance_identifier: + The identifier of the source DB instance from which to restore. + + Constraints: + + + + Must be the identifier of an existing database instance + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type target_db_instance_identifier: string + :param target_db_instance_identifier: + The name of the new database instance to be created. + + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type restore_time: timestamp + :param restore_time: The date and time to restore from. + Valid Values: Value must be a UTC time + + Constraints: + + + + Must be before the latest restorable time for the DB instance + + Cannot be specified if UseLatestRestorableTime parameter is true + + + Example: `2009-09-07T23:45:00Z` + + :type use_latest_restorable_time: boolean + :param use_latest_restorable_time: Specifies whether ( `True`) or not ( + `False`) the DB instance is restored from the latest backup time. + Default: `False` + + Constraints: Cannot be specified if RestoreTime parameter is provided. 
+ + :type db_instance_class: string + :param db_instance_class: The compute and memory capacity of the Amazon + RDS DB instance. + Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | + db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge` + + Default: The same DBInstanceClass as the original DB instance. + + :type port: integer + :param port: The port number on which the database accepts connections. + Constraints: Value must be `1150-65535` + + Default: The same port as the original DB instance. + + :type availability_zone: string + :param availability_zone: The EC2 Availability Zone that the database + instance will be created in. + Default: A random, system-chosen Availability Zone. + + Constraint: You cannot specify the AvailabilityZone parameter if the + MultiAZ parameter is set to true. + + Example: `us-east-1a` + + :type db_subnet_group_name: string + :param db_subnet_group_name: The DB subnet group name to use for the + new instance. + + :type multi_az: boolean + :param multi_az: Specifies if the DB instance is a Multi-AZ deployment. + Constraint: You cannot specify the AvailabilityZone parameter if the + MultiAZ parameter is set to `True`. + + :type publicly_accessible: boolean + :param publicly_accessible: Specifies the accessibility options for the + DB instance. A value of true specifies an Internet-facing instance + with a publicly resolvable DNS name, which resolves to a public IP + address. A value of false specifies an internal instance with a DNS + name that resolves to a private IP address. + Default: The default behavior varies depending on whether a VPC has + been requested or not. The following list shows the default + behavior in each case. + + + + **Default VPC:**true + + **VPC:**false + + + If no DB subnet group has been specified as part of the request and the + PubliclyAccessible value has not been set, the DB instance will be + publicly accessible. If a specific DB subnet group has been + specified as part of the request and the PubliclyAccessible value + has not been set, the DB instance will be private. + + :type auto_minor_version_upgrade: boolean + :param auto_minor_version_upgrade: Indicates that minor version + upgrades will be applied automatically to the DB instance during + the maintenance window. + + :type license_model: string + :param license_model: License model information for the restored DB + instance. + Default: Same as source. + + Valid values: `license-included` | `bring-your-own-license` | `general- + public-license` + + :type db_name: string + :param db_name: + The database name for the restored DB instance. + + + This parameter is not used for the MySQL engine. + + :type engine: string + :param engine: The database engine to use for the new instance. + Default: The same as source + + Constraint: Must be compatible with the engine of the source + + Example: `oracle-ee` + + :type iops: integer + :param iops: The amount of Provisioned IOPS (input/output operations + per second) to be initially allocated for the DB instance. + Constraints: Must be an integer greater than 1000. + + :type option_group_name: string + :param option_group_name: The name of the option group to be used for + the restored DB instance. + Permanent options, such as the TDE option for Oracle Advanced Security + TDE, cannot be removed from an option group, and that option group + cannot be removed from a DB instance once it is associated with a + DB instance + + :type tags: list + :param tags: A list of tags. 
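+
+        A minimal illustrative call that restores to the latest
+        restorable time, assuming `conn` is an instance of this
+        connection class; both identifiers below are hypothetical::
+
+            conn.restore_db_instance_to_point_in_time(
+                'mydbinstance',           # existing source DB instance
+                'mydbinstance-restored',  # new DB instance to create
+                use_latest_restorable_time=True)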
+ + """ + params = { + 'SourceDBInstanceIdentifier': source_db_instance_identifier, + 'TargetDBInstanceIdentifier': target_db_instance_identifier, + } + if restore_time is not None: + params['RestoreTime'] = restore_time + if use_latest_restorable_time is not None: + params['UseLatestRestorableTime'] = str( + use_latest_restorable_time).lower() + if db_instance_class is not None: + params['DBInstanceClass'] = db_instance_class + if port is not None: + params['Port'] = port + if availability_zone is not None: + params['AvailabilityZone'] = availability_zone + if db_subnet_group_name is not None: + params['DBSubnetGroupName'] = db_subnet_group_name + if multi_az is not None: + params['MultiAZ'] = str( + multi_az).lower() + if publicly_accessible is not None: + params['PubliclyAccessible'] = str( + publicly_accessible).lower() + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str( + auto_minor_version_upgrade).lower() + if license_model is not None: + params['LicenseModel'] = license_model + if db_name is not None: + params['DBName'] = db_name + if engine is not None: + params['Engine'] = engine + if iops is not None: + params['Iops'] = iops + if option_group_name is not None: + params['OptionGroupName'] = option_group_name + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='RestoreDBInstanceToPointInTime', + verb='POST', + path='/', params=params) + + def revoke_db_security_group_ingress(self, db_security_group_name, + cidrip=None, + ec2_security_group_name=None, + ec2_security_group_id=None, + ec2_security_group_owner_id=None): + """ + Revokes ingress from a DBSecurityGroup for previously + authorized IP ranges or EC2 or VPC Security Groups. Required + parameters for this API are one of CIDRIP, EC2SecurityGroupId + for VPC, or (EC2SecurityGroupOwnerId and either + EC2SecurityGroupName or EC2SecurityGroupId). + + :type db_security_group_name: string + :param db_security_group_name: The name of the DB security group to + revoke ingress from. + + :type cidrip: string + :param cidrip: The IP range to revoke access from. Must be a valid CIDR + range. If `CIDRIP` is specified, `EC2SecurityGroupName`, + `EC2SecurityGroupId` and `EC2SecurityGroupOwnerId` cannot be + provided. + + :type ec2_security_group_name: string + :param ec2_security_group_name: The name of the EC2 security group to + revoke access from. For VPC DB security groups, + `EC2SecurityGroupId` must be provided. Otherwise, + EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or + `EC2SecurityGroupId` must be provided. + + :type ec2_security_group_id: string + :param ec2_security_group_id: The id of the EC2 security group to + revoke access from. For VPC DB security groups, + `EC2SecurityGroupId` must be provided. Otherwise, + EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or + `EC2SecurityGroupId` must be provided. + + :type ec2_security_group_owner_id: string + :param ec2_security_group_owner_id: The AWS Account Number of the owner + of the EC2 security group specified in the `EC2SecurityGroupName` + parameter. The AWS Access Key ID is not an acceptable value. For + VPC DB security groups, `EC2SecurityGroupId` must be provided. + Otherwise, EC2SecurityGroupOwnerId and either + `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided. 
+ + """ + params = {'DBSecurityGroupName': db_security_group_name, } + if cidrip is not None: + params['CIDRIP'] = cidrip + if ec2_security_group_name is not None: + params['EC2SecurityGroupName'] = ec2_security_group_name + if ec2_security_group_id is not None: + params['EC2SecurityGroupId'] = ec2_security_group_id + if ec2_security_group_owner_id is not None: + params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id + return self._make_request( + action='RevokeDBSecurityGroupIngress', + verb='POST', + path='/', params=params) + + def _make_request(self, action, verb, path, params): + params['ContentType'] = 'JSON' + response = self.make_request(action=action, verb='POST', + path='/', params=params) + body = response.read() + boto.log.debug(body) + if response.status == 200: + return json.loads(body) + else: + json_body = json.loads(body) + fault_name = json_body.get('Error', {}).get('Code', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/redshift/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/redshift/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f98ececd7554ccabb702bac75d4d65942e52cb71 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/redshift/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the AWS Redshift service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.redshift.layer1 import RedshiftConnection + return get_regions('redshift', connection_cls=RedshiftConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/redshift/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/redshift/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..70339225fffb6fed5cdc099252423350440f1b6c --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/redshift/exceptions.py @@ -0,0 +1,459 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import JSONResponseError + + +class ClusterNotFoundFault(JSONResponseError): + pass + + +class InvalidClusterSnapshotStateFault(JSONResponseError): + pass + + +class ClusterSnapshotNotFoundFault(JSONResponseError): + pass + + +class ClusterSecurityGroupQuotaExceededFault(JSONResponseError): + pass + + +class ReservedNodeOfferingNotFoundFault(JSONResponseError): + pass + + +class InvalidSubnet(JSONResponseError): + pass + + +class ClusterSubnetGroupQuotaExceededFault(JSONResponseError): + pass + + +class InvalidClusterStateFault(JSONResponseError): + pass + + +class InvalidClusterParameterGroupStateFault(JSONResponseError): + pass + + +class ClusterParameterGroupAlreadyExistsFault(JSONResponseError): + pass + + +class InvalidClusterSecurityGroupStateFault(JSONResponseError): + pass + + +class InvalidRestoreFault(JSONResponseError): + pass + + +class AuthorizationNotFoundFault(JSONResponseError): + pass + + +class ResizeNotFoundFault(JSONResponseError): + pass + + +class NumberOfNodesQuotaExceededFault(JSONResponseError): + pass + + +class ClusterSnapshotAlreadyExistsFault(JSONResponseError): + pass + + +class AuthorizationQuotaExceededFault(JSONResponseError): + pass + + +class AuthorizationAlreadyExistsFault(JSONResponseError): + pass + + +class ClusterSnapshotQuotaExceededFault(JSONResponseError): + pass + + +class ReservedNodeNotFoundFault(JSONResponseError): + pass + + +class ReservedNodeAlreadyExistsFault(JSONResponseError): + pass + + +class ClusterSecurityGroupAlreadyExistsFault(JSONResponseError): + pass + + +class ClusterParameterGroupNotFoundFault(JSONResponseError): + pass + + +class ReservedNodeQuotaExceededFault(JSONResponseError): + pass + + +class ClusterQuotaExceededFault(JSONResponseError): + pass + + +class ClusterSubnetQuotaExceededFault(JSONResponseError): + pass + + +class UnsupportedOptionFault(JSONResponseError): + pass + + +class InvalidVPCNetworkStateFault(JSONResponseError): + pass + + +class ClusterSecurityGroupNotFoundFault(JSONResponseError): + pass + + +class InvalidClusterSubnetGroupStateFault(JSONResponseError): + pass + + +class ClusterSubnetGroupAlreadyExistsFault(JSONResponseError): + pass + + +class NumberOfNodesPerClusterLimitExceededFault(JSONResponseError): + pass + + +class ClusterSubnetGroupNotFoundFault(JSONResponseError): + pass + + +class ClusterParameterGroupQuotaExceededFault(JSONResponseError): + pass + + +class ClusterAlreadyExistsFault(JSONResponseError): + 
pass + + +class InsufficientClusterCapacityFault(JSONResponseError): + pass + + +class InvalidClusterSubnetStateFault(JSONResponseError): + pass + + +class SubnetAlreadyInUse(JSONResponseError): + pass + + +class InvalidParameterCombinationFault(JSONResponseError): + pass + + +class AccessToSnapshotDeniedFault(JSONResponseError): + pass + + +class UnauthorizedOperationFault(JSONResponseError): + pass + + +class SnapshotCopyAlreadyDisabled(JSONResponseError): + pass + + +class ClusterNotFound(JSONResponseError): + pass + + +class UnknownSnapshotCopyRegion(JSONResponseError): + pass + + +class InvalidClusterSubnetState(JSONResponseError): + pass + + +class ReservedNodeQuotaExceeded(JSONResponseError): + pass + + +class InvalidClusterState(JSONResponseError): + pass + + +class HsmClientCertificateQuotaExceeded(JSONResponseError): + pass + + +class SubscriptionCategoryNotFound(JSONResponseError): + pass + + +class HsmClientCertificateNotFound(JSONResponseError): + pass + + +class SubscriptionEventIdNotFound(JSONResponseError): + pass + + +class ClusterSecurityGroupAlreadyExists(JSONResponseError): + pass + + +class HsmConfigurationAlreadyExists(JSONResponseError): + pass + + +class NumberOfNodesQuotaExceeded(JSONResponseError): + pass + + +class ReservedNodeOfferingNotFound(JSONResponseError): + pass + + +class BucketNotFound(JSONResponseError): + pass + + +class InsufficientClusterCapacity(JSONResponseError): + pass + + +class InvalidRestore(JSONResponseError): + pass + + +class UnauthorizedOperation(JSONResponseError): + pass + + +class ClusterQuotaExceeded(JSONResponseError): + pass + + +class InvalidVPCNetworkState(JSONResponseError): + pass + + +class ClusterSnapshotNotFound(JSONResponseError): + pass + + +class AuthorizationQuotaExceeded(JSONResponseError): + pass + + +class InvalidHsmClientCertificateState(JSONResponseError): + pass + + +class SNSTopicArnNotFound(JSONResponseError): + pass + + +class ResizeNotFound(JSONResponseError): + pass + + +class ClusterSubnetGroupNotFound(JSONResponseError): + pass + + +class SNSNoAuthorization(JSONResponseError): + pass + + +class ClusterSnapshotQuotaExceeded(JSONResponseError): + pass + + +class AccessToSnapshotDenied(JSONResponseError): + pass + + +class InvalidClusterSecurityGroupState(JSONResponseError): + pass + + +class NumberOfNodesPerClusterLimitExceeded(JSONResponseError): + pass + + +class ClusterSubnetQuotaExceeded(JSONResponseError): + pass + + +class SNSInvalidTopic(JSONResponseError): + pass + + +class ClusterSecurityGroupNotFound(JSONResponseError): + pass + + +class InvalidElasticIp(JSONResponseError): + pass + + +class InvalidClusterParameterGroupState(JSONResponseError): + pass + + +class InvalidHsmConfigurationState(JSONResponseError): + pass + + + +class ClusterAlreadyExists(JSONResponseError): + pass + + +class HsmConfigurationQuotaExceeded(JSONResponseError): + pass + + +class ClusterSnapshotAlreadyExists(JSONResponseError): + pass + + +class SubscriptionSeverityNotFound(JSONResponseError): + pass + + +class SourceNotFound(JSONResponseError): + pass + + +class ReservedNodeAlreadyExists(JSONResponseError): + pass + + +class ClusterSubnetGroupQuotaExceeded(JSONResponseError): + pass + + +class ClusterParameterGroupNotFound(JSONResponseError): + pass + + +class InvalidS3BucketName(JSONResponseError): + pass + + +class InvalidS3KeyPrefix(JSONResponseError): + pass + + +class SubscriptionAlreadyExist(JSONResponseError): + pass + + +class HsmConfigurationNotFound(JSONResponseError): + pass + + +class 
AuthorizationNotFound(JSONResponseError): + pass + + +class ClusterSecurityGroupQuotaExceeded(JSONResponseError): + pass + + +class EventSubscriptionQuotaExceeded(JSONResponseError): + pass + + +class AuthorizationAlreadyExists(JSONResponseError): + pass + + +class InvalidClusterSnapshotState(JSONResponseError): + pass + + +class ClusterParameterGroupQuotaExceeded(JSONResponseError): + pass + + +class SnapshotCopyDisabled(JSONResponseError): + pass + + +class ClusterSubnetGroupAlreadyExists(JSONResponseError): + pass + + +class ReservedNodeNotFound(JSONResponseError): + pass + + +class HsmClientCertificateAlreadyExists(JSONResponseError): + pass + + +class InvalidClusterSubnetGroupState(JSONResponseError): + pass + + +class SubscriptionNotFound(JSONResponseError): + pass + + +class InsufficientS3BucketPolicy(JSONResponseError): + pass + + +class ClusterParameterGroupAlreadyExists(JSONResponseError): + pass + + +class UnsupportedOption(JSONResponseError): + pass + + +class CopyToRegionDisabled(JSONResponseError): + pass + + +class SnapshotCopyAlreadyEnabled(JSONResponseError): + pass + + +class IncompatibleOrderableOptions(JSONResponseError): + pass + + +class InvalidSubscriptionState(JSONResponseError): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/redshift/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/redshift/layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..be1529fdf851572f1a99f11d2c6d0ffb585771be --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/redshift/layer1.py @@ -0,0 +1,3097 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.redshift import exceptions + + +class RedshiftConnection(AWSQueryConnection): + """ + Amazon Redshift **Overview** + This is an interface reference for Amazon Redshift. It contains + documentation for one of the programming or command line + interfaces you can use to manage Amazon Redshift clusters. Note + that Amazon Redshift is asynchronous, which means that some + interfaces may require techniques, such as polling or asynchronous + callback handlers, to determine when a command has been applied. 
+    In this reference, the parameter descriptions indicate whether a
+    change is applied immediately, on the next instance reboot, or
+    during the next maintenance window. For a summary of the Amazon
+    Redshift cluster management interfaces, go to `Using the Amazon
+    Redshift Management Interfaces`_.
+
+    Amazon Redshift manages all the work of setting up, operating, and
+    scaling a data warehouse: provisioning capacity, monitoring and
+    backing up the cluster, and applying patches and upgrades to the
+    Amazon Redshift engine. You can focus on using your data to
+    acquire new insights for your business and customers.
+
+    If you are a first-time user of Amazon Redshift, we recommend that
+    you begin by reading the `Amazon Redshift Getting Started
+    Guide`_.
+
+    If you are a database developer, the `Amazon Redshift Database
+    Developer Guide`_ explains how to design, build, query, and
+    maintain the databases that make up your data warehouse.
+    """
+    APIVersion = "2012-12-01"
+    DefaultRegionName = "us-east-1"
+    DefaultRegionEndpoint = "redshift.us-east-1.amazonaws.com"
+    ResponseError = JSONResponseError
+
+    _faults = {
+        "SnapshotCopyAlreadyDisabled": exceptions.SnapshotCopyAlreadyDisabled,
+        "ClusterNotFound": exceptions.ClusterNotFound,
+        "UnknownSnapshotCopyRegion": exceptions.UnknownSnapshotCopyRegion,
+        "InvalidClusterSubnetState": exceptions.InvalidClusterSubnetState,
+        "InvalidSubnet": exceptions.InvalidSubnet,
+        "ReservedNodeQuotaExceeded": exceptions.ReservedNodeQuotaExceeded,
+        "InvalidClusterState": exceptions.InvalidClusterState,
+        "HsmClientCertificateQuotaExceeded": exceptions.HsmClientCertificateQuotaExceeded,
+        "SubscriptionCategoryNotFound": exceptions.SubscriptionCategoryNotFound,
+        "HsmClientCertificateNotFound": exceptions.HsmClientCertificateNotFound,
+        "SubscriptionEventIdNotFound": exceptions.SubscriptionEventIdNotFound,
+        "ClusterSecurityGroupAlreadyExists": exceptions.ClusterSecurityGroupAlreadyExists,
+        "HsmConfigurationAlreadyExists": exceptions.HsmConfigurationAlreadyExists,
+        "NumberOfNodesQuotaExceeded": exceptions.NumberOfNodesQuotaExceeded,
+        "ReservedNodeOfferingNotFound": exceptions.ReservedNodeOfferingNotFound,
+        "BucketNotFound": exceptions.BucketNotFound,
+        "InsufficientClusterCapacity": exceptions.InsufficientClusterCapacity,
+        "InvalidRestore": exceptions.InvalidRestore,
+        "UnauthorizedOperation": exceptions.UnauthorizedOperation,
+        "ClusterQuotaExceeded": exceptions.ClusterQuotaExceeded,
+        "InvalidVPCNetworkState": exceptions.InvalidVPCNetworkState,
+        "ClusterSnapshotNotFound": exceptions.ClusterSnapshotNotFound,
+        "AuthorizationQuotaExceeded": exceptions.AuthorizationQuotaExceeded,
+        "InvalidHsmClientCertificateState": exceptions.InvalidHsmClientCertificateState,
+        "SNSTopicArnNotFound": exceptions.SNSTopicArnNotFound,
+        "ResizeNotFound": exceptions.ResizeNotFound,
+        "ClusterSubnetGroupNotFound": exceptions.ClusterSubnetGroupNotFound,
+        "SNSNoAuthorization": exceptions.SNSNoAuthorization,
+        "ClusterSnapshotQuotaExceeded": exceptions.ClusterSnapshotQuotaExceeded,
+        "AccessToSnapshotDenied": exceptions.AccessToSnapshotDenied,
+        "InvalidClusterSecurityGroupState": exceptions.InvalidClusterSecurityGroupState,
+        "NumberOfNodesPerClusterLimitExceeded": exceptions.NumberOfNodesPerClusterLimitExceeded,
+        "ClusterSubnetQuotaExceeded": exceptions.ClusterSubnetQuotaExceeded,
+        "SNSInvalidTopic": exceptions.SNSInvalidTopic,
+        "ClusterSecurityGroupNotFound": exceptions.ClusterSecurityGroupNotFound,
+        "InvalidElasticIp": exceptions.InvalidElasticIp,
+
"InvalidClusterParameterGroupState": exceptions.InvalidClusterParameterGroupState, + "InvalidHsmConfigurationState": exceptions.InvalidHsmConfigurationState, + "ClusterAlreadyExists": exceptions.ClusterAlreadyExists, + "HsmConfigurationQuotaExceeded": exceptions.HsmConfigurationQuotaExceeded, + "ClusterSnapshotAlreadyExists": exceptions.ClusterSnapshotAlreadyExists, + "SubscriptionSeverityNotFound": exceptions.SubscriptionSeverityNotFound, + "SourceNotFound": exceptions.SourceNotFound, + "ReservedNodeAlreadyExists": exceptions.ReservedNodeAlreadyExists, + "ClusterSubnetGroupQuotaExceeded": exceptions.ClusterSubnetGroupQuotaExceeded, + "ClusterParameterGroupNotFound": exceptions.ClusterParameterGroupNotFound, + "InvalidS3BucketName": exceptions.InvalidS3BucketName, + "InvalidS3KeyPrefix": exceptions.InvalidS3KeyPrefix, + "SubscriptionAlreadyExist": exceptions.SubscriptionAlreadyExist, + "HsmConfigurationNotFound": exceptions.HsmConfigurationNotFound, + "InvalidSubscriptionState": exceptions.InvalidSubscriptionState, + "AuthorizationNotFound": exceptions.AuthorizationNotFound, + "ClusterSecurityGroupQuotaExceeded": exceptions.ClusterSecurityGroupQuotaExceeded, + "SubnetAlreadyInUse": exceptions.SubnetAlreadyInUse, + "EventSubscriptionQuotaExceeded": exceptions.EventSubscriptionQuotaExceeded, + "AuthorizationAlreadyExists": exceptions.AuthorizationAlreadyExists, + "InvalidClusterSnapshotState": exceptions.InvalidClusterSnapshotState, + "ClusterParameterGroupQuotaExceeded": exceptions.ClusterParameterGroupQuotaExceeded, + "SnapshotCopyDisabled": exceptions.SnapshotCopyDisabled, + "ClusterSubnetGroupAlreadyExists": exceptions.ClusterSubnetGroupAlreadyExists, + "ReservedNodeNotFound": exceptions.ReservedNodeNotFound, + "HsmClientCertificateAlreadyExists": exceptions.HsmClientCertificateAlreadyExists, + "InvalidClusterSubnetGroupState": exceptions.InvalidClusterSubnetGroupState, + "SubscriptionNotFound": exceptions.SubscriptionNotFound, + "InsufficientS3BucketPolicy": exceptions.InsufficientS3BucketPolicy, + "ClusterParameterGroupAlreadyExists": exceptions.ClusterParameterGroupAlreadyExists, + "UnsupportedOption": exceptions.UnsupportedOption, + "CopyToRegionDisabled": exceptions.CopyToRegionDisabled, + "SnapshotCopyAlreadyEnabled": exceptions.SnapshotCopyAlreadyEnabled, + "IncompatibleOrderableOptions": exceptions.IncompatibleOrderableOptions, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(RedshiftConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def authorize_cluster_security_group_ingress(self, + cluster_security_group_name, + cidrip=None, + ec2_security_group_name=None, + ec2_security_group_owner_id=None): + """ + Adds an inbound (ingress) rule to an Amazon Redshift security + group. Depending on whether the application accessing your + cluster is running on the Internet or an EC2 instance, you can + authorize inbound access to either a Classless Interdomain + Routing (CIDR) IP address range or an EC2 security group. You + can add as many as 20 ingress rules to an Amazon Redshift + security group. + + For an overview of CIDR blocks, see the Wikipedia article on + `Classless Inter-Domain Routing`_. 
+
+        You must also associate the security group with a cluster so
+        that clients running on these IP addresses or the EC2 instance
+        are authorized to connect to the cluster. For information
+        about managing security groups, go to `Working with Security
+        Groups`_ in the Amazon Redshift Management Guide .
+
+        :type cluster_security_group_name: string
+        :param cluster_security_group_name: The name of the security group to
+            which the ingress rule is added.
+
+        :type cidrip: string
+        :param cidrip: The IP range to be added to the Amazon Redshift security
+            group.
+
+        :type ec2_security_group_name: string
+        :param ec2_security_group_name: The EC2 security group to be added to
+            the Amazon Redshift security group.
+
+        :type ec2_security_group_owner_id: string
+        :param ec2_security_group_owner_id: The AWS account number of the owner
+            of the security group specified by the EC2SecurityGroupName
+            parameter. The AWS Access Key ID is not an acceptable value.
+            Example: `111122223333`
+
+        """
+        params = {
+            'ClusterSecurityGroupName': cluster_security_group_name,
+        }
+        if cidrip is not None:
+            params['CIDRIP'] = cidrip
+        if ec2_security_group_name is not None:
+            params['EC2SecurityGroupName'] = ec2_security_group_name
+        if ec2_security_group_owner_id is not None:
+            params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
+        return self._make_request(
+            action='AuthorizeClusterSecurityGroupIngress',
+            verb='POST',
+            path='/', params=params)
+
+    def authorize_snapshot_access(self, snapshot_identifier,
+                                  account_with_restore_access,
+                                  snapshot_cluster_identifier=None):
+        """
+        Authorizes the specified AWS customer account to restore the
+        specified snapshot.
+
+        For more information about working with snapshots, go to
+        `Amazon Redshift Snapshots`_ in the Amazon Redshift Management
+        Guide .
+
+        :type snapshot_identifier: string
+        :param snapshot_identifier: The identifier of the snapshot the account
+            is authorized to restore.
+
+        :type snapshot_cluster_identifier: string
+        :param snapshot_cluster_identifier: The identifier of the cluster the
+            snapshot was created from. This parameter is required if your IAM
+            user has a policy containing a snapshot resource element that
+            specifies anything other than * for the cluster name.
+
+        :type account_with_restore_access: string
+        :param account_with_restore_access: The identifier of the AWS customer
+            account authorized to restore the specified snapshot.
+
+        """
+        params = {
+            'SnapshotIdentifier': snapshot_identifier,
+            'AccountWithRestoreAccess': account_with_restore_access,
+        }
+        if snapshot_cluster_identifier is not None:
+            params['SnapshotClusterIdentifier'] = snapshot_cluster_identifier
+        return self._make_request(
+            action='AuthorizeSnapshotAccess',
+            verb='POST',
+            path='/', params=params)
+
+    def copy_cluster_snapshot(self, source_snapshot_identifier,
+                              target_snapshot_identifier,
+                              source_snapshot_cluster_identifier=None):
+        """
+        Copies the specified automated cluster snapshot to a new
+        manual cluster snapshot. The source must be an automated
+        snapshot and it must be in the available state.
+
+        When you delete a cluster, Amazon Redshift deletes any
+        automated snapshots of the cluster. Also, when the retention
+        period of the snapshot expires, Amazon Redshift automatically
+        deletes it. If you want to keep an automated snapshot for a
+        longer period, you can make a manual copy of the snapshot.
+        Manual snapshots are retained until you delete them.
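+
+        For example (both snapshot identifiers are invented
+        placeholders)::
+
+            conn.copy_cluster_snapshot(
+                source_snapshot_identifier='rs:examplecluster-2013-01-22-19-27-58',
+                target_snapshot_identifier='my-manual-copy')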
+
+        For more information about working with snapshots, go to
+        `Amazon Redshift Snapshots`_ in the Amazon Redshift Management
+        Guide .
+
+        :type source_snapshot_identifier: string
+        :param source_snapshot_identifier:
+            The identifier for the source snapshot.
+
+        Constraints:
+
+
+        + Must be the identifier for a valid automated snapshot whose state is
+          `available`.
+
+        :type source_snapshot_cluster_identifier: string
+        :param source_snapshot_cluster_identifier:
+            The identifier of the cluster the source snapshot was created from.
+            This parameter is required if your IAM user has a policy containing
+            a snapshot resource element that specifies anything other than *
+            for the cluster name.
+
+        Constraints:
+
+
+        + Must be the identifier for a valid cluster.
+
+        :type target_snapshot_identifier: string
+        :param target_snapshot_identifier:
+            The identifier given to the new manual snapshot.
+
+        Constraints:
+
+
+        + Cannot be null, empty, or blank.
+        + Must contain from 1 to 255 alphanumeric characters or hyphens.
+        + First character must be a letter.
+        + Cannot end with a hyphen or contain two consecutive hyphens.
+        + Must be unique for the AWS account that is making the request.
+
+        """
+        params = {
+            'SourceSnapshotIdentifier': source_snapshot_identifier,
+            'TargetSnapshotIdentifier': target_snapshot_identifier,
+        }
+        if source_snapshot_cluster_identifier is not None:
+            params['SourceSnapshotClusterIdentifier'] = source_snapshot_cluster_identifier
+        return self._make_request(
+            action='CopyClusterSnapshot',
+            verb='POST',
+            path='/', params=params)
+
+    def create_cluster(self, cluster_identifier, node_type, master_username,
+                       master_user_password, db_name=None, cluster_type=None,
+                       cluster_security_groups=None,
+                       vpc_security_group_ids=None,
+                       cluster_subnet_group_name=None,
+                       availability_zone=None,
+                       preferred_maintenance_window=None,
+                       cluster_parameter_group_name=None,
+                       automated_snapshot_retention_period=None, port=None,
+                       cluster_version=None, allow_version_upgrade=None,
+                       number_of_nodes=None, publicly_accessible=None,
+                       encrypted=None,
+                       hsm_client_certificate_identifier=None,
+                       hsm_configuration_identifier=None, elastic_ip=None):
+        """
+        Creates a new cluster. To create the cluster in virtual
+        private cloud (VPC), you must provide a cluster subnet group
+        name. If you don't provide a cluster subnet group name or the
+        cluster security group parameter, Amazon Redshift creates a
+        non-VPC cluster and associates the default cluster security
+        group with the cluster. For more information about managing
+        clusters, go to `Amazon Redshift Clusters`_ in the Amazon
+        Redshift Management Guide .
+
+        :type db_name: string
+        :param db_name:
+            The name of the first database to be created when the cluster is
+            created.
+
+        To create additional databases after the cluster is created, connect to
+            the cluster with a SQL client and use SQL commands to create a
+            database. For more information, go to `Create a Database`_ in the
+            Amazon Redshift Database Developer Guide.
+
+        Default: `dev`
+
+        Constraints:
+
+
+        + Must contain 1 to 64 alphanumeric characters.
+        + Must contain only lowercase letters.
+        + Cannot be a word that is reserved by the service. A list of reserved
+          words can be found in `Reserved Words`_ in the Amazon Redshift
+          Database Developer Guide.
+
+        :type cluster_identifier: string
+        :param cluster_identifier: A unique identifier for the cluster. You use
+            this identifier to refer to the cluster for any subsequent cluster
+            operations such as deleting or modifying.
+            The identifier also appears in the Amazon Redshift console.
+        Constraints:
+
+
+        + Must contain from 1 to 63 alphanumeric characters or hyphens.
+        + Alphabetic characters must be lowercase.
+        + First character must be a letter.
+        + Cannot end with a hyphen or contain two consecutive hyphens.
+        + Must be unique for all clusters within an AWS account.
+
+
+        Example: `myexamplecluster`
+
+        :type cluster_type: string
+        :param cluster_type: The type of the cluster. When cluster type is
+            specified as
+
+        + `single-node`, the **NumberOfNodes** parameter is not required.
+        + `multi-node`, the **NumberOfNodes** parameter is required.
+
+
+        Valid Values: `multi-node` | `single-node`
+
+        Default: `multi-node`
+
+        :type node_type: string
+        :param node_type: The node type to be provisioned for the cluster. For
+            information about node types, go to `Working with Clusters`_ in
+            the Amazon Redshift Management Guide .
+        Valid Values: `dw1.xlarge` | `dw1.8xlarge` | `dw2.large` |
+            `dw2.8xlarge`.
+
+        :type master_username: string
+        :param master_username:
+            The user name associated with the master user account for the cluster
+            that is being created.
+
+        Constraints:
+
+
+        + Must be 1 - 128 alphanumeric characters.
+        + First character must be a letter.
+        + Cannot be a reserved word. A list of reserved words can be found in
+          `Reserved Words`_ in the Amazon Redshift Database Developer Guide.
+
+        :type master_user_password: string
+        :param master_user_password:
+            The password associated with the master user account for the cluster
+            that is being created.
+
+        Constraints:
+
+
+        + Must be between 8 and 64 characters in length.
+        + Must contain at least one uppercase letter.
+        + Must contain at least one lowercase letter.
+        + Must contain one number.
+        + Can be any printable ASCII character (ASCII code 33 to 126) except '
+          (single quote), " (double quote), \, /, @, or space.
+
+        :type cluster_security_groups: list
+        :param cluster_security_groups: A list of security groups to be
+            associated with this cluster.
+        Default: The default cluster security group for Amazon Redshift.
+
+        :type vpc_security_group_ids: list
+        :param vpc_security_group_ids: A list of Virtual Private Cloud (VPC)
+            security groups to be associated with the cluster.
+        Default: The default VPC security group is associated with the cluster.
+
+        :type cluster_subnet_group_name: string
+        :param cluster_subnet_group_name: The name of a cluster subnet group to
+            be associated with this cluster.
+        If this parameter is not provided, the resulting cluster will be
+            deployed outside the virtual private cloud (VPC).
+
+        :type availability_zone: string
+        :param availability_zone: The EC2 Availability Zone (AZ) in which you
+            want Amazon Redshift to provision the cluster. For example, if you
+            have several EC2 instances running in a specific Availability Zone,
+            then you might want the cluster to be provisioned in the same zone
+            in order to decrease network latency.
+        Default: A random, system-chosen Availability Zone in the region that
+            is specified by the endpoint.
+
+        Example: `us-east-1d`
+
+        Constraint: The specified Availability Zone must be in the same region
+            as the current endpoint.
+
+        :type preferred_maintenance_window: string
+        :param preferred_maintenance_window: The weekly time range (in UTC)
+            during which automated cluster maintenance can occur.
+        Format: `ddd:hh24:mi-ddd:hh24:mi`
+
+        Default: A 30-minute window selected at random from an 8-hour block of
+            time per region, occurring on a random day of the week.
The + following list shows the time blocks for each region from which the + default maintenance windows are assigned. + + + + **US-East (Northern Virginia) Region:** 03:00-11:00 UTC + + **US-West (Oregon) Region** 06:00-14:00 UTC + + **EU (Ireland) Region** 22:00-06:00 UTC + + **Asia Pacific (Singapore) Region** 14:00-22:00 UTC + + **Asia Pacific (Sydney) Region** 12:00-20:00 UTC + + **Asia Pacific (Tokyo) Region** 17:00-03:00 UTC + + + Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun + + Constraints: Minimum 30-minute window. + + :type cluster_parameter_group_name: string + :param cluster_parameter_group_name: + The name of the parameter group to be associated with this cluster. + + Default: The default Amazon Redshift cluster parameter group. For + information about the default parameter group, go to `Working with + Amazon Redshift Parameter Groups`_ + + Constraints: + + + + Must be 1 to 255 alphanumeric characters or hyphens. + + First character must be a letter. + + Cannot end with a hyphen or contain two consecutive hyphens. + + :type automated_snapshot_retention_period: integer + :param automated_snapshot_retention_period: The number of days that + automated snapshots are retained. If the value is 0, automated + snapshots are disabled. Even if automated snapshots are disabled, + you can still create manual snapshots when you want with + CreateClusterSnapshot. + Default: `1` + + Constraints: Must be a value from 0 to 35. + + :type port: integer + :param port: The port number on which the cluster accepts incoming + connections. + The cluster is accessible only via the JDBC and ODBC connection + strings. Part of the connection string requires the port on which + the cluster will listen for incoming connections. + + Default: `5439` + + Valid Values: `1150-65535` + + :type cluster_version: string + :param cluster_version: The version of the Amazon Redshift engine + software that you want to deploy on the cluster. + The version selected runs on all the nodes in the cluster. + + Constraints: Only version 1.0 is currently available. + + Example: `1.0` + + :type allow_version_upgrade: boolean + :param allow_version_upgrade: If `True`, upgrades can be applied during + the maintenance window to the Amazon Redshift engine that is + running on the cluster. + When a new version of the Amazon Redshift engine is released, you can + request that the service automatically apply upgrades during the + maintenance window to the Amazon Redshift engine that is running on + your cluster. + + Default: `True` + + :type number_of_nodes: integer + :param number_of_nodes: The number of compute nodes in the cluster. + This parameter is required when the **ClusterType** parameter is + specified as `multi-node`. + For information about determining how many nodes you need, go to ` + Working with Clusters`_ in the Amazon Redshift Management Guide . + + If you don't specify this parameter, you get a single-node cluster. + When requesting a multi-node cluster, you must specify the number + of nodes that you want in the cluster. + + Default: `1` + + Constraints: Value must be at least 1 and no more than 100. + + :type publicly_accessible: boolean + :param publicly_accessible: If `True`, the cluster can be accessed from + a public network. + + :type encrypted: boolean + :param encrypted: If `True`, the data in the cluster is encrypted at + rest. 
+ Default: false + + :type hsm_client_certificate_identifier: string + :param hsm_client_certificate_identifier: Specifies the name of the HSM + client certificate the Amazon Redshift cluster uses to retrieve the + data encryption keys stored in an HSM. + + :type hsm_configuration_identifier: string + :param hsm_configuration_identifier: Specifies the name of the HSM + configuration that contains the information the Amazon Redshift + cluster can use to retrieve and store keys in an HSM. + + :type elastic_ip: string + :param elastic_ip: The Elastic IP (EIP) address for the cluster. + Constraints: The cluster must be provisioned in EC2-VPC and publicly- + accessible through an Internet gateway. For more information about + provisioning clusters in EC2-VPC, go to `Supported Platforms to + Launch Your Cluster`_ in the Amazon Redshift Management Guide. + + """ + params = { + 'ClusterIdentifier': cluster_identifier, + 'NodeType': node_type, + 'MasterUsername': master_username, + 'MasterUserPassword': master_user_password, + } + if db_name is not None: + params['DBName'] = db_name + if cluster_type is not None: + params['ClusterType'] = cluster_type + if cluster_security_groups is not None: + self.build_list_params(params, + cluster_security_groups, + 'ClusterSecurityGroups.member') + if vpc_security_group_ids is not None: + self.build_list_params(params, + vpc_security_group_ids, + 'VpcSecurityGroupIds.member') + if cluster_subnet_group_name is not None: + params['ClusterSubnetGroupName'] = cluster_subnet_group_name + if availability_zone is not None: + params['AvailabilityZone'] = availability_zone + if preferred_maintenance_window is not None: + params['PreferredMaintenanceWindow'] = preferred_maintenance_window + if cluster_parameter_group_name is not None: + params['ClusterParameterGroupName'] = cluster_parameter_group_name + if automated_snapshot_retention_period is not None: + params['AutomatedSnapshotRetentionPeriod'] = automated_snapshot_retention_period + if port is not None: + params['Port'] = port + if cluster_version is not None: + params['ClusterVersion'] = cluster_version + if allow_version_upgrade is not None: + params['AllowVersionUpgrade'] = str( + allow_version_upgrade).lower() + if number_of_nodes is not None: + params['NumberOfNodes'] = number_of_nodes + if publicly_accessible is not None: + params['PubliclyAccessible'] = str( + publicly_accessible).lower() + if encrypted is not None: + params['Encrypted'] = str( + encrypted).lower() + if hsm_client_certificate_identifier is not None: + params['HsmClientCertificateIdentifier'] = hsm_client_certificate_identifier + if hsm_configuration_identifier is not None: + params['HsmConfigurationIdentifier'] = hsm_configuration_identifier + if elastic_ip is not None: + params['ElasticIp'] = elastic_ip + return self._make_request( + action='CreateCluster', + verb='POST', + path='/', params=params) + + def create_cluster_parameter_group(self, parameter_group_name, + parameter_group_family, description): + """ + Creates an Amazon Redshift parameter group. + + Creating parameter groups is independent of creating clusters. + You can associate a cluster with a parameter group when you + create the cluster. You can also associate an existing cluster + with a parameter group after the cluster is created by using + ModifyCluster. + + Parameters in the parameter group define specific behavior + that applies to the databases you create on the cluster. 
For + more information about managing parameter groups, go to + `Amazon Redshift Parameter Groups`_ in the Amazon Redshift + Management Guide . + + :type parameter_group_name: string + :param parameter_group_name: + The name of the cluster parameter group. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters or hyphens + + First character must be a letter. + + Cannot end with a hyphen or contain two consecutive hyphens. + + Must be unique within your AWS account. + + This value is stored as a lower-case string. + + :type parameter_group_family: string + :param parameter_group_family: The Amazon Redshift engine version to + which the cluster parameter group applies. The cluster engine + version determines the set of parameters. + To get a list of valid parameter group family names, you can call + DescribeClusterParameterGroups. By default, Amazon Redshift returns + a list of all the parameter groups that are owned by your AWS + account, including the default parameter groups for each Amazon + Redshift engine version. The parameter group family names + associated with the default parameter groups provide you the valid + values. For example, a valid family name is "redshift-1.0". + + :type description: string + :param description: A description of the parameter group. + + """ + params = { + 'ParameterGroupName': parameter_group_name, + 'ParameterGroupFamily': parameter_group_family, + 'Description': description, + } + return self._make_request( + action='CreateClusterParameterGroup', + verb='POST', + path='/', params=params) + + def create_cluster_security_group(self, cluster_security_group_name, + description): + """ + Creates a new Amazon Redshift security group. You use security + groups to control access to non-VPC clusters. + + For information about managing security groups, go to `Amazon + Redshift Cluster Security Groups`_ in the Amazon Redshift + Management Guide . + + :type cluster_security_group_name: string + :param cluster_security_group_name: The name for the security group. + Amazon Redshift stores the value as a lowercase string. + Constraints: + + + + Must contain no more than 255 alphanumeric characters or hyphens. + + Must not be "Default". + + Must be unique for all security groups that are created by your AWS + account. + + + Example: `examplesecuritygroup` + + :type description: string + :param description: A description for the security group. + + """ + params = { + 'ClusterSecurityGroupName': cluster_security_group_name, + 'Description': description, + } + return self._make_request( + action='CreateClusterSecurityGroup', + verb='POST', + path='/', params=params) + + def create_cluster_snapshot(self, snapshot_identifier, + cluster_identifier): + """ + Creates a manual snapshot of the specified cluster. The + cluster must be in the `available` state. + + For more information about working with snapshots, go to + `Amazon Redshift Snapshots`_ in the Amazon Redshift Management + Guide . + + :type snapshot_identifier: string + :param snapshot_identifier: A unique identifier for the snapshot that + you are requesting. This identifier must be unique for all + snapshots within the AWS account. + Constraints: + + + + Cannot be null, empty, or blank + + Must contain from 1 to 255 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + + Example: `my-snapshot-id` + + :type cluster_identifier: string + :param cluster_identifier: The cluster identifier for which you want a + snapshot. 
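+
+        For example (both identifiers are invented placeholders)::
+
+            conn.create_cluster_snapshot('my-snapshot-id',
+                                         'examplecluster')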
+ + """ + params = { + 'SnapshotIdentifier': snapshot_identifier, + 'ClusterIdentifier': cluster_identifier, + } + return self._make_request( + action='CreateClusterSnapshot', + verb='POST', + path='/', params=params) + + def create_cluster_subnet_group(self, cluster_subnet_group_name, + description, subnet_ids): + """ + Creates a new Amazon Redshift subnet group. You must provide a + list of one or more subnets in your existing Amazon Virtual + Private Cloud (Amazon VPC) when creating Amazon Redshift + subnet group. + + For information about subnet groups, go to `Amazon Redshift + Cluster Subnet Groups`_ in the Amazon Redshift Management + Guide . + + :type cluster_subnet_group_name: string + :param cluster_subnet_group_name: The name for the subnet group. Amazon + Redshift stores the value as a lowercase string. + Constraints: + + + + Must contain no more than 255 alphanumeric characters or hyphens. + + Must not be "Default". + + Must be unique for all subnet groups that are created by your AWS + account. + + + Example: `examplesubnetgroup` + + :type description: string + :param description: A description for the subnet group. + + :type subnet_ids: list + :param subnet_ids: An array of VPC subnet IDs. A maximum of 20 subnets + can be modified in a single request. + + """ + params = { + 'ClusterSubnetGroupName': cluster_subnet_group_name, + 'Description': description, + } + self.build_list_params(params, + subnet_ids, + 'SubnetIds.member') + return self._make_request( + action='CreateClusterSubnetGroup', + verb='POST', + path='/', params=params) + + def create_event_subscription(self, subscription_name, sns_topic_arn, + source_type=None, source_ids=None, + event_categories=None, severity=None, + enabled=None): + """ + Creates an Amazon Redshift event notification subscription. + This action requires an ARN (Amazon Resource Name) of an + Amazon SNS topic created by either the Amazon Redshift + console, the Amazon SNS console, or the Amazon SNS API. To + obtain an ARN with Amazon SNS, you must create a topic in + Amazon SNS and subscribe to the topic. The ARN is displayed in + the SNS console. + + You can specify the source type, and lists of Amazon Redshift + source IDs, event categories, and event severities. + Notifications will be sent for all events you want that match + those criteria. For example, you can specify source type = + cluster, source ID = my-cluster-1 and mycluster2, event + categories = Availability, Backup, and severity = ERROR. The + subscription will only send notifications for those ERROR + events in the Availability and Backup categories for the + specified clusters. + + If you specify both the source type and source IDs, such as + source type = cluster and source identifier = my-cluster-1, + notifications will be sent for all the cluster events for my- + cluster-1. If you specify a source type but do not specify a + source identifier, you will receive notice of the events for + the objects of that type in your AWS account. If you do not + specify either the SourceType nor the SourceIdentifier, you + will be notified of events generated from all Amazon Redshift + sources belonging to your AWS account. You must specify a + source type if you specify a source ID. + + :type subscription_name: string + :param subscription_name: + The name of the event subscription to be created. + + Constraints: + + + + Cannot be null, empty, or blank. + + Must contain from 1 to 255 alphanumeric characters or hyphens. + + First character must be a letter. 
+        + Cannot end with a hyphen or contain two consecutive hyphens.
+
+        :type sns_topic_arn: string
+        :param sns_topic_arn: The Amazon Resource Name (ARN) of the Amazon SNS
+            topic used to transmit the event notifications. The ARN is created
+            by Amazon SNS when you create a topic and subscribe to it.
+
+        :type source_type: string
+        :param source_type: The type of source that will be generating the
+            events. For example, if you want to be notified of events generated
+            by a cluster, you would set this parameter to cluster. If this
+            value is not specified, events are returned for all Amazon Redshift
+            objects in your AWS account. You must specify a source type in
+            order to specify source IDs.
+        Valid values: cluster, cluster-parameter-group, cluster-security-group,
+            and cluster-snapshot.
+
+        :type source_ids: list
+        :param source_ids: A list of one or more identifiers of Amazon Redshift
+            source objects. All of the objects must be of the same type as was
+            specified in the source type parameter. The event subscription will
+            return only events generated by the specified objects. If not
+            specified, then events are returned for all objects within the
+            source type specified.
+        Example: my-cluster-1, my-cluster-2
+
+        Example: my-snapshot-20131010
+
+        :type event_categories: list
+        :param event_categories: Specifies the Amazon Redshift event categories
+            to be published by the event notification subscription.
+        Values: Configuration, Management, Monitoring, Security
+
+        :type severity: string
+        :param severity: Specifies the Amazon Redshift event severity to be
+            published by the event notification subscription.
+        Values: ERROR, INFO
+
+        :type enabled: boolean
+        :param enabled: A Boolean value; set to `True` to activate the
+            subscription, set to `False` to create the subscription but not
+            activate it.
+
+        """
+        params = {
+            'SubscriptionName': subscription_name,
+            'SnsTopicArn': sns_topic_arn,
+        }
+        if source_type is not None:
+            params['SourceType'] = source_type
+        if source_ids is not None:
+            self.build_list_params(params,
+                                   source_ids,
+                                   'SourceIds.member')
+        if event_categories is not None:
+            self.build_list_params(params,
+                                   event_categories,
+                                   'EventCategories.member')
+        if severity is not None:
+            params['Severity'] = severity
+        if enabled is not None:
+            params['Enabled'] = str(
+                enabled).lower()
+        return self._make_request(
+            action='CreateEventSubscription',
+            verb='POST',
+            path='/', params=params)
+
+    def create_hsm_client_certificate(self,
+                                      hsm_client_certificate_identifier):
+        """
+        Creates an HSM client certificate that an Amazon Redshift
+        cluster will use to connect to the client's HSM in order to
+        store and retrieve the keys used to encrypt the cluster
+        databases.
+
+        The command returns a public key, which you must store in the
+        HSM. In addition to creating the HSM certificate, you must
+        create an Amazon Redshift HSM configuration that provides a
+        cluster the information needed to store and use encryption
+        keys in the HSM. For more information, go to `Hardware
+        Security Modules`_ in the Amazon Redshift Management Guide.
+
+        :type hsm_client_certificate_identifier: string
+        :param hsm_client_certificate_identifier: The identifier to be assigned
+            to the new HSM client certificate that the cluster will use to
+            connect to the HSM to use the database encryption keys.
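+
+        For example (the identifier is an invented placeholder)::
+
+            conn.create_hsm_client_certificate('example-hsm-cert')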
+ + """ + params = { + 'HsmClientCertificateIdentifier': hsm_client_certificate_identifier, + } + return self._make_request( + action='CreateHsmClientCertificate', + verb='POST', + path='/', params=params) + + def create_hsm_configuration(self, hsm_configuration_identifier, + description, hsm_ip_address, + hsm_partition_name, hsm_partition_password, + hsm_server_public_certificate): + """ + Creates an HSM configuration that contains the information + required by an Amazon Redshift cluster to store and use + database encryption keys in a Hardware Security Module (HSM). + After creating the HSM configuration, you can specify it as a + parameter when creating a cluster. The cluster will then store + its encryption keys in the HSM. + + In addition to creating an HSM configuration, you must also + create an HSM client certificate. For more information, go to + `Hardware Security Modules`_ in the Amazon Redshift Management + Guide. + + :type hsm_configuration_identifier: string + :param hsm_configuration_identifier: The identifier to be assigned to + the new Amazon Redshift HSM configuration. + + :type description: string + :param description: A text description of the HSM configuration to be + created. + + :type hsm_ip_address: string + :param hsm_ip_address: The IP address that the Amazon Redshift cluster + must use to access the HSM. + + :type hsm_partition_name: string + :param hsm_partition_name: The name of the partition in the HSM where + the Amazon Redshift clusters will store their database encryption + keys. + + :type hsm_partition_password: string + :param hsm_partition_password: The password required to access the HSM + partition. + + :type hsm_server_public_certificate: string + :param hsm_server_public_certificate: The HSMs public certificate file. + When using Cloud HSM, the file name is server.pem. + + """ + params = { + 'HsmConfigurationIdentifier': hsm_configuration_identifier, + 'Description': description, + 'HsmIpAddress': hsm_ip_address, + 'HsmPartitionName': hsm_partition_name, + 'HsmPartitionPassword': hsm_partition_password, + 'HsmServerPublicCertificate': hsm_server_public_certificate, + } + return self._make_request( + action='CreateHsmConfiguration', + verb='POST', + path='/', params=params) + + def delete_cluster(self, cluster_identifier, + skip_final_cluster_snapshot=None, + final_cluster_snapshot_identifier=None): + """ + Deletes a previously provisioned cluster. A successful + response from the web service indicates that the request was + received correctly. If a final cluster snapshot is requested + the status of the cluster will be "final-snapshot" while the + snapshot is being taken, then it's "deleting" once Amazon + Redshift begins deleting the cluster. Use DescribeClusters to + monitor the status of the deletion. The delete operation + cannot be canceled or reverted once submitted. For more + information about managing clusters, go to `Amazon Redshift + Clusters`_ in the Amazon Redshift Management Guide . + + :type cluster_identifier: string + :param cluster_identifier: + The identifier of the cluster to be deleted. + + Constraints: + + + + Must contain lowercase characters. + + Must contain from 1 to 63 alphanumeric characters or hyphens. + + First character must be a letter. + + Cannot end with a hyphen or contain two consecutive hyphens. + + :type skip_final_cluster_snapshot: boolean + :param skip_final_cluster_snapshot: Determines whether a final snapshot + of the cluster is created before Amazon Redshift deletes the + cluster. 
+            If `True`, a final cluster snapshot is not created. If
+            `False`, a final cluster snapshot is created before the cluster is
+            deleted.
+        Default: `False`
+
+        :type final_cluster_snapshot_identifier: string
+        :param final_cluster_snapshot_identifier:
+            The identifier of the final snapshot that is to be created immediately
+            before deleting the cluster. If this parameter is provided,
+            SkipFinalClusterSnapshot must be `False`.
+
+        Constraints:
+
+
+        + Must be 1 to 255 alphanumeric characters.
+        + First character must be a letter.
+        + Cannot end with a hyphen or contain two consecutive hyphens.
+
+        """
+        params = {'ClusterIdentifier': cluster_identifier, }
+        if skip_final_cluster_snapshot is not None:
+            params['SkipFinalClusterSnapshot'] = str(
+                skip_final_cluster_snapshot).lower()
+        if final_cluster_snapshot_identifier is not None:
+            params['FinalClusterSnapshotIdentifier'] = final_cluster_snapshot_identifier
+        return self._make_request(
+            action='DeleteCluster',
+            verb='POST',
+            path='/', params=params)
+
+    def delete_cluster_parameter_group(self, parameter_group_name):
+        """
+        Deletes a specified Amazon Redshift parameter group.
+
+        :type parameter_group_name: string
+        :param parameter_group_name:
+            The name of the parameter group to be deleted.
+
+        Constraints:
+
+
+        + Must be the name of an existing cluster parameter group.
+        + Cannot delete a default cluster parameter group.
+
+        """
+        params = {'ParameterGroupName': parameter_group_name, }
+        return self._make_request(
+            action='DeleteClusterParameterGroup',
+            verb='POST',
+            path='/', params=params)
+
+    def delete_cluster_security_group(self, cluster_security_group_name):
+        """
+        Deletes an Amazon Redshift security group.
+
+        For information about managing security groups, go to `Amazon
+        Redshift Cluster Security Groups`_ in the Amazon Redshift
+        Management Guide .
+
+        :type cluster_security_group_name: string
+        :param cluster_security_group_name: The name of the cluster security
+            group to be deleted.
+
+        """
+        params = {
+            'ClusterSecurityGroupName': cluster_security_group_name,
+        }
+        return self._make_request(
+            action='DeleteClusterSecurityGroup',
+            verb='POST',
+            path='/', params=params)
+
+    def delete_cluster_snapshot(self, snapshot_identifier,
+                                snapshot_cluster_identifier=None):
+        """
+        Deletes the specified manual snapshot. The snapshot must be in
+        the `available` state, with no other users authorized to
+        access the snapshot.
+
+        Unlike automated snapshots, manual snapshots are retained even
+        after you delete your cluster. Amazon Redshift does not delete
+        your manual snapshots. You must delete manual snapshots
+        explicitly to avoid getting charged. If other accounts are
+        authorized to access the snapshot, you must revoke all of the
+        authorizations before you can delete the snapshot.
+
+        :type snapshot_identifier: string
+        :param snapshot_identifier: The unique identifier of the manual
+            snapshot to be deleted.
+        Constraints: Must be the name of an existing snapshot that is in the
+            `available` state.
+
+        :type snapshot_cluster_identifier: string
+        :param snapshot_cluster_identifier: The unique identifier of the
+            cluster the snapshot was created from. This parameter is required
+            if your IAM user has a policy containing a snapshot resource
+            element that specifies anything other than * for the cluster name.
+        Constraints: Must be the name of a valid cluster.
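+
+        For example (the snapshot identifier is an invented
+        placeholder)::
+
+            conn.delete_cluster_snapshot('my-manual-copy')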
+ + """ + params = {'SnapshotIdentifier': snapshot_identifier, } + if snapshot_cluster_identifier is not None: + params['SnapshotClusterIdentifier'] = snapshot_cluster_identifier + return self._make_request( + action='DeleteClusterSnapshot', + verb='POST', + path='/', params=params) + + def delete_cluster_subnet_group(self, cluster_subnet_group_name): + """ + Deletes the specified cluster subnet group. + + :type cluster_subnet_group_name: string + :param cluster_subnet_group_name: The name of the cluster subnet group + name to be deleted. + + """ + params = { + 'ClusterSubnetGroupName': cluster_subnet_group_name, + } + return self._make_request( + action='DeleteClusterSubnetGroup', + verb='POST', + path='/', params=params) + + def delete_event_subscription(self, subscription_name): + """ + Deletes an Amazon Redshift event notification subscription. + + :type subscription_name: string + :param subscription_name: The name of the Amazon Redshift event + notification subscription to be deleted. + + """ + params = {'SubscriptionName': subscription_name, } + return self._make_request( + action='DeleteEventSubscription', + verb='POST', + path='/', params=params) + + def delete_hsm_client_certificate(self, + hsm_client_certificate_identifier): + """ + Deletes the specified HSM client certificate. + + :type hsm_client_certificate_identifier: string + :param hsm_client_certificate_identifier: The identifier of the HSM + client certificate to be deleted. + + """ + params = { + 'HsmClientCertificateIdentifier': hsm_client_certificate_identifier, + } + return self._make_request( + action='DeleteHsmClientCertificate', + verb='POST', + path='/', params=params) + + def delete_hsm_configuration(self, hsm_configuration_identifier): + """ + Deletes the specified Amazon Redshift HSM configuration. + + :type hsm_configuration_identifier: string + :param hsm_configuration_identifier: The identifier of the Amazon + Redshift HSM configuration to be deleted. + + """ + params = { + 'HsmConfigurationIdentifier': hsm_configuration_identifier, + } + return self._make_request( + action='DeleteHsmConfiguration', + verb='POST', + path='/', params=params) + + def describe_cluster_parameter_groups(self, parameter_group_name=None, + max_records=None, marker=None): + """ + Returns a list of Amazon Redshift parameter groups, including + parameter groups you created and the default parameter group. + For each parameter group, the response includes the parameter + group name, description, and parameter group family name. You + can optionally specify a name to retrieve the description of a + specific parameter group. + + For more information about managing parameter groups, go to + `Amazon Redshift Parameter Groups`_ in the Amazon Redshift + Management Guide . + + :type parameter_group_name: string + :param parameter_group_name: The name of a specific parameter group for + which to return details. By default, details about all parameter + groups and the default parameter group are returned. + + :type max_records: integer + :param max_records: The maximum number of response records to return in + each call. If the number of remaining response records exceeds the + specified `MaxRecords` value, a value is returned in a `marker` + field of the response. You can retrieve the next set of records by + retrying the command with the returned marker value. + Default: `100` + + Constraints: minimum 20, maximum 100. 
+
+        :type marker: string
+        :param marker: An optional parameter that specifies the starting point
+            to return a set of response records. When the results of a
+            DescribeClusterParameterGroups request exceed the value specified
+            in `MaxRecords`, AWS returns a value in the `Marker` field of the
+            response. You can retrieve the next set of response records by
+            providing the returned marker value in the `Marker` parameter and
+            retrying the request.
+
+        """
+        params = {}
+        if parameter_group_name is not None:
+            params['ParameterGroupName'] = parameter_group_name
+        if max_records is not None:
+            params['MaxRecords'] = max_records
+        if marker is not None:
+            params['Marker'] = marker
+        return self._make_request(
+            action='DescribeClusterParameterGroups',
+            verb='POST',
+            path='/', params=params)
+
+    def describe_cluster_parameters(self, parameter_group_name, source=None,
+                                    max_records=None, marker=None):
+        """
+        Returns a detailed list of parameters contained within the
+        specified Amazon Redshift parameter group. For each parameter
+        the response includes information such as parameter name,
+        description, data type, value, whether the parameter value is
+        modifiable, and so on.
+
+        You can specify a source filter to retrieve parameters of only a
+        specific type. For example, to retrieve parameters that were
+        modified by a user action such as from
+        ModifyClusterParameterGroup, you can specify source equal to
+        user .
+
+        For more information about managing parameter groups, go to
+        `Amazon Redshift Parameter Groups`_ in the Amazon Redshift
+        Management Guide .
+
+        :type parameter_group_name: string
+        :param parameter_group_name: The name of a cluster parameter group for
+            which to return details.
+
+        :type source: string
+        :param source: The parameter types to return. Specify `user` to show
+            parameters that are different from the default. Similarly, specify
+            `engine-default` to show parameters that are the same as the
+            default parameter group.
+        Default: All parameter types returned.
+
+        Valid Values: `user` | `engine-default`
+
+        :type max_records: integer
+        :param max_records: The maximum number of response records to return in
+            each call. If the number of remaining response records exceeds the
+            specified `MaxRecords` value, a value is returned in a `marker`
+            field of the response. You can retrieve the next set of records by
+            retrying the command with the returned marker value.
+        Default: `100`
+
+        Constraints: minimum 20, maximum 100.
+
+        :type marker: string
+        :param marker: An optional parameter that specifies the starting point
+            to return a set of response records. When the results of a
+            DescribeClusterParameters request exceed the value specified in
+            `MaxRecords`, AWS returns a value in the `Marker` field of the
+            response. You can retrieve the next set of response records by
+            providing the returned marker value in the `Marker` parameter and
+            retrying the request.
+
+        """
+        params = {'ParameterGroupName': parameter_group_name, }
+        if source is not None:
+            params['Source'] = source
+        if max_records is not None:
+            params['MaxRecords'] = max_records
+        if marker is not None:
+            params['Marker'] = marker
+        return self._make_request(
+            action='DescribeClusterParameters',
+            verb='POST',
+            path='/', params=params)
+
+    def describe_cluster_security_groups(self,
+                                         cluster_security_group_name=None,
+                                         max_records=None, marker=None):
+        """
+        Returns information about Amazon Redshift security groups.
+        If the name of a security group is specified, the response will
+        contain information about only that security group.
+
+        For information about managing security groups, go to `Amazon
+        Redshift Cluster Security Groups`_ in the Amazon Redshift
+        Management Guide .
+
+        :type cluster_security_group_name: string
+        :param cluster_security_group_name: The name of a cluster security
+            group for which you are requesting details. You can specify either
+            the **Marker** parameter or a **ClusterSecurityGroupName**
+            parameter, but not both.
+        Example: `securitygroup1`
+
+        :type max_records: integer
+        :param max_records: The maximum number of response records to return in
+            each call. If the number of remaining response records exceeds the
+            specified `MaxRecords` value, a value is returned in a `marker`
+            field of the response. You can retrieve the next set of records by
+            retrying the command with the returned marker value.
+        Default: `100`
+
+        Constraints: minimum 20, maximum 100.
+
+        :type marker: string
+        :param marker: An optional parameter that specifies the starting point
+            to return a set of response records. When the results of a
+            DescribeClusterSecurityGroups request exceed the value specified in
+            `MaxRecords`, AWS returns a value in the `Marker` field of the
+            response. You can retrieve the next set of response records by
+            providing the returned marker value in the `Marker` parameter and
+            retrying the request.
+        Constraints: You can specify either the **ClusterSecurityGroupName**
+            parameter or the **Marker** parameter, but not both.
+
+        """
+        params = {}
+        if cluster_security_group_name is not None:
+            params['ClusterSecurityGroupName'] = cluster_security_group_name
+        if max_records is not None:
+            params['MaxRecords'] = max_records
+        if marker is not None:
+            params['Marker'] = marker
+        return self._make_request(
+            action='DescribeClusterSecurityGroups',
+            verb='POST',
+            path='/', params=params)
+
+    def describe_cluster_snapshots(self, cluster_identifier=None,
+                                   snapshot_identifier=None,
+                                   snapshot_type=None, start_time=None,
+                                   end_time=None, max_records=None,
+                                   marker=None, owner_account=None):
+        """
+        Returns one or more snapshot objects, which contain metadata
+        about your cluster snapshots. By default, this operation
+        returns information about all snapshots of all clusters that
+        are owned by your AWS customer account. No information is
+        returned for snapshots owned by inactive AWS customer
+        accounts.
+
+        :type cluster_identifier: string
+        :param cluster_identifier: The identifier of the cluster for which
+            information about snapshots is requested.
+
+        :type snapshot_identifier: string
+        :param snapshot_identifier: The snapshot identifier of the snapshot
+            about which to return information.
+
+        :type snapshot_type: string
+        :param snapshot_type: The type of snapshots for which you are
+            requesting information. By default, snapshots of all types are
+            returned.
+        Valid Values: `automated` | `manual`
+
+        :type start_time: timestamp
+        :param start_time: A value that requests only snapshots created at or
+            after the specified time. The time value is specified in ISO 8601
+            format. For more information about ISO 8601, go to the `ISO8601
+            Wikipedia page.`_
+        Example: `2012-07-16T18:00:00Z`
+
+        :type end_time: timestamp
+        :param end_time: A time value that requests only snapshots created at
+            or before the specified time. The time value is specified in ISO
+            8601 format.
+            For more information about ISO 8601, go to the
+            `ISO8601 Wikipedia page.`_
+        Example: `2012-07-16T18:00:00Z`
+
+        :type max_records: integer
+        :param max_records: The maximum number of response records to return in
+            each call. If the number of remaining response records exceeds the
+            specified `MaxRecords` value, a value is returned in a `marker`
+            field of the response. You can retrieve the next set of records by
+            retrying the command with the returned marker value.
+        Default: `100`
+
+        Constraints: minimum 20, maximum 100.
+
+        :type marker: string
+        :param marker: An optional parameter that specifies the starting point
+            to return a set of response records. When the results of a
+            DescribeClusterSnapshots request exceed the value specified in
+            `MaxRecords`, AWS returns a value in the `Marker` field of the
+            response. You can retrieve the next set of response records by
+            providing the returned marker value in the `Marker` parameter and
+            retrying the request.
+
+        :type owner_account: string
+        :param owner_account: The AWS customer account used to create or copy
+            the snapshot. Use this field to filter the results to snapshots
+            owned by a particular account. To describe snapshots you own,
+            either specify your AWS customer account, or do not specify the
+            parameter.
+
+        """
+        params = {}
+        if cluster_identifier is not None:
+            params['ClusterIdentifier'] = cluster_identifier
+        if snapshot_identifier is not None:
+            params['SnapshotIdentifier'] = snapshot_identifier
+        if snapshot_type is not None:
+            params['SnapshotType'] = snapshot_type
+        if start_time is not None:
+            params['StartTime'] = start_time
+        if end_time is not None:
+            params['EndTime'] = end_time
+        if max_records is not None:
+            params['MaxRecords'] = max_records
+        if marker is not None:
+            params['Marker'] = marker
+        if owner_account is not None:
+            params['OwnerAccount'] = owner_account
+        return self._make_request(
+            action='DescribeClusterSnapshots',
+            verb='POST',
+            path='/', params=params)
+
+    def describe_cluster_subnet_groups(self, cluster_subnet_group_name=None,
+                                       max_records=None, marker=None):
+        """
+        Returns one or more cluster subnet group objects, which
+        contain metadata about your cluster subnet groups. By default,
+        this operation returns information about all cluster subnet
+        groups that are defined in your AWS account.
+
+        :type cluster_subnet_group_name: string
+        :param cluster_subnet_group_name: The name of the cluster subnet group
+            for which information is requested.
+
+        :type max_records: integer
+        :param max_records: The maximum number of response records to return in
+            each call. If the number of remaining response records exceeds the
+            specified `MaxRecords` value, a value is returned in a `marker`
+            field of the response. You can retrieve the next set of records by
+            retrying the command with the returned marker value.
+        Default: `100`
+
+        Constraints: minimum 20, maximum 100.
+
+        :type marker: string
+        :param marker: An optional parameter that specifies the starting point
+            to return a set of response records. When the results of a
+            DescribeClusterSubnetGroups request exceed the value specified in
+            `MaxRecords`, AWS returns a value in the `Marker` field of the
+            response. You can retrieve the next set of response records by
+            providing the returned marker value in the `Marker` parameter and
+            retrying the request.
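+
+        A pagination sketch; the nested response keys shown are an
+        assumption based on the generic AWS query response layout,
+        not a documented contract::
+
+            marker = None
+            while True:
+                page = conn.describe_cluster_subnet_groups(marker=marker)
+                result = page['DescribeClusterSubnetGroupsResponse']\
+                    ['DescribeClusterSubnetGroupsResult']
+                marker = result.get('Marker')
+                if not marker:
+                    break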
+ + """ + params = {} + if cluster_subnet_group_name is not None: + params['ClusterSubnetGroupName'] = cluster_subnet_group_name + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeClusterSubnetGroups', + verb='POST', + path='/', params=params) + + def describe_cluster_versions(self, cluster_version=None, + cluster_parameter_group_family=None, + max_records=None, marker=None): + """ + Returns descriptions of the available Amazon Redshift cluster + versions. You can call this operation even before creating any + clusters to learn more about the Amazon Redshift versions. For + more information about managing clusters, go to `Amazon + Redshift Clusters`_ in the Amazon Redshift Management Guide + + :type cluster_version: string + :param cluster_version: The specific cluster version to return. + Example: `1.0` + + :type cluster_parameter_group_family: string + :param cluster_parameter_group_family: + The name of a specific cluster parameter group family to return details + for. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type max_records: integer + :param max_records: The maximum number of response records to return in + each call. If the number of remaining response records exceeds the + specified `MaxRecords` value, a value is returned in a `marker` + field of the response. You can retrieve the next set of records by + retrying the command with the returned marker value. + Default: `100` + + Constraints: minimum 20, maximum 100. + + :type marker: string + :param marker: An optional parameter that specifies the starting point + to return a set of response records. When the results of a + DescribeClusterVersions request exceed the value specified in + `MaxRecords`, AWS returns a value in the `Marker` field of the + response. You can retrieve the next set of response records by + providing the returned marker value in the `Marker` parameter and + retrying the request. + + """ + params = {} + if cluster_version is not None: + params['ClusterVersion'] = cluster_version + if cluster_parameter_group_family is not None: + params['ClusterParameterGroupFamily'] = cluster_parameter_group_family + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeClusterVersions', + verb='POST', + path='/', params=params) + + def describe_clusters(self, cluster_identifier=None, max_records=None, + marker=None): + """ + Returns properties of provisioned clusters including general + cluster properties, cluster database properties, maintenance + and backup properties, and security and access properties. + This operation supports pagination. For more information about + managing clusters, go to `Amazon Redshift Clusters`_ in the + Amazon Redshift Management Guide . + + :type cluster_identifier: string + :param cluster_identifier: The unique identifier of a cluster whose + properties you are requesting. This parameter is case sensitive. + The default is that all clusters defined for an account are returned. + + :type max_records: integer + :param max_records: The maximum number of response records to return in + each call. If the number of remaining response records exceeds the + specified `MaxRecords` value, a value is returned in a `marker` + field of the response. 
You can retrieve the next set of records by + retrying the command with the returned marker value. + Default: `100` + + Constraints: minimum 20, maximum 100. + + :type marker: string + :param marker: An optional parameter that specifies the starting point + to return a set of response records. When the results of a + DescribeClusters request exceed the value specified in + `MaxRecords`, AWS returns a value in the `Marker` field of the + response. You can retrieve the next set of response records by + providing the returned marker value in the `Marker` parameter and + retrying the request. + Constraints: You can specify either the **ClusterIdentifier** parameter + or the **Marker** parameter, but not both. + + """ + params = {} + if cluster_identifier is not None: + params['ClusterIdentifier'] = cluster_identifier + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeClusters', + verb='POST', + path='/', params=params) + + def describe_default_cluster_parameters(self, parameter_group_family, + max_records=None, marker=None): + """ + Returns a list of parameter settings for the specified + parameter group family. + + For more information about managing parameter groups, go to + `Amazon Redshift Parameter Groups`_ in the Amazon Redshift + Management Guide . + + :type parameter_group_family: string + :param parameter_group_family: The name of the cluster parameter group + family. + + :type max_records: integer + :param max_records: The maximum number of response records to return in + each call. If the number of remaining response records exceeds the + specified `MaxRecords` value, a value is returned in a `marker` + field of the response. You can retrieve the next set of records by + retrying the command with the returned marker value. + Default: `100` + + Constraints: minimum 20, maximum 100. + + :type marker: string + :param marker: An optional parameter that specifies the starting point + to return a set of response records. When the results of a + DescribeDefaultClusterParameters request exceed the value specified + in `MaxRecords`, AWS returns a value in the `Marker` field of the + response. You can retrieve the next set of response records by + providing the returned marker value in the `Marker` parameter and + retrying the request. + + """ + params = {'ParameterGroupFamily': parameter_group_family, } + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDefaultClusterParameters', + verb='POST', + path='/', params=params) + + def describe_event_categories(self, source_type=None): + """ + Displays a list of event categories for all event source + types, or for a specified source type. For a list of the event + categories and source types, go to `Amazon Redshift Event + Notifications`_. + + :type source_type: string + :param source_type: The source type, such as cluster or parameter + group, to which the described event categories apply. + Valid values: cluster, snapshot, parameter group, and security group. 
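+
+        For example, to list only cluster-related event categories::
+
+            conn.describe_event_categories(source_type='cluster')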
+ + """ + params = {} + if source_type is not None: + params['SourceType'] = source_type + return self._make_request( + action='DescribeEventCategories', + verb='POST', + path='/', params=params) + + def describe_event_subscriptions(self, subscription_name=None, + max_records=None, marker=None): + """ + Lists descriptions of all the Amazon Redshift event + notifications subscription for a customer account. If you + specify a subscription name, lists the description for that + subscription. + + :type subscription_name: string + :param subscription_name: The name of the Amazon Redshift event + notification subscription to be described. + + :type max_records: integer + :param max_records: The maximum number of response records to return in + each call. If the number of remaining response records exceeds the + specified `MaxRecords` value, a value is returned in a `marker` + field of the response. You can retrieve the next set of records by + retrying the command with the returned marker value. + Default: `100` + + Constraints: minimum 20, maximum 100. + + :type marker: string + :param marker: An optional parameter that specifies the starting point + to return a set of response records. When the results of a + DescribeEventSubscriptions request exceed the value specified in + `MaxRecords`, AWS returns a value in the `Marker` field of the + response. You can retrieve the next set of response records by + providing the returned marker value in the `Marker` parameter and + retrying the request. + + """ + params = {} + if subscription_name is not None: + params['SubscriptionName'] = subscription_name + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeEventSubscriptions', + verb='POST', + path='/', params=params) + + def describe_events(self, source_identifier=None, source_type=None, + start_time=None, end_time=None, duration=None, + max_records=None, marker=None): + """ + Returns events related to clusters, security groups, + snapshots, and parameter groups for the past 14 days. Events + specific to a particular cluster, security group, snapshot or + parameter group can be obtained by providing the name as a + parameter. By default, the past hour of events are returned. + + :type source_identifier: string + :param source_identifier: + The identifier of the event source for which events will be returned. + If this parameter is not specified, then all sources are included + in the response. + + Constraints: + + If SourceIdentifier is supplied, SourceType must also be provided. + + + + Specify a cluster identifier when SourceType is `cluster`. + + Specify a cluster security group name when SourceType is `cluster- + security-group`. + + Specify a cluster parameter group name when SourceType is `cluster- + parameter-group`. + + Specify a cluster snapshot identifier when SourceType is `cluster- + snapshot`. + + :type source_type: string + :param source_type: + The event source to retrieve events for. If no value is specified, all + events are returned. + + Constraints: + + If SourceType is supplied, SourceIdentifier must also be provided. + + + + Specify `cluster` when SourceIdentifier is a cluster identifier. + + Specify `cluster-security-group` when SourceIdentifier is a cluster + security group name. + + Specify `cluster-parameter-group` when SourceIdentifier is a cluster + parameter group name. + + Specify `cluster-snapshot` when SourceIdentifier is a cluster + snapshot identifier. 
+ + :type start_time: timestamp + :param start_time: The beginning of the time interval to retrieve + events for, specified in ISO 8601 format. For more information + about ISO 8601, go to the `ISO8601 Wikipedia page.`_ + Example: `2009-07-08T18:00Z` + + :type end_time: timestamp + :param end_time: The end of the time interval for which to retrieve + events, specified in ISO 8601 format. For more information about + ISO 8601, go to the `ISO8601 Wikipedia page.`_ + Example: `2009-07-08T18:00Z` + + :type duration: integer + :param duration: The number of minutes prior to the time of the request + for which to retrieve events. For example, if the request is sent + at 18:00 and you specify a duration of 60, then only events which + have occurred after 17:00 will be returned. + Default: `60` + + :type max_records: integer + :param max_records: The maximum number of response records to return in + each call. If the number of remaining response records exceeds the + specified `MaxRecords` value, a value is returned in a `marker` + field of the response. You can retrieve the next set of records by + retrying the command with the returned marker value. + Default: `100` + + Constraints: minimum 20, maximum 100. + + :type marker: string + :param marker: An optional parameter that specifies the starting point + to return a set of response records. When the results of a + DescribeEvents request exceed the value specified in `MaxRecords`, + AWS returns a value in the `Marker` field of the response. You can + retrieve the next set of response records by providing the returned + marker value in the `Marker` parameter and retrying the request. + + """ + params = {} + if source_identifier is not None: + params['SourceIdentifier'] = source_identifier + if source_type is not None: + params['SourceType'] = source_type + if start_time is not None: + params['StartTime'] = start_time + if end_time is not None: + params['EndTime'] = end_time + if duration is not None: + params['Duration'] = duration + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeEvents', + verb='POST', + path='/', params=params) + + def describe_hsm_client_certificates(self, + hsm_client_certificate_identifier=None, + max_records=None, marker=None): + """ + Returns information about the specified HSM client + certificate. If no certificate ID is specified, returns + information about all the HSM certificates owned by your AWS + customer account. + + :type hsm_client_certificate_identifier: string + :param hsm_client_certificate_identifier: The identifier of a specific + HSM client certificate for which you want information. If no + identifier is specified, information is returned for all HSM client + certificates owned by your AWS customer account. + + :type max_records: integer + :param max_records: The maximum number of response records to return in + each call. If the number of remaining response records exceeds the + specified `MaxRecords` value, a value is returned in a `marker` + field of the response. You can retrieve the next set of records by + retrying the command with the returned marker value. + Default: `100` + + Constraints: minimum 20, maximum 100. + + :type marker: string + :param marker: An optional parameter that specifies the starting point + to return a set of response records. 
When the results of a + DescribeHsmClientCertificates request exceed the value specified in + `MaxRecords`, AWS returns a value in the `Marker` field of the + response. You can retrieve the next set of response records by + providing the returned marker value in the `Marker` parameter and + retrying the request. + + """ + params = {} + if hsm_client_certificate_identifier is not None: + params['HsmClientCertificateIdentifier'] = hsm_client_certificate_identifier + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeHsmClientCertificates', + verb='POST', + path='/', params=params) + + def describe_hsm_configurations(self, hsm_configuration_identifier=None, + max_records=None, marker=None): + """ + Returns information about the specified Amazon Redshift HSM + configuration. If no configuration ID is specified, returns + information about all the HSM configurations owned by your AWS + customer account. + + :type hsm_configuration_identifier: string + :param hsm_configuration_identifier: The identifier of a specific + Amazon Redshift HSM configuration to be described. If no identifier + is specified, information is returned for all HSM configurations + owned by your AWS customer account. + + :type max_records: integer + :param max_records: The maximum number of response records to return in + each call. If the number of remaining response records exceeds the + specified `MaxRecords` value, a value is returned in a `marker` + field of the response. You can retrieve the next set of records by + retrying the command with the returned marker value. + Default: `100` + + Constraints: minimum 20, maximum 100. + + :type marker: string + :param marker: An optional parameter that specifies the starting point + to return a set of response records. When the results of a + DescribeHsmConfigurations request exceed the value specified in + `MaxRecords`, AWS returns a value in the `Marker` field of the + response. You can retrieve the next set of response records by + providing the returned marker value in the `Marker` parameter and + retrying the request. + + """ + params = {} + if hsm_configuration_identifier is not None: + params['HsmConfigurationIdentifier'] = hsm_configuration_identifier + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeHsmConfigurations', + verb='POST', + path='/', params=params) + + def describe_logging_status(self, cluster_identifier): + """ + Describes whether information, such as queries and connection + attempts, is being logged for the specified Amazon Redshift + cluster. + + :type cluster_identifier: string + :param cluster_identifier: The identifier of the cluster to get the + logging status from. + Example: `examplecluster` + + """ + params = {'ClusterIdentifier': cluster_identifier, } + return self._make_request( + action='DescribeLoggingStatus', + verb='POST', + path='/', params=params) + + def describe_orderable_cluster_options(self, cluster_version=None, + node_type=None, max_records=None, + marker=None): + """ + Returns a list of orderable cluster options. Before you create + a new cluster you can use this operation to find what options + are available, such as the EC2 Availability Zones (AZ) in the + specific AWS region that you can specify, and the node types + you can request. The node types differ by available storage, + memory, CPU and price. 
Given the costs involved, you might want
+        to obtain a list of cluster options in the specific region and
+        then specify values when creating a cluster. For more information
+        about managing clusters, go to `Amazon Redshift Clusters`_ in
+        the Amazon Redshift Management Guide.
+
+        :type cluster_version: string
+        :param cluster_version: The version filter value. Specify this
+            parameter to show only the available offerings matching the
+            specified version.
+            Default: All versions.
+
+            Constraints: Must be one of the versions returned from
+            DescribeClusterVersions.
+
+        :type node_type: string
+        :param node_type: The node type filter value. Specify this parameter to
+            show only the available offerings matching the specified node type.
+
+        :type max_records: integer
+        :param max_records: The maximum number of response records to return in
+            each call. If the number of remaining response records exceeds the
+            specified `MaxRecords` value, a value is returned in a `marker`
+            field of the response. You can retrieve the next set of records by
+            retrying the command with the returned marker value.
+            Default: `100`
+
+            Constraints: minimum 20, maximum 100.
+
+        :type marker: string
+        :param marker: An optional parameter that specifies the starting point
+            to return a set of response records. When the results of a
+            DescribeOrderableClusterOptions request exceed the value specified
+            in `MaxRecords`, AWS returns a value in the `Marker` field of the
+            response. You can retrieve the next set of response records by
+            providing the returned marker value in the `Marker` parameter and
+            retrying the request.
+
+        """
+        params = {}
+        if cluster_version is not None:
+            params['ClusterVersion'] = cluster_version
+        if node_type is not None:
+            params['NodeType'] = node_type
+        if max_records is not None:
+            params['MaxRecords'] = max_records
+        if marker is not None:
+            params['Marker'] = marker
+        return self._make_request(
+            action='DescribeOrderableClusterOptions',
+            verb='POST',
+            path='/', params=params)
+
+    def describe_reserved_node_offerings(self,
+                                         reserved_node_offering_id=None,
+                                         max_records=None, marker=None):
+        """
+        Returns a list of the available reserved node offerings by
+        Amazon Redshift with their descriptions, including the node
+        type, the fixed and recurring costs of reserving the node, and
+        the duration for which the node will be reserved for you. These
+        descriptions help you determine which reserved node offering
+        you want to purchase. You then use the unique offering ID in
+        your call to PurchaseReservedNodeOffering to reserve one or
+        more nodes for your Amazon Redshift cluster.
+
+        For more information about reserved node offerings, go to
+        `Purchasing Reserved Nodes`_ in the Amazon Redshift Management
+        Guide.
+
+        :type reserved_node_offering_id: string
+        :param reserved_node_offering_id: The unique identifier for the
+            offering.
+
+        :type max_records: integer
+        :param max_records: The maximum number of response records to return in
+            each call. If the number of remaining response records exceeds the
+            specified `MaxRecords` value, a value is returned in a `marker`
+            field of the response. You can retrieve the next set of records by
+            retrying the command with the returned marker value.
+            Default: `100`
+
+            Constraints: minimum 20, maximum 100.
+
+        :type marker: string
+        :param marker: An optional parameter that specifies the starting point
+            to return a set of response records.
When the results of a + DescribeReservedNodeOfferings request exceed the value specified in + `MaxRecords`, AWS returns a value in the `Marker` field of the + response. You can retrieve the next set of response records by + providing the returned marker value in the `Marker` parameter and + retrying the request. + + """ + params = {} + if reserved_node_offering_id is not None: + params['ReservedNodeOfferingId'] = reserved_node_offering_id + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeReservedNodeOfferings', + verb='POST', + path='/', params=params) + + def describe_reserved_nodes(self, reserved_node_id=None, + max_records=None, marker=None): + """ + Returns the descriptions of the reserved nodes. + + :type reserved_node_id: string + :param reserved_node_id: Identifier for the node reservation. + + :type max_records: integer + :param max_records: The maximum number of response records to return in + each call. If the number of remaining response records exceeds the + specified `MaxRecords` value, a value is returned in a `marker` + field of the response. You can retrieve the next set of records by + retrying the command with the returned marker value. + Default: `100` + + Constraints: minimum 20, maximum 100. + + :type marker: string + :param marker: An optional parameter that specifies the starting point + to return a set of response records. When the results of a + DescribeReservedNodes request exceed the value specified in + `MaxRecords`, AWS returns a value in the `Marker` field of the + response. You can retrieve the next set of response records by + providing the returned marker value in the `Marker` parameter and + retrying the request. + + """ + params = {} + if reserved_node_id is not None: + params['ReservedNodeId'] = reserved_node_id + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeReservedNodes', + verb='POST', + path='/', params=params) + + def describe_resize(self, cluster_identifier): + """ + Returns information about the last resize operation for the + specified cluster. If no resize operation has ever been + initiated for the specified cluster, a `HTTP 404` error is + returned. If a resize operation was initiated and completed, + the status of the resize remains as `SUCCEEDED` until the next + resize. + + A resize operation can be requested using ModifyCluster and + specifying a different number or type of nodes for the + cluster. + + :type cluster_identifier: string + :param cluster_identifier: The unique identifier of a cluster whose + resize progress you are requesting. This parameter isn't case- + sensitive. + By default, resize operations for all clusters defined for an AWS + account are returned. + + """ + params = {'ClusterIdentifier': cluster_identifier, } + return self._make_request( + action='DescribeResize', + verb='POST', + path='/', params=params) + + def disable_logging(self, cluster_identifier): + """ + Stops logging information, such as queries and connection + attempts, for the specified Amazon Redshift cluster. + + :type cluster_identifier: string + :param cluster_identifier: The identifier of the cluster on which + logging is to be stopped. 
+ Example: `examplecluster` + + """ + params = {'ClusterIdentifier': cluster_identifier, } + return self._make_request( + action='DisableLogging', + verb='POST', + path='/', params=params) + + def disable_snapshot_copy(self, cluster_identifier): + """ + Disables the automatic copying of snapshots from one region to + another region for a specified cluster. + + :type cluster_identifier: string + :param cluster_identifier: The unique identifier of the source cluster + that you want to disable copying of snapshots to a destination + region. + Constraints: Must be the valid name of an existing cluster that has + cross-region snapshot copy enabled. + + """ + params = {'ClusterIdentifier': cluster_identifier, } + return self._make_request( + action='DisableSnapshotCopy', + verb='POST', + path='/', params=params) + + def enable_logging(self, cluster_identifier, bucket_name, + s3_key_prefix=None): + """ + Starts logging information, such as queries and connection + attempts, for the specified Amazon Redshift cluster. + + :type cluster_identifier: string + :param cluster_identifier: The identifier of the cluster on which + logging is to be started. + Example: `examplecluster` + + :type bucket_name: string + :param bucket_name: + The name of an existing S3 bucket where the log files are to be stored. + + Constraints: + + + + Must be in the same region as the cluster + + The cluster must have read bucket and put object permissions + + :type s3_key_prefix: string + :param s3_key_prefix: + The prefix applied to the log file names. + + Constraints: + + + + Cannot exceed 512 characters + + Cannot contain spaces( ), double quotes ("), single quotes ('), a + backslash (\), or control characters. The hexadecimal codes for + invalid characters are: + + + x00 to x20 + + x22 + + x27 + + x5c + + x7f or larger + + """ + params = { + 'ClusterIdentifier': cluster_identifier, + 'BucketName': bucket_name, + } + if s3_key_prefix is not None: + params['S3KeyPrefix'] = s3_key_prefix + return self._make_request( + action='EnableLogging', + verb='POST', + path='/', params=params) + + def enable_snapshot_copy(self, cluster_identifier, destination_region, + retention_period=None): + """ + Enables the automatic copy of snapshots from one region to + another region for a specified cluster. + + :type cluster_identifier: string + :param cluster_identifier: The unique identifier of the source cluster + to copy snapshots from. + Constraints: Must be the valid name of an existing cluster that does + not already have cross-region snapshot copy enabled. + + :type destination_region: string + :param destination_region: The destination region that you want to copy + snapshots to. + Constraints: Must be the name of a valid region. For more information, + see `Regions and Endpoints`_ in the Amazon Web Services General + Reference. + + :type retention_period: integer + :param retention_period: The number of days to retain automated + snapshots in the destination region after they are copied from the + source region. + Default: 7. + + Constraints: Must be at least 1 and no more than 35. 
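+
+        A minimal sketch of a typical call, assuming ``conn`` is an open
+        ``RedshiftConnection`` and `examplecluster` is an existing cluster
+        that does not yet have cross-region snapshot copy enabled::
+
+            conn.enable_snapshot_copy(
+                cluster_identifier='examplecluster',
+                destination_region='us-west-2',
+                retention_period=14)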
+ + """ + params = { + 'ClusterIdentifier': cluster_identifier, + 'DestinationRegion': destination_region, + } + if retention_period is not None: + params['RetentionPeriod'] = retention_period + return self._make_request( + action='EnableSnapshotCopy', + verb='POST', + path='/', params=params) + + def modify_cluster(self, cluster_identifier, cluster_type=None, + node_type=None, number_of_nodes=None, + cluster_security_groups=None, + vpc_security_group_ids=None, + master_user_password=None, + cluster_parameter_group_name=None, + automated_snapshot_retention_period=None, + preferred_maintenance_window=None, + cluster_version=None, allow_version_upgrade=None, + hsm_client_certificate_identifier=None, + hsm_configuration_identifier=None, + new_cluster_identifier=None): + """ + Modifies the settings for a cluster. For example, you can add + another security or parameter group, update the preferred + maintenance window, or change the master user password. + Resetting a cluster password or modifying the security groups + associated with a cluster do not need a reboot. However, + modifying a parameter group requires a reboot for parameters + to take effect. For more information about managing clusters, + go to `Amazon Redshift Clusters`_ in the Amazon Redshift + Management Guide + + You can also change node type and the number of nodes to scale + up or down the cluster. When resizing a cluster, you must + specify both the number of nodes and the node type even if one + of the parameters does not change. If you specify the same + number of nodes and node type that are already configured for + the cluster, an error is returned. + + :type cluster_identifier: string + :param cluster_identifier: The unique identifier of the cluster to be + modified. + Example: `examplecluster` + + :type cluster_type: string + :param cluster_type: The new cluster type. + When you submit your cluster resize request, your existing cluster goes + into a read-only mode. After Amazon Redshift provisions a new + cluster based on your resize requirements, there will be outage for + a period while the old cluster is deleted and your connection is + switched to the new cluster. You can use DescribeResize to track + the progress of the resize request. + + Valid Values: ` multi-node | single-node ` + + :type node_type: string + :param node_type: The new node type of the cluster. If you specify a + new node type, you must also specify the number of nodes parameter + also. + When you submit your request to resize a cluster, Amazon Redshift sets + access permissions for the cluster to read-only. After Amazon + Redshift provisions a new cluster according to your resize + requirements, there will be a temporary outage while the old + cluster is deleted and your connection is switched to the new + cluster. When the new connection is complete, the original access + permissions for the cluster are restored. You can use the + DescribeResize to track the progress of the resize request. + + Valid Values: ` dw1.xlarge` | `dw1.8xlarge` | `dw2.large` | + `dw2.8xlarge`. + + :type number_of_nodes: integer + :param number_of_nodes: The new number of nodes of the cluster. If you + specify a new number of nodes, you must also specify the node type + parameter also. + When you submit your request to resize a cluster, Amazon Redshift sets + access permissions for the cluster to read-only. 
After Amazon + Redshift provisions a new cluster according to your resize + requirements, there will be a temporary outage while the old + cluster is deleted and your connection is switched to the new + cluster. When the new connection is complete, the original access + permissions for the cluster are restored. You can use + DescribeResize to track the progress of the resize request. + + Valid Values: Integer greater than `0`. + + :type cluster_security_groups: list + :param cluster_security_groups: + A list of cluster security groups to be authorized on this cluster. + This change is asynchronously applied as soon as possible. + + Security groups currently associated with the cluster, and not in the + list of groups to apply, will be revoked from the cluster. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type vpc_security_group_ids: list + :param vpc_security_group_ids: A list of virtual private cloud (VPC) + security groups to be associated with the cluster. + + :type master_user_password: string + :param master_user_password: + The new password for the cluster master user. This change is + asynchronously applied as soon as possible. Between the time of the + request and the completion of the request, the `MasterUserPassword` + element exists in the `PendingModifiedValues` element of the + operation response. + + Default: Uses existing setting. + + Constraints: + + + + Must be between 8 and 64 characters in length. + + Must contain at least one uppercase letter. + + Must contain at least one lowercase letter. + + Must contain one number. + + Can be any printable ASCII character (ASCII code 33 to 126) except ' + (single quote), " (double quote), \, /, @, or space. + + :type cluster_parameter_group_name: string + :param cluster_parameter_group_name: The name of the cluster parameter + group to apply to this cluster. This change is applied only after + the cluster is rebooted. To reboot a cluster use RebootCluster. + Default: Uses existing setting. + + Constraints: The cluster parameter group must be in the same parameter + group family that matches the cluster version. + + :type automated_snapshot_retention_period: integer + :param automated_snapshot_retention_period: The number of days that + automated snapshots are retained. If the value is 0, automated + snapshots are disabled. Even if automated snapshots are disabled, + you can still create manual snapshots when you want with + CreateClusterSnapshot. + If you decrease the automated snapshot retention period from its + current value, existing automated snapshots that fall outside of + the new retention period will be immediately deleted. + + Default: Uses existing setting. + + Constraints: Must be a value from 0 to 35. + + :type preferred_maintenance_window: string + :param preferred_maintenance_window: The weekly time range (in UTC) + during which system maintenance can occur, if necessary. If system + maintenance is necessary during the window, it may result in an + outage. + This maintenance window change is made immediately. If the new + maintenance window indicates the current time, there must be at + least 120 minutes between the current time and end of the window in + order to ensure that pending changes are applied. + + Default: Uses existing setting. + + Format: ddd:hh24:mi-ddd:hh24:mi, for example `wed:07:30-wed:08:00`. 
+ + Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun + + Constraints: Must be at least 30 minutes. + + :type cluster_version: string + :param cluster_version: The new version number of the Amazon Redshift + engine to upgrade to. + For major version upgrades, if a non-default cluster parameter group is + currently in use, a new cluster parameter group in the cluster + parameter group family for the new version must be specified. The + new cluster parameter group can be the default for that cluster + parameter group family. For more information about managing + parameter groups, go to `Amazon Redshift Parameter Groups`_ in the + Amazon Redshift Management Guide . + + Example: `1.0` + + :type allow_version_upgrade: boolean + :param allow_version_upgrade: If `True`, upgrades will be applied + automatically to the cluster during the maintenance window. + Default: `False` + + :type hsm_client_certificate_identifier: string + :param hsm_client_certificate_identifier: Specifies the name of the HSM + client certificate the Amazon Redshift cluster uses to retrieve the + data encryption keys stored in an HSM. + + :type hsm_configuration_identifier: string + :param hsm_configuration_identifier: Specifies the name of the HSM + configuration that contains the information the Amazon Redshift + cluster can use to retrieve and store keys in an HSM. + + :type new_cluster_identifier: string + :param new_cluster_identifier: The new identifier for the cluster. + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens. + + Alphabetic characters must be lowercase. + + First character must be a letter. + + Cannot end with a hyphen or contain two consecutive hyphens. + + Must be unique for all clusters within an AWS account. + + + Example: `examplecluster` + + """ + params = {'ClusterIdentifier': cluster_identifier, } + if cluster_type is not None: + params['ClusterType'] = cluster_type + if node_type is not None: + params['NodeType'] = node_type + if number_of_nodes is not None: + params['NumberOfNodes'] = number_of_nodes + if cluster_security_groups is not None: + self.build_list_params(params, + cluster_security_groups, + 'ClusterSecurityGroups.member') + if vpc_security_group_ids is not None: + self.build_list_params(params, + vpc_security_group_ids, + 'VpcSecurityGroupIds.member') + if master_user_password is not None: + params['MasterUserPassword'] = master_user_password + if cluster_parameter_group_name is not None: + params['ClusterParameterGroupName'] = cluster_parameter_group_name + if automated_snapshot_retention_period is not None: + params['AutomatedSnapshotRetentionPeriod'] = automated_snapshot_retention_period + if preferred_maintenance_window is not None: + params['PreferredMaintenanceWindow'] = preferred_maintenance_window + if cluster_version is not None: + params['ClusterVersion'] = cluster_version + if allow_version_upgrade is not None: + params['AllowVersionUpgrade'] = str( + allow_version_upgrade).lower() + if hsm_client_certificate_identifier is not None: + params['HsmClientCertificateIdentifier'] = hsm_client_certificate_identifier + if hsm_configuration_identifier is not None: + params['HsmConfigurationIdentifier'] = hsm_configuration_identifier + if new_cluster_identifier is not None: + params['NewClusterIdentifier'] = new_cluster_identifier + return self._make_request( + action='ModifyCluster', + verb='POST', + path='/', params=params) + + def modify_cluster_parameter_group(self, parameter_group_name, + parameters): + """ + Modifies the parameters of a 
parameter group. + + For more information about managing parameter groups, go to + `Amazon Redshift Parameter Groups`_ in the Amazon Redshift + Management Guide . + + :type parameter_group_name: string + :param parameter_group_name: The name of the parameter group to be + modified. + + :type parameters: list + :param parameters: An array of parameters to be modified. A maximum of + 20 parameters can be modified in a single request. + For each parameter to be modified, you must supply at least the + parameter name and parameter value; other name-value pairs of the + parameter are optional. + + For the workload management (WLM) configuration, you must supply all + the name-value pairs in the wlm_json_configuration parameter. + + """ + params = {'ParameterGroupName': parameter_group_name, } + self.build_complex_list_params( + params, parameters, + 'Parameters.member', + ('ParameterName', 'ParameterValue', 'Description', 'Source', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion')) + return self._make_request( + action='ModifyClusterParameterGroup', + verb='POST', + path='/', params=params) + + def modify_cluster_subnet_group(self, cluster_subnet_group_name, + subnet_ids, description=None): + """ + Modifies a cluster subnet group to include the specified list + of VPC subnets. The operation replaces the existing list of + subnets with the new list of subnets. + + :type cluster_subnet_group_name: string + :param cluster_subnet_group_name: The name of the subnet group to be + modified. + + :type description: string + :param description: A text description of the subnet group to be + modified. + + :type subnet_ids: list + :param subnet_ids: An array of VPC subnet IDs. A maximum of 20 subnets + can be modified in a single request. + + """ + params = { + 'ClusterSubnetGroupName': cluster_subnet_group_name, + } + self.build_list_params(params, + subnet_ids, + 'SubnetIds.member') + if description is not None: + params['Description'] = description + return self._make_request( + action='ModifyClusterSubnetGroup', + verb='POST', + path='/', params=params) + + def modify_event_subscription(self, subscription_name, + sns_topic_arn=None, source_type=None, + source_ids=None, event_categories=None, + severity=None, enabled=None): + """ + Modifies an existing Amazon Redshift event notification + subscription. + + :type subscription_name: string + :param subscription_name: The name of the modified Amazon Redshift + event notification subscription. + + :type sns_topic_arn: string + :param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic + to be used by the event notification subscription. + + :type source_type: string + :param source_type: The type of source that will be generating the + events. For example, if you want to be notified of events generated + by a cluster, you would set this parameter to cluster. If this + value is not specified, events are returned for all Amazon Redshift + objects in your AWS account. You must specify a source type in + order to specify source IDs. + Valid values: cluster, cluster-parameter-group, cluster-security-group, + and cluster-snapshot. + + :type source_ids: list + :param source_ids: A list of one or more identifiers of Amazon Redshift + source objects. All of the objects must be of the same type as was + specified in the source type parameter. The event subscription will + return only events generated by the specified objects. If not + specified, then events are returned for all objects within the + source type specified. 
+ Example: my-cluster-1, my-cluster-2 + + Example: my-snapshot-20131010 + + :type event_categories: list + :param event_categories: Specifies the Amazon Redshift event categories + to be published by the event notification subscription. + Values: Configuration, Management, Monitoring, Security + + :type severity: string + :param severity: Specifies the Amazon Redshift event severity to be + published by the event notification subscription. + Values: ERROR, INFO + + :type enabled: boolean + :param enabled: A Boolean value indicating if the subscription is + enabled. `True` indicates the subscription is enabled + + """ + params = {'SubscriptionName': subscription_name, } + if sns_topic_arn is not None: + params['SnsTopicArn'] = sns_topic_arn + if source_type is not None: + params['SourceType'] = source_type + if source_ids is not None: + self.build_list_params(params, + source_ids, + 'SourceIds.member') + if event_categories is not None: + self.build_list_params(params, + event_categories, + 'EventCategories.member') + if severity is not None: + params['Severity'] = severity + if enabled is not None: + params['Enabled'] = str( + enabled).lower() + return self._make_request( + action='ModifyEventSubscription', + verb='POST', + path='/', params=params) + + def modify_snapshot_copy_retention_period(self, cluster_identifier, + retention_period): + """ + Modifies the number of days to retain automated snapshots in + the destination region after they are copied from the source + region. + + :type cluster_identifier: string + :param cluster_identifier: The unique identifier of the cluster for + which you want to change the retention period for automated + snapshots that are copied to a destination region. + Constraints: Must be the valid name of an existing cluster that has + cross-region snapshot copy enabled. + + :type retention_period: integer + :param retention_period: The number of days to retain automated + snapshots in the destination region after they are copied from the + source region. + If you decrease the retention period for automated snapshots that are + copied to a destination region, Amazon Redshift will delete any + existing automated snapshots that were copied to the destination + region and that fall outside of the new retention period. + + Constraints: Must be at least 1 and no more than 35. + + """ + params = { + 'ClusterIdentifier': cluster_identifier, + 'RetentionPeriod': retention_period, + } + return self._make_request( + action='ModifySnapshotCopyRetentionPeriod', + verb='POST', + path='/', params=params) + + def purchase_reserved_node_offering(self, reserved_node_offering_id, + node_count=None): + """ + Allows you to purchase reserved nodes. Amazon Redshift offers + a predefined set of reserved node offerings. You can purchase + one of the offerings. You can call the + DescribeReservedNodeOfferings API to obtain the available + reserved node offerings. You can call this API by providing a + specific reserved node offering and the number of nodes you + want to reserve. + + For more information about managing parameter groups, go to + `Purchasing Reserved Nodes`_ in the Amazon Redshift Management + Guide . + + :type reserved_node_offering_id: string + :param reserved_node_offering_id: The unique identifier of the reserved + node offering you want to purchase. + + :type node_count: integer + :param node_count: The number of reserved nodes you want to purchase. 
+ Default: `1` + + """ + params = { + 'ReservedNodeOfferingId': reserved_node_offering_id, + } + if node_count is not None: + params['NodeCount'] = node_count + return self._make_request( + action='PurchaseReservedNodeOffering', + verb='POST', + path='/', params=params) + + def reboot_cluster(self, cluster_identifier): + """ + Reboots a cluster. This action is taken as soon as possible. + It results in a momentary outage to the cluster, during which + the cluster status is set to `rebooting`. A cluster event is + created when the reboot is completed. Any pending cluster + modifications (see ModifyCluster) are applied at this reboot. + For more information about managing clusters, go to `Amazon + Redshift Clusters`_ in the Amazon Redshift Management Guide + + :type cluster_identifier: string + :param cluster_identifier: The cluster identifier. + + """ + params = {'ClusterIdentifier': cluster_identifier, } + return self._make_request( + action='RebootCluster', + verb='POST', + path='/', params=params) + + def reset_cluster_parameter_group(self, parameter_group_name, + reset_all_parameters=None, + parameters=None): + """ + Sets one or more parameters of the specified parameter group + to their default values and sets the source values of the + parameters to "engine-default". To reset the entire parameter + group specify the ResetAllParameters parameter. For parameter + changes to take effect you must reboot any associated + clusters. + + :type parameter_group_name: string + :param parameter_group_name: The name of the cluster parameter group to + be reset. + + :type reset_all_parameters: boolean + :param reset_all_parameters: If `True`, all parameters in the specified + parameter group will be reset to their default values. + Default: `True` + + :type parameters: list + :param parameters: An array of names of parameters to be reset. If + ResetAllParameters option is not used, then at least one parameter + name must be supplied. + Constraints: A maximum of 20 parameters can be reset in a single + request. + + """ + params = {'ParameterGroupName': parameter_group_name, } + if reset_all_parameters is not None: + params['ResetAllParameters'] = str( + reset_all_parameters).lower() + if parameters is not None: + self.build_complex_list_params( + params, parameters, + 'Parameters.member', + ('ParameterName', 'ParameterValue', 'Description', 'Source', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion')) + return self._make_request( + action='ResetClusterParameterGroup', + verb='POST', + path='/', params=params) + + def restore_from_cluster_snapshot(self, cluster_identifier, + snapshot_identifier, + snapshot_cluster_identifier=None, + port=None, availability_zone=None, + allow_version_upgrade=None, + cluster_subnet_group_name=None, + publicly_accessible=None, + owner_account=None, + hsm_client_certificate_identifier=None, + hsm_configuration_identifier=None, + elastic_ip=None, + cluster_parameter_group_name=None, + cluster_security_groups=None, + vpc_security_group_ids=None, + preferred_maintenance_window=None, + automated_snapshot_retention_period=None): + """ + Creates a new cluster from a snapshot. Amazon Redshift creates + the resulting cluster with the same configuration as the + original cluster from which the snapshot was created, except + that the new cluster is created with the default cluster + security and parameter group. 
After Amazon Redshift creates
+        the cluster, you can use the ModifyCluster API to associate a
+        different security group and different parameter group with
+        the restored cluster.
+
+        If you restore a cluster into a VPC, you must provide a
+        cluster subnet group where you want the cluster restored.
+
+        For more information about working with snapshots, go to
+        `Amazon Redshift Snapshots`_ in the Amazon Redshift Management
+        Guide.
+
+        :type cluster_identifier: string
+        :param cluster_identifier: The identifier of the cluster that will be
+            created from restoring the snapshot.
+
+            Constraints:
+
+
+            + Must contain from 1 to 63 alphanumeric characters or hyphens.
+            + Alphabetic characters must be lowercase.
+            + First character must be a letter.
+            + Cannot end with a hyphen or contain two consecutive hyphens.
+            + Must be unique for all clusters within an AWS account.
+
+        :type snapshot_identifier: string
+        :param snapshot_identifier: The name of the snapshot from which to
+            create the new cluster. This parameter isn't case sensitive.
+            Example: `my-snapshot-id`
+
+        :type snapshot_cluster_identifier: string
+        :param snapshot_cluster_identifier: The name of the cluster the source
+            snapshot was created from. This parameter is required if your IAM
+            user has a policy containing a snapshot resource element that
+            specifies anything other than * for the cluster name.
+
+        :type port: integer
+        :param port: The port number on which the cluster accepts connections.
+            Default: The same port as the original cluster.
+
+            Constraints: Must be between `1115` and `65535`.
+
+        :type availability_zone: string
+        :param availability_zone: The Amazon EC2 Availability Zone in which to
+            restore the cluster.
+            Default: A random, system-chosen Availability Zone.
+
+            Example: `us-east-1a`
+
+        :type allow_version_upgrade: boolean
+        :param allow_version_upgrade: If `True`, upgrades can be applied during
+            the maintenance window to the Amazon Redshift engine that is
+            running on the cluster.
+            Default: `True`
+
+        :type cluster_subnet_group_name: string
+        :param cluster_subnet_group_name: The name of the subnet group where
+            you want the cluster restored.
+            A snapshot of a cluster in a VPC can be restored only in a VPC.
+            Therefore, you must provide the name of the subnet group where you
+            want the cluster restored.
+
+        :type publicly_accessible: boolean
+        :param publicly_accessible: If `True`, the cluster can be accessed from
+            a public network.
+
+        :type owner_account: string
+        :param owner_account: The AWS customer account used to create or copy
+            the snapshot. Required if you are restoring a snapshot you do not
+            own, optional if you own the snapshot.
+
+        :type hsm_client_certificate_identifier: string
+        :param hsm_client_certificate_identifier: Specifies the name of the HSM
+            client certificate the Amazon Redshift cluster uses to retrieve the
+            data encryption keys stored in an HSM.
+
+        :type hsm_configuration_identifier: string
+        :param hsm_configuration_identifier: Specifies the name of the HSM
+            configuration that contains the information the Amazon Redshift
+            cluster can use to retrieve and store keys in an HSM.
+
+        :type elastic_ip: string
+        :param elastic_ip: The elastic IP (EIP) address for the cluster.
+
+        :type cluster_parameter_group_name: string
+        :param cluster_parameter_group_name:
+            The name of the parameter group to be associated with this cluster.
+
+            Default: The default Amazon Redshift cluster parameter group.
For + information about the default parameter group, go to `Working with + Amazon Redshift Parameter Groups`_. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters or hyphens. + + First character must be a letter. + + Cannot end with a hyphen or contain two consecutive hyphens. + + :type cluster_security_groups: list + :param cluster_security_groups: A list of security groups to be + associated with this cluster. + Default: The default cluster security group for Amazon Redshift. + + Cluster security groups only apply to clusters outside of VPCs. + + :type vpc_security_group_ids: list + :param vpc_security_group_ids: A list of Virtual Private Cloud (VPC) + security groups to be associated with the cluster. + Default: The default VPC security group is associated with the cluster. + + VPC security groups only apply to clusters in VPCs. + + :type preferred_maintenance_window: string + :param preferred_maintenance_window: The weekly time range (in UTC) + during which automated cluster maintenance can occur. + Format: `ddd:hh24:mi-ddd:hh24:mi` + + Default: The value selected for the cluster from which the snapshot was + taken. The following list shows the time blocks for each region + from which the default maintenance windows are assigned. + + + + **US-East (Northern Virginia) Region:** 03:00-11:00 UTC + + **US-West (Oregon) Region** 06:00-14:00 UTC + + **EU (Ireland) Region** 22:00-06:00 UTC + + **Asia Pacific (Singapore) Region** 14:00-22:00 UTC + + **Asia Pacific (Sydney) Region** 12:00-20:00 UTC + + **Asia Pacific (Tokyo) Region** 17:00-03:00 UTC + + + Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun + + Constraints: Minimum 30-minute window. + + :type automated_snapshot_retention_period: integer + :param automated_snapshot_retention_period: The number of days that + automated snapshots are retained. If the value is 0, automated + snapshots are disabled. Even if automated snapshots are disabled, + you can still create manual snapshots when you want with + CreateClusterSnapshot. + Default: The value selected for the cluster from which the snapshot was + taken. + + Constraints: Must be a value from 0 to 35. 
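+
+        A minimal sketch of restoring a snapshot into a VPC, assuming
+        ``conn`` is an open ``RedshiftConnection`` and the identifiers are
+        hypothetical::
+
+            conn.restore_from_cluster_snapshot(
+                cluster_identifier='restored-cluster',
+                snapshot_identifier='my-snapshot-id',
+                cluster_subnet_group_name='my-subnet-group')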
+ + """ + params = { + 'ClusterIdentifier': cluster_identifier, + 'SnapshotIdentifier': snapshot_identifier, + } + if snapshot_cluster_identifier is not None: + params['SnapshotClusterIdentifier'] = snapshot_cluster_identifier + if port is not None: + params['Port'] = port + if availability_zone is not None: + params['AvailabilityZone'] = availability_zone + if allow_version_upgrade is not None: + params['AllowVersionUpgrade'] = str( + allow_version_upgrade).lower() + if cluster_subnet_group_name is not None: + params['ClusterSubnetGroupName'] = cluster_subnet_group_name + if publicly_accessible is not None: + params['PubliclyAccessible'] = str( + publicly_accessible).lower() + if owner_account is not None: + params['OwnerAccount'] = owner_account + if hsm_client_certificate_identifier is not None: + params['HsmClientCertificateIdentifier'] = hsm_client_certificate_identifier + if hsm_configuration_identifier is not None: + params['HsmConfigurationIdentifier'] = hsm_configuration_identifier + if elastic_ip is not None: + params['ElasticIp'] = elastic_ip + if cluster_parameter_group_name is not None: + params['ClusterParameterGroupName'] = cluster_parameter_group_name + if cluster_security_groups is not None: + self.build_list_params(params, + cluster_security_groups, + 'ClusterSecurityGroups.member') + if vpc_security_group_ids is not None: + self.build_list_params(params, + vpc_security_group_ids, + 'VpcSecurityGroupIds.member') + if preferred_maintenance_window is not None: + params['PreferredMaintenanceWindow'] = preferred_maintenance_window + if automated_snapshot_retention_period is not None: + params['AutomatedSnapshotRetentionPeriod'] = automated_snapshot_retention_period + return self._make_request( + action='RestoreFromClusterSnapshot', + verb='POST', + path='/', params=params) + + def revoke_cluster_security_group_ingress(self, + cluster_security_group_name, + cidrip=None, + ec2_security_group_name=None, + ec2_security_group_owner_id=None): + """ + Revokes an ingress rule in an Amazon Redshift security group + for a previously authorized IP range or Amazon EC2 security + group. To add an ingress rule, see + AuthorizeClusterSecurityGroupIngress. For information about + managing security groups, go to `Amazon Redshift Cluster + Security Groups`_ in the Amazon Redshift Management Guide . + + :type cluster_security_group_name: string + :param cluster_security_group_name: The name of the security Group from + which to revoke the ingress rule. + + :type cidrip: string + :param cidrip: The IP range for which to revoke access. This range must + be a valid Classless Inter-Domain Routing (CIDR) block of IP + addresses. If `CIDRIP` is specified, `EC2SecurityGroupName` and + `EC2SecurityGroupOwnerId` cannot be provided. + + :type ec2_security_group_name: string + :param ec2_security_group_name: The name of the EC2 Security Group + whose access is to be revoked. If `EC2SecurityGroupName` is + specified, `EC2SecurityGroupOwnerId` must also be provided and + `CIDRIP` cannot be provided. + + :type ec2_security_group_owner_id: string + :param ec2_security_group_owner_id: The AWS account number of the owner + of the security group specified in the `EC2SecurityGroupName` + parameter. The AWS access key ID is not an acceptable value. If + `EC2SecurityGroupOwnerId` is specified, `EC2SecurityGroupName` must + also be provided. and `CIDRIP` cannot be provided. 
+ Example: `111122223333` + + """ + params = { + 'ClusterSecurityGroupName': cluster_security_group_name, + } + if cidrip is not None: + params['CIDRIP'] = cidrip + if ec2_security_group_name is not None: + params['EC2SecurityGroupName'] = ec2_security_group_name + if ec2_security_group_owner_id is not None: + params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id + return self._make_request( + action='RevokeClusterSecurityGroupIngress', + verb='POST', + path='/', params=params) + + def revoke_snapshot_access(self, snapshot_identifier, + account_with_restore_access, + snapshot_cluster_identifier=None): + """ + Removes the ability of the specified AWS customer account to + restore the specified snapshot. If the account is currently + restoring the snapshot, the restore will run to completion. + + For more information about working with snapshots, go to + `Amazon Redshift Snapshots`_ in the Amazon Redshift Management + Guide . + + :type snapshot_identifier: string + :param snapshot_identifier: The identifier of the snapshot that the + account can no longer access. + + :type snapshot_cluster_identifier: string + :param snapshot_cluster_identifier: The identifier of the cluster the + snapshot was created from. This parameter is required if your IAM + user has a policy containing a snapshot resource element that + specifies anything other than * for the cluster name. + + :type account_with_restore_access: string + :param account_with_restore_access: The identifier of the AWS customer + account that can no longer restore the specified snapshot. + + """ + params = { + 'SnapshotIdentifier': snapshot_identifier, + 'AccountWithRestoreAccess': account_with_restore_access, + } + if snapshot_cluster_identifier is not None: + params['SnapshotClusterIdentifier'] = snapshot_cluster_identifier + return self._make_request( + action='RevokeSnapshotAccess', + verb='POST', + path='/', params=params) + + def rotate_encryption_key(self, cluster_identifier): + """ + Rotates the encryption keys for a cluster. + + :type cluster_identifier: string + :param cluster_identifier: The unique identifier of the cluster that + you want to rotate the encryption keys for. + Constraints: Must be the name of valid cluster that has encryption + enabled. + + """ + params = {'ClusterIdentifier': cluster_identifier, } + return self._make_request( + action='RotateEncryptionKey', + verb='POST', + path='/', params=params) + + def _make_request(self, action, verb, path, params): + params['ContentType'] = 'JSON' + response = self.make_request(action=action, verb='POST', + path='/', params=params) + body = response.read().decode('utf-8') + boto.log.debug(body) + if response.status == 200: + return json.loads(body) + else: + json_body = json.loads(body) + fault_name = json_body.get('Error', {}).get('Code', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/regioninfo.py b/desktop/core/ext-py/boto-2.38.0/boto/regioninfo.py new file mode 100644 index 0000000000000000000000000000000000000000..6aeda122f94127ba11314839d86975b29118f4d2 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/regioninfo.py @@ -0,0 +1,187 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All rights reserved. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import os + +import boto +from boto.compat import json +from boto.exception import BotoClientError + + +def load_endpoint_json(path): + """ + Loads a given JSON file & returns it. + + :param path: The path to the JSON file + :type path: string + + :returns: The loaded data + """ + with open(path, 'r') as endpoints_file: + return json.load(endpoints_file) + + +def merge_endpoints(defaults, additions): + """ + Given an existing set of endpoint data, this will deep-update it with + any similarly structured data in the additions. + + :param defaults: The existing endpoints data + :type defaults: dict + + :param defaults: The additional endpoints data + :type defaults: dict + + :returns: The modified endpoints data + :rtype: dict + """ + # We can't just do an ``defaults.update(...)`` here, as that could + # *overwrite* regions if present in both. + # We'll iterate instead, essentially doing a deeper merge. + for service, region_info in additions.items(): + # Set the default, if not present, to an empty dict. + defaults.setdefault(service, {}) + defaults[service].update(region_info) + + return defaults + + +def load_regions(): + """ + Actually load the region/endpoint information from the JSON files. + + By default, this loads from the default included ``boto/endpoints.json`` + file. + + Users can override/extend this by supplying either a ``BOTO_ENDPOINTS`` + environment variable or a ``endpoints_path`` config variable, either of + which should be an absolute path to the user's JSON file. + + :returns: The endpoints data + :rtype: dict + """ + # Load the defaults first. + endpoints = load_endpoint_json(boto.ENDPOINTS_PATH) + additional_path = None + + # Try the ENV var. If not, check the config file. + if os.environ.get('BOTO_ENDPOINTS'): + additional_path = os.environ['BOTO_ENDPOINTS'] + elif boto.config.get('Boto', 'endpoints_path'): + additional_path = boto.config.get('Boto', 'endpoints_path') + + # If there's a file provided, we'll load it & additively merge it into + # the endpoints. + if additional_path: + additional = load_endpoint_json(additional_path) + endpoints = merge_endpoints(endpoints, additional) + + return endpoints + + +def get_regions(service_name, region_cls=None, connection_cls=None): + """ + Given a service name (like ``ec2``), returns a list of ``RegionInfo`` + objects for that service. + + This leverages the ``endpoints.json`` file (+ optional user overrides) to + configure/construct all the objects. 
+
+
+def get_regions(service_name, region_cls=None, connection_cls=None):
+    """
+    Given a service name (like ``ec2``), returns a list of ``RegionInfo``
+    objects for that service.
+
+    This leverages the ``endpoints.json`` file (+ optional user overrides) to
+    configure/construct all the objects.
+
+    :param service_name: The name of the service to construct the
+        ``RegionInfo`` objects for. Ex: ``ec2``, ``s3``, ``sns``, etc.
+    :type service_name: string
+
+    :param region_cls: (Optional) The class to use when constructing. By
+        default, this is ``RegionInfo``.
+    :type region_cls: class
+
+    :param connection_cls: (Optional) The connection class for the
+        ``RegionInfo`` object. Providing this allows the ``connect`` method on
+        the ``RegionInfo`` to work. Default is ``None`` (no connection).
+    :type connection_cls: class
+
+    :returns: A list of configured ``RegionInfo`` objects
+    :rtype: list
+    """
+    endpoints = load_regions()
+
+    if service_name not in endpoints:
+        raise BotoClientError(
+            "Service '%s' not found in endpoints." % service_name
+        )
+
+    if region_cls is None:
+        region_cls = RegionInfo
+
+    region_objs = []
+
+    for region_name, endpoint in endpoints.get(service_name, {}).items():
+        region_objs.append(
+            region_cls(
+                name=region_name,
+                endpoint=endpoint,
+                connection_cls=connection_cls
+            )
+        )
+
+    return region_objs
+
+
+class RegionInfo(object):
+    """
+    Represents an AWS Region
+    """
+
+    def __init__(self, connection=None, name=None, endpoint=None,
+                 connection_cls=None):
+        self.connection = connection
+        self.name = name
+        self.endpoint = endpoint
+        self.connection_cls = connection_cls
+
+    def __repr__(self):
+        return 'RegionInfo:%s' % self.name
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'regionName':
+            self.name = value
+        elif name == 'regionEndpoint':
+            self.endpoint = value
+        else:
+            setattr(self, name, value)
+
+    def connect(self, **kw_params):
+        """
+        Connect to this Region's endpoint. Returns a connection
+        object pointing to the endpoint associated with this region.
+        You may pass any of the arguments accepted by the connection
+        class's constructor as keyword arguments and they will be
+        passed along to the connection object.
+
+        :rtype: Connection object
+        :return: The connection to this region's endpoint
+        """
+        if self.connection_cls:
+            return self.connection_cls(region=self, **kw_params)
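
A sketch of `get_regions()` and `RegionInfo.connect()` working together (editorial example, not part of the patch; `EC2Connection` is a real boto class, the region choice is arbitrary):

```python
from boto.ec2.connection import EC2Connection
from boto.regioninfo import get_regions

# Build RegionInfo objects whose connect() yields EC2 connections.
regions = get_regions('ec2', connection_cls=EC2Connection)
us_east = [r for r in regions if r.name == 'us-east-1'][0]
conn = us_east.connect()   # an EC2Connection bound to that endpoint
```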
+ """ + def __init__(self, filename='/tmp/request_log.csv'): + self.request_log_file = open(filename, 'w') + self.request_log_queue = Queue.Queue(100) + Thread(target=self._request_log_worker).start() + + def handle_request_data(self, request, response, error=False): + len = 0 if error else response.getheader('Content-Length') + now = datetime.now() + time = now.strftime('%Y-%m-%d %H:%M:%S') + td = (now - request.start_time) + duration = (td.microseconds + long_type(td.seconds + td.days * 24 * 3600) * 1e6) / 1e6 + + # write output including timestamp, status code, response time, response size, request action + self.request_log_queue.put("'%s', '%s', '%s', '%s', '%s'\n" % (time, response.status, duration, len, request.params['Action'])) + + def _request_log_worker(self): + while True: + try: + item = self.request_log_queue.get(True) + self.request_log_file.write(item) + self.request_log_file.flush() + self.request_log_queue.task_done() + except: + import traceback + traceback.print_exc(file=sys.stdout) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/resultset.py b/desktop/core/ext-py/boto-2.38.0/boto/resultset.py new file mode 100644 index 0000000000000000000000000000000000000000..189a47a3a24209b73e370245b0c24d286a0ea271 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/resultset.py @@ -0,0 +1,176 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.s3.user import User + + +class ResultSet(list): + """ + The ResultSet is used to pass results back from the Amazon services + to the client. It is light wrapper around Python's :py:class:`list` class, + with some additional methods for parsing XML results from AWS. + Because I don't really want any dependencies on external libraries, + I'm using the standard SAX parser that comes with Python. The good news is + that it's quite fast and efficient but it makes some things rather + difficult. + + You can pass in, as the marker_elem parameter, a list of tuples. + Each tuple contains a string as the first element which represents + the XML element that the resultset needs to be on the lookout for + and a Python class as the second element of the tuple. Each time the + specified element is found in the XML, a new instance of the class + will be created and popped onto the stack. + + :ivar str next_token: A hash used to assist in paging through very long + result sets. In most cases, passing this value to certain methods + will give you another 'page' of results. 
+ """ + def __init__(self, marker_elem=None): + list.__init__(self) + if isinstance(marker_elem, list): + self.markers = marker_elem + else: + self.markers = [] + self.marker = None + self.key_marker = None + self.next_marker = None # avail when delimiter used + self.next_key_marker = None + self.next_upload_id_marker = None + self.next_version_id_marker = None + self.next_generation_marker = None + self.version_id_marker = None + self.is_truncated = False + self.next_token = None + self.status = True + + def startElement(self, name, attrs, connection): + for t in self.markers: + if name == t[0]: + obj = t[1](connection) + self.append(obj) + return obj + if name == 'Owner': + # Makes owner available for get_service and + # perhaps other lists where not handled by + # another element. + self.owner = User() + return self.owner + return None + + def to_boolean(self, value, true_value='true'): + if value == true_value: + return True + else: + return False + + def endElement(self, name, value, connection): + if name == 'IsTruncated': + self.is_truncated = self.to_boolean(value) + elif name == 'Marker': + self.marker = value + elif name == 'KeyMarker': + self.key_marker = value + elif name == 'NextMarker': + self.next_marker = value + elif name == 'NextKeyMarker': + self.next_key_marker = value + elif name == 'VersionIdMarker': + self.version_id_marker = value + elif name == 'NextVersionIdMarker': + self.next_version_id_marker = value + elif name == 'NextGenerationMarker': + self.next_generation_marker = value + elif name == 'UploadIdMarker': + self.upload_id_marker = value + elif name == 'NextUploadIdMarker': + self.next_upload_id_marker = value + elif name == 'Bucket': + self.bucket = value + elif name == 'MaxUploads': + self.max_uploads = int(value) + elif name == 'MaxItems': + self.max_items = int(value) + elif name == 'Prefix': + self.prefix = value + elif name == 'return': + self.status = self.to_boolean(value) + elif name == 'StatusCode': + self.status = self.to_boolean(value, 'Success') + elif name == 'ItemName': + self.append(value) + elif name == 'NextToken': + self.next_token = value + elif name == 'nextToken': + self.next_token = value + # Code exists which expects nextToken to be available, so we + # set it here to remain backwards-compatibile. 
+    def endElement(self, name, value, connection):
+        if name == 'IsTruncated':
+            self.is_truncated = self.to_boolean(value)
+        elif name == 'Marker':
+            self.marker = value
+        elif name == 'KeyMarker':
+            self.key_marker = value
+        elif name == 'NextMarker':
+            self.next_marker = value
+        elif name == 'NextKeyMarker':
+            self.next_key_marker = value
+        elif name == 'VersionIdMarker':
+            self.version_id_marker = value
+        elif name == 'NextVersionIdMarker':
+            self.next_version_id_marker = value
+        elif name == 'NextGenerationMarker':
+            self.next_generation_marker = value
+        elif name == 'UploadIdMarker':
+            self.upload_id_marker = value
+        elif name == 'NextUploadIdMarker':
+            self.next_upload_id_marker = value
+        elif name == 'Bucket':
+            self.bucket = value
+        elif name == 'MaxUploads':
+            self.max_uploads = int(value)
+        elif name == 'MaxItems':
+            self.max_items = int(value)
+        elif name == 'Prefix':
+            self.prefix = value
+        elif name == 'return':
+            self.status = self.to_boolean(value)
+        elif name == 'StatusCode':
+            self.status = self.to_boolean(value, 'Success')
+        elif name == 'ItemName':
+            self.append(value)
+        elif name == 'NextToken':
+            self.next_token = value
+        elif name == 'nextToken':
+            self.next_token = value
+            # Code exists which expects nextToken to be available, so we
+            # set it here to remain backwards-compatible.
+            self.nextToken = value
+        elif name == 'BoxUsage':
+            try:
+                connection.box_usage += float(value)
+            except Exception:
+                pass
+        elif name == 'IsValid':
+            self.status = self.to_boolean(value, 'True')
+        else:
+            setattr(self, name, value)
+
+
+class BooleanResult(object):
+
+    def __init__(self, marker_elem=None):
+        self.status = True
+        self.request_id = None
+        self.box_usage = None
+
+    def __repr__(self):
+        if self.status:
+            return 'True'
+        else:
+            return 'False'
+
+    def __nonzero__(self):
+        return self.status
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def to_boolean(self, value, true_value='true'):
+        if value == true_value:
+            return True
+        else:
+            return False
+
+    def endElement(self, name, value, connection):
+        if name == 'return':
+            self.status = self.to_boolean(value)
+        elif name == 'StatusCode':
+            self.status = self.to_boolean(value, 'Success')
+        elif name == 'IsValid':
+            self.status = self.to_boolean(value, 'True')
+        elif name == 'RequestId':
+            self.request_id = value
+        elif name == 'requestId':
+            self.request_id = value
+        elif name == 'BoxUsage':
+            self.box_usage = value
+        else:
+            setattr(self, name, value)
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/roboto/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/roboto/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..792d6005489ebee62cde02066f19c5521e620451
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/roboto/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/roboto/awsqueryrequest.py b/desktop/core/ext-py/boto-2.38.0/boto/roboto/awsqueryrequest.py
new file mode 100644
index 0000000000000000000000000000000000000000..793adf90ecf51376cabe7319264e24930dafec78
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/roboto/awsqueryrequest.py
@@ -0,0 +1,503 @@
+# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
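
A sketch of the `ResultSet` class above driving boto's SAX handler (editorial example, not part of the patch; the `Item` element and `Widget` class are invented for illustration):

```python
import xml.sax

from boto import handler
from boto.resultset import ResultSet


class Widget(object):
    # Minimal element class: ResultSet instantiates one per <Item>.
    def __init__(self, connection=None):
        self.name = None

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == 'Name':
            self.name = value


XML = """<ListResponse>
  <IsTruncated>false</IsTruncated>
  <Item><Name>first</Name></Item>
  <Item><Name>second</Name></Item>
</ListResponse>"""

rs = ResultSet([('Item', Widget)])   # marker_elem: (element, class) pairs
xml.sax.parseString(XML, handler.XmlHandler(rs, None))
print([w.name for w in rs])          # ['first', 'second']
print(rs.is_truncated)               # False
```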
+import sys +import os +import boto +import optparse +import copy +import boto.exception +import boto.roboto.awsqueryservice + +import bdb +import traceback +try: + import epdb as debugger +except ImportError: + import pdb as debugger + +def boto_except_hook(debugger_flag, debug_flag): + def excepthook(typ, value, tb): + if typ is bdb.BdbQuit: + sys.exit(1) + sys.excepthook = sys.__excepthook__ + + if debugger_flag and sys.stdout.isatty() and sys.stdin.isatty(): + if debugger.__name__ == 'epdb': + debugger.post_mortem(tb, typ, value) + else: + debugger.post_mortem(tb) + elif debug_flag: + print(traceback.print_tb(tb)) + sys.exit(1) + else: + print(value) + sys.exit(1) + + return excepthook + +class Line(object): + + def __init__(self, fmt, data, label): + self.fmt = fmt + self.data = data + self.label = label + self.line = '%s\t' % label + self.printed = False + + def append(self, datum): + self.line += '%s\t' % datum + + def print_it(self): + if not self.printed: + print(self.line) + self.printed = True + +class RequiredParamError(boto.exception.BotoClientError): + + def __init__(self, required): + self.required = required + s = 'Required parameters are missing: %s' % self.required + super(RequiredParamError, self).__init__(s) + +class EncoderError(boto.exception.BotoClientError): + + def __init__(self, error_msg): + s = 'Error encoding value (%s)' % error_msg + super(EncoderError, self).__init__(s) + +class FilterError(boto.exception.BotoClientError): + + def __init__(self, filters): + self.filters = filters + s = 'Unknown filters: %s' % self.filters + super(FilterError, self).__init__(s) + +class Encoder(object): + + @classmethod + def encode(cls, p, rp, v, label=None): + if p.name.startswith('_'): + return + try: + mthd = getattr(cls, 'encode_'+p.ptype) + mthd(p, rp, v, label) + except AttributeError: + raise EncoderError('Unknown type: %s' % p.ptype) + + @classmethod + def encode_string(cls, p, rp, v, l): + if l: + label = l + else: + label = p.name + rp[label] = v + + encode_file = encode_string + encode_enum = encode_string + + @classmethod + def encode_integer(cls, p, rp, v, l): + if l: + label = l + else: + label = p.name + rp[label] = '%d' % v + + @classmethod + def encode_boolean(cls, p, rp, v, l): + if l: + label = l + else: + label = p.name + if v: + v = 'true' + else: + v = 'false' + rp[label] = v + + @classmethod + def encode_datetime(cls, p, rp, v, l): + if l: + label = l + else: + label = p.name + rp[label] = v + + @classmethod + def encode_array(cls, p, rp, v, l): + v = boto.utils.mklist(v) + if l: + label = l + else: + label = p.name + label = label + '.%d' + for i, value in enumerate(v): + rp[label%(i+1)] = value + +class AWSQueryRequest(object): + + ServiceClass = None + + Description = '' + Params = [] + Args = [] + Filters = [] + Response = {} + + CLITypeMap = {'string' : 'string', + 'integer' : 'int', + 'int' : 'int', + 'enum' : 'choice', + 'datetime' : 'string', + 'dateTime' : 'string', + 'file' : 'string', + 'boolean' : None} + + @classmethod + def name(cls): + return cls.__name__ + + def __init__(self, **args): + self.args = args + self.parser = None + self.cli_options = None + self.cli_args = None + self.cli_output_format = None + self.connection = None + self.list_markers = [] + self.item_markers = [] + self.request_params = {} + self.connection_args = None + + def __repr__(self): + return self.name() + + def get_connection(self, **args): + if self.connection is None: + self.connection = self.ServiceClass(**args) + return self.connection + + @property + def 
status(self): + retval = None + if self.http_response is not None: + retval = self.http_response.status + return retval + + @property + def reason(self): + retval = None + if self.http_response is not None: + retval = self.http_response.reason + return retval + + @property + def request_id(self): + retval = None + if self.aws_response is not None: + retval = getattr(self.aws_response, 'requestId') + return retval + + def process_filters(self): + filters = self.args.get('filters', []) + filter_names = [f['name'] for f in self.Filters] + unknown_filters = [f for f in filters if f not in filter_names] + if unknown_filters: + raise FilterError('Unknown filters: %s' % unknown_filters) + for i, filter in enumerate(self.Filters): + name = filter['name'] + if name in filters: + self.request_params['Filter.%d.Name' % (i+1)] = name + for j, value in enumerate(boto.utils.mklist(filters[name])): + Encoder.encode(filter, self.request_params, value, + 'Filter.%d.Value.%d' % (i+1, j+1)) + + def process_args(self, **args): + """ + Responsible for walking through Params defined for the request and: + + * Matching them with keyword parameters passed to the request + constructor or via the command line. + * Checking to see if all required parameters have been specified + and raising an exception, if not. + * Encoding each value into the set of request parameters that will + be sent in the request to the AWS service. + """ + self.args.update(args) + self.connection_args = copy.copy(self.args) + if 'debug' in self.args and self.args['debug'] >= 2: + boto.set_stream_logger(self.name()) + required = [p.name for p in self.Params+self.Args if not p.optional] + for param in self.Params+self.Args: + if param.long_name: + python_name = param.long_name.replace('-', '_') + else: + python_name = boto.utils.pythonize_name(param.name, '_') + value = None + if python_name in self.args: + value = self.args[python_name] + if value is None: + value = param.default + if value is not None: + if param.name in required: + required.remove(param.name) + if param.request_param: + if param.encoder: + param.encoder(param, self.request_params, value) + else: + Encoder.encode(param, self.request_params, value) + if python_name in self.args: + del self.connection_args[python_name] + if required: + l = [] + for p in self.Params+self.Args: + if p.name in required: + if p.short_name and p.long_name: + l.append('(%s, %s)' % (p.optparse_short_name, + p.optparse_long_name)) + elif p.short_name: + l.append('(%s)' % p.optparse_short_name) + else: + l.append('(%s)' % p.optparse_long_name) + raise RequiredParamError(','.join(l)) + boto.log.debug('request_params: %s' % self.request_params) + self.process_markers(self.Response) + + def process_markers(self, fmt, prev_name=None): + if fmt and fmt['type'] == 'object': + for prop in fmt['properties']: + self.process_markers(prop, fmt['name']) + elif fmt and fmt['type'] == 'array': + self.list_markers.append(prev_name) + self.item_markers.append(fmt['name']) + + def send(self, verb='GET', **args): + self.process_args(**args) + self.process_filters() + conn = self.get_connection(**self.connection_args) + self.http_response = conn.make_request(self.name(), + self.request_params, + verb=verb) + self.body = self.http_response.read() + boto.log.debug(self.body) + if self.http_response.status == 200: + self.aws_response = boto.jsonresponse.Element(list_marker=self.list_markers, + item_marker=self.item_markers) + h = boto.jsonresponse.XmlHandler(self.aws_response, self) + h.parse(self.body) + return 
self.aws_response
+        else:
+            boto.log.error('%s %s' % (self.http_response.status,
+                                      self.http_response.reason))
+            boto.log.error('%s' % self.body)
+            raise conn.ResponseError(self.http_response.status,
+                                     self.http_response.reason,
+                                     self.body)
+
+    def add_standard_options(self):
+        group = optparse.OptionGroup(self.parser, 'Standard Options')
+        # add standard options that all commands get
+        group.add_option('-D', '--debug', action='store_true',
+                         help='Turn on all debugging output')
+        group.add_option('--debugger', action='store_true',
+                         default=False,
+                         help='Enable interactive debugger on error')
+        group.add_option('-U', '--url', action='store',
+                         help='Override service URL with value provided')
+        group.add_option('--region', action='store',
+                         help='Name of the region to connect to')
+        group.add_option('-I', '--access-key-id', action='store',
+                         help='Override access key value')
+        group.add_option('-S', '--secret-key', action='store',
+                         help='Override secret key value')
+        group.add_option('--version', action='store_true',
+                         help='Display version string')
+        if self.Filters:
+            group.add_option('--help-filters', action='store_true',
+                             help='Display list of available filters')
+            group.add_option('--filter', action='append',
+                             metavar=' name=value',
+                             help='A filter for limiting the results')
+        self.parser.add_option_group(group)
+
+    def process_standard_options(self, options, args, d):
+        if hasattr(options, 'help_filters') and options.help_filters:
+            print('Available filters:')
+            for filter in self.Filters:
+                print('%s\t%s' % (filter.name, filter.doc))
+            sys.exit(0)
+        if options.debug:
+            self.args['debug'] = 2
+        if options.url:
+            self.args['url'] = options.url
+        if options.region:
+            self.args['region'] = options.region
+        if options.access_key_id:
+            self.args['aws_access_key_id'] = options.access_key_id
+        if options.secret_key:
+            self.args['aws_secret_access_key'] = options.secret_key
+        if options.version:
+            # TODO - Where should the version # come from?
+ print('version x.xx') + exit(0) + sys.excepthook = boto_except_hook(options.debugger, + options.debug) + + def get_usage(self): + s = 'usage: %prog [options] ' + l = [ a.long_name for a in self.Args ] + s += ' '.join(l) + for a in self.Args: + if a.doc: + s += '\n\n\t%s - %s' % (a.long_name, a.doc) + return s + + def build_cli_parser(self): + self.parser = optparse.OptionParser(description=self.Description, + usage=self.get_usage()) + self.add_standard_options() + for param in self.Params: + ptype = action = choices = None + if param.ptype in self.CLITypeMap: + ptype = self.CLITypeMap[param.ptype] + action = 'store' + if param.ptype == 'boolean': + action = 'store_true' + elif param.ptype == 'array': + if len(param.items) == 1: + ptype = param.items[0]['type'] + action = 'append' + elif param.cardinality != 1: + action = 'append' + if ptype or action == 'store_true': + if param.short_name: + self.parser.add_option(param.optparse_short_name, + param.optparse_long_name, + action=action, type=ptype, + choices=param.choices, + help=param.doc) + elif param.long_name: + self.parser.add_option(param.optparse_long_name, + action=action, type=ptype, + choices=param.choices, + help=param.doc) + + def do_cli(self): + if not self.parser: + self.build_cli_parser() + self.cli_options, self.cli_args = self.parser.parse_args() + d = {} + self.process_standard_options(self.cli_options, self.cli_args, d) + for param in self.Params: + if param.long_name: + p_name = param.long_name.replace('-', '_') + else: + p_name = boto.utils.pythonize_name(param.name) + value = getattr(self.cli_options, p_name) + if param.ptype == 'file' and value: + if value == '-': + value = sys.stdin.read() + else: + path = os.path.expanduser(value) + path = os.path.expandvars(path) + if os.path.isfile(path): + fp = open(path) + value = fp.read() + fp.close() + else: + self.parser.error('Unable to read file: %s' % path) + d[p_name] = value + for arg in self.Args: + if arg.long_name: + p_name = arg.long_name.replace('-', '_') + else: + p_name = boto.utils.pythonize_name(arg.name) + value = None + if arg.cardinality == 1: + if len(self.cli_args) >= 1: + value = self.cli_args[0] + else: + value = self.cli_args + d[p_name] = value + self.args.update(d) + if hasattr(self.cli_options, 'filter') and self.cli_options.filter: + d = {} + for filter in self.cli_options.filter: + name, value = filter.split('=') + d[name] = value + if 'filters' in self.args: + self.args['filters'].update(d) + else: + self.args['filters'] = d + try: + response = self.main() + self.cli_formatter(response) + except RequiredParamError as e: + print(e) + sys.exit(1) + except self.ServiceClass.ResponseError as err: + print('Error(%s): %s' % (err.error_code, err.error_message)) + sys.exit(1) + except boto.roboto.awsqueryservice.NoCredentialsError as err: + print('Unable to find credentials.') + sys.exit(1) + except Exception as e: + print(e) + sys.exit(1) + + def _generic_cli_formatter(self, fmt, data, label=''): + if fmt['type'] == 'object': + for prop in fmt['properties']: + if 'name' in fmt: + if fmt['name'] in data: + data = data[fmt['name']] + if fmt['name'] in self.list_markers: + label = fmt['name'] + if label[-1] == 's': + label = label[0:-1] + label = label.upper() + self._generic_cli_formatter(prop, data, label) + elif fmt['type'] == 'array': + for item in data: + line = Line(fmt, item, label) + if isinstance(item, dict): + for field_name in item: + line.append(item[field_name]) + elif isinstance(item, basestring): + line.append(item) + line.print_it() + + def 
cli_formatter(self, data):
+        """
+        This method is responsible for formatting the output for the
+        command line interface. The default behavior is to call the
+        generic CLI formatter which attempts to print something
+        reasonable. If you want specific formatting, you should
+        override this method and do your own thing.
+
+        :type data: dict
+        :param data: The data returned by AWS.
+        """
+        if data:
+            self._generic_cli_formatter(self.Response, data)
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/roboto/awsqueryservice.py b/desktop/core/ext-py/boto-2.38.0/boto/roboto/awsqueryservice.py
new file mode 100644
index 0000000000000000000000000000000000000000..9bf95ac2be01ca8a611857f3cefc15371d077d0b
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/roboto/awsqueryservice.py
@@ -0,0 +1,122 @@
+from __future__ import print_function
+import os
+import urlparse
+import boto
+import boto.connection
+import boto.jsonresponse
+import boto.exception
+from boto.roboto import awsqueryrequest
+
+class NoCredentialsError(boto.exception.BotoClientError):
+
+    def __init__(self):
+        s = 'Unable to find credentials'
+        super(NoCredentialsError, self).__init__(s)
+
+class AWSQueryService(boto.connection.AWSQueryConnection):
+
+    Name = ''
+    Description = ''
+    APIVersion = ''
+    Authentication = 'sign-v2'
+    Path = '/'
+    Port = 443
+    Provider = 'aws'
+    EnvURL = 'AWS_URL'
+
+    Regions = []
+
+    def __init__(self, **args):
+        self.args = args
+        self.check_for_credential_file()
+        self.check_for_env_url()
+        if 'host' not in self.args:
+            if self.Regions:
+                region_name = self.args.get('region_name',
+                                            self.Regions[0]['name'])
+                for region in self.Regions:
+                    if region['name'] == region_name:
+                        self.args['host'] = region['endpoint']
+        if 'path' not in self.args:
+            self.args['path'] = self.Path
+        if 'port' not in self.args:
+            self.args['port'] = self.Port
+        try:
+            super(AWSQueryService, self).__init__(**self.args)
+            self.aws_response = None
+        except boto.exception.NoAuthHandlerFound:
+            raise NoCredentialsError()
+
+    def check_for_credential_file(self):
+        """
+        Checks for the existence of an AWS credential file.
+        If the environment variable AWS_CREDENTIAL_FILE is
+        set and points to a file, that file will be read and
+        searched for credentials.
+        Note that if credentials have been explicitly passed
+        into the class constructor, those values always take
+        precedence.
+        """
+        if 'AWS_CREDENTIAL_FILE' in os.environ:
+            path = os.environ['AWS_CREDENTIAL_FILE']
+            path = os.path.expanduser(path)
+            path = os.path.expandvars(path)
+            if os.path.isfile(path):
+                fp = open(path)
+                lines = fp.readlines()
+                fp.close()
+                for line in lines:
+                    if line[0] != '#':
+                        if '=' in line:
+                            name, value = line.split('=', 1)
+                            if name.strip() == 'AWSAccessKeyId':
+                                if 'aws_access_key_id' not in self.args:
+                                    value = value.strip()
+                                    self.args['aws_access_key_id'] = value
+                            elif name.strip() == 'AWSSecretKey':
+                                if 'aws_secret_access_key' not in self.args:
+                                    value = value.strip()
+                                    self.args['aws_secret_access_key'] = value
+            else:
+                print('Warning: unable to read AWS_CREDENTIAL_FILE')
+ """ + url = self.args.get('url', None) + if url: + del self.args['url'] + if not url and self.EnvURL in os.environ: + url = os.environ[self.EnvURL] + if url: + rslt = urlparse.urlparse(url) + if 'is_secure' not in self.args: + if rslt.scheme == 'https': + self.args['is_secure'] = True + else: + self.args['is_secure'] = False + + host = rslt.netloc + port = None + l = host.split(':') + if len(l) > 1: + host = l[0] + port = int(l[1]) + if 'host' not in self.args: + self.args['host'] = host + if port and 'port' not in self.args: + self.args['port'] = port + + if rslt.path and 'path' not in self.args: + self.args['path'] = rslt.path + + def _required_auth_capability(self): + return [self.Authentication] + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/roboto/param.py b/desktop/core/ext-py/boto-2.38.0/boto/roboto/param.py new file mode 100644 index 0000000000000000000000000000000000000000..35a25b4af5c6bdce153171184531d293e7e9a155 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/roboto/param.py @@ -0,0 +1,147 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import os + +class Converter(object): + + @classmethod + def convert_string(cls, param, value): + # TODO: could do length validation, etc. 
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/roboto/param.py b/desktop/core/ext-py/boto-2.38.0/boto/roboto/param.py
new file mode 100644
index 0000000000000000000000000000000000000000..35a25b4af5c6bdce153171184531d293e7e9a155
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/roboto/param.py
@@ -0,0 +1,147 @@
+# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import os
+
+class Converter(object):
+
+    @classmethod
+    def convert_string(cls, param, value):
+        # TODO: could do length validation, etc. here
+        if not isinstance(value, basestring):
+            raise ValueError
+        return value
+
+    @classmethod
+    def convert_integer(cls, param, value):
+        # TODO: could do range checking here
+        return int(value)
+
+    @classmethod
+    def convert_boolean(cls, param, value):
+        """
+        For command line arguments, just the presence
+        of the option means True so just return True
+        """
+        return True
+
+    @classmethod
+    def convert_file(cls, param, value):
+        if os.path.exists(value) and not os.path.isdir(value):
+            return value
+        raise ValueError
+
+    @classmethod
+    def convert_dir(cls, param, value):
+        if os.path.isdir(value):
+            return value
+        raise ValueError
+
+    @classmethod
+    def convert(cls, param, value):
+        try:
+            if hasattr(cls, 'convert_'+param.ptype):
+                mthd = getattr(cls, 'convert_'+param.ptype)
+            else:
+                mthd = cls.convert_string
+            return mthd(param, value)
+        except Exception:
+            raise ValueError('Unable to convert value (%s) for '
+                             'parameter %s' % (value, param.name))
+
+class Param(Converter):
+
+    def __init__(self, name=None, ptype='string', optional=True,
+                 short_name=None, long_name=None, doc='',
+                 metavar=None, cardinality=1, default=None,
+                 choices=None, encoder=None, request_param=True):
+        self.name = name
+        self.ptype = ptype
+        self.optional = optional
+        self.short_name = short_name
+        self.long_name = long_name
+        self.doc = doc
+        self.metavar = metavar
+        self.cardinality = cardinality
+        self.default = default
+        self.choices = choices
+        self.encoder = encoder
+        self.request_param = request_param
+
+    @property
+    def optparse_long_name(self):
+        ln = None
+        if self.long_name:
+            ln = '--%s' % self.long_name
+        return ln
+
+    @property
+    def synopsis_long_name(self):
+        ln = None
+        if self.long_name:
+            ln = '--%s' % self.long_name
+        return ln
+
+    @property
+    def getopt_long_name(self):
+        ln = None
+        if self.long_name:
+            ln = '%s' % self.long_name
+            if self.ptype != 'boolean':
+                ln += '='
+        return ln
+
+    @property
+    def optparse_short_name(self):
+        sn = None
+        if self.short_name:
+            sn = '-%s' % self.short_name
+        return sn
+
+    @property
+    def synopsis_short_name(self):
+        sn = None
+        if self.short_name:
+            sn = '-%s' % self.short_name
+        return sn
+
+    @property
+    def getopt_short_name(self):
+        sn = None
+        if self.short_name:
+            sn = '%s' % self.short_name
+            if self.ptype != 'boolean':
+                sn += ':'
+        return sn
+
+    def convert(self, value):
+        """
+        Convert a string value as received in the command line
+        tools to the appropriate type of value.
+        Raise a ValueError if the value can't be converted.
+
+        :type value: str
+        :param value: The value to convert. This should always
+            be a string.
+        """
+        return super(Param, self).convert(self, value)
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/route53/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/route53/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b131f921daa3677c80e532cfcddbbb7037493cb
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/route53/__init__.py
@@ -0,0 +1,87 @@
+# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+# this is here for backward compatibility
+# originally, the Route53Connection class was defined here
+from boto.route53.connection import Route53Connection
+from boto.regioninfo import RegionInfo, get_regions
+
+
+class Route53RegionInfo(RegionInfo):
+
+    def connect(self, **kw_params):
+        """
+        Connect to this Region's endpoint. Returns a connection
+        object pointing to the endpoint associated with this region.
+        You may pass any of the arguments accepted by the connection
+        class's constructor as keyword arguments and they will be
+        passed along to the connection object.
+
+        :rtype: Connection object
+        :return: The connection to this region's endpoint
+        """
+        if self.connection_cls:
+            return self.connection_cls(host=self.endpoint, **kw_params)
+
+
+def regions():
+    """
+    Get all available regions for the Route53 service.
+
+    :rtype: list
+    :return: A list of :class:`boto.regioninfo.RegionInfo` instances
+    """
+    regions = get_regions(
+        'route53',
+        region_cls=Route53RegionInfo,
+        connection_cls=Route53Connection
+    )
+
+    # For historical reasons, we had a "universal" endpoint as well.
+    regions.append(
+        Route53RegionInfo(
+            name='universal',
+            endpoint='route53.amazonaws.com',
+            connection_cls=Route53Connection
+        )
+    )
+
+    return regions
+
+
+def connect_to_region(region_name, **kw_params):
+    """
+    Given a valid region name, return a
+    :class:`boto.route53.connection.Route53Connection`.
+
+    :type: str
+    :param region_name: The name of the region to connect to.
+
+    :rtype: :class:`boto.route53.connection.Route53Connection` or ``None``
+    :return: A connection to the given region, or None if an invalid region
+        name is given
+    """
+    for region in regions():
+        if region.name == region_name:
+            return region.connect(**kw_params)
+    return None
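
A sketch of connecting through the `universal` region appended above (editorial example, not part of the patch; requires valid AWS credentials):

```python
import boto.route53

# 'universal' is the catch-all endpoint that regions() adds.
conn = boto.route53.connect_to_region('universal')
for zone in conn.get_zones():
    print(zone.name)
```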
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/route53/connection.py b/desktop/core/ext-py/boto-2.38.0/boto/route53/connection.py
new file mode 100644
index 0000000000000000000000000000000000000000..23e05ea526bf46690b5fd468de9db81a711318b2
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/route53/connection.py
@@ -0,0 +1,608 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# Copyright (c) 2011 Blue Pines Technologies LLC, Brad Carleton
+# www.bluepines.org
+# Copyright (c) 2012 42 Lines Inc., Jim Browne
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.route53 import exception
+import random
+import uuid
+import xml.sax
+
+import boto
+from boto.connection import AWSAuthConnection
+from boto import handler
+import boto.jsonresponse
+from boto.route53.record import ResourceRecordSets
+from boto.route53.zone import Zone
+from boto.compat import six, urllib
+
+
+HZXML = """<?xml version="1.0" encoding="UTF-8"?>
+<CreateHostedZoneRequest xmlns="%(xmlns)s">
+  <Name>%(name)s</Name>
+  <CallerReference>%(caller_ref)s</CallerReference>
+  <HostedZoneConfig>
+    <Comment>%(comment)s</Comment>
+  </HostedZoneConfig>
+</CreateHostedZoneRequest>"""
+
+HZPXML = """<?xml version="1.0" encoding="UTF-8"?>
+<CreateHostedZoneRequest xmlns="%(xmlns)s">
+  <Name>%(name)s</Name>
+  <VPC>
+    <VPCId>%(vpc_id)s</VPCId>
+    <VPCRegion>%(vpc_region)s</VPCRegion>
+  </VPC>
+  <CallerReference>%(caller_ref)s</CallerReference>
+  <HostedZoneConfig>
+    <Comment>%(comment)s</Comment>
+  </HostedZoneConfig>
+</CreateHostedZoneRequest>"""
+
+# boto.set_stream_logger('dns')
+
+
+class Route53Connection(AWSAuthConnection):
+    DefaultHost = 'route53.amazonaws.com'
+    """The default Route53 API endpoint to connect to."""
+
+    Version = '2013-04-01'
+    """Route53 API version."""
+
+    XMLNameSpace = 'https://route53.amazonaws.com/doc/2013-04-01/'
+    """XML schema for this Route53 API version."""
+
+    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+                 port=None, proxy=None, proxy_port=None,
+                 host=DefaultHost, debug=0, security_token=None,
+                 validate_certs=True, https_connection_factory=None,
+                 profile_name=None):
+        super(Route53Connection, self).__init__(
+            host,
+            aws_access_key_id, aws_secret_access_key,
+            True, port, proxy, proxy_port, debug=debug,
+            security_token=security_token,
+            validate_certs=validate_certs,
+            https_connection_factory=https_connection_factory,
+            profile_name=profile_name)
+
+    def _required_auth_capability(self):
+        return ['route53']
+
+    def make_request(self, action, path, headers=None, data='', params=None):
+        if params:
+            pairs = []
+            for key, val in six.iteritems(params):
+                if val is None:
+                    continue
+                pairs.append(key + '=' + urllib.parse.quote(str(val)))
+            path += '?' + '&'.join(pairs)
+        return super(Route53Connection, self).make_request(
+            action, path, headers, data,
+            retry_handler=self._retry_handler)
+
+    # Hosted Zones
+
+    def get_all_hosted_zones(self, start_marker=None, zone_list=None):
+        """
+        Returns a Python data structure with information about all
+        Hosted Zones defined for the AWS account.
+
+        :param int start_marker: start marker to pass when fetching additional
+            results after a truncated list
+        :param list zone_list: a HostedZones list to prepend to results
+        """
+        params = {}
+        if start_marker:
+            params = {'marker': start_marker}
+        response = self.make_request('GET', '/%s/hostedzone' % self.Version,
+                                     params=params)
+        body = response.read()
+        boto.log.debug(body)
+        if response.status >= 300:
+            raise exception.DNSServerError(response.status,
+                                           response.reason,
+                                           body)
+        e = boto.jsonresponse.Element(list_marker='HostedZones',
+                                      item_marker=('HostedZone',))
+        h = boto.jsonresponse.XmlHandler(e, None)
+        h.parse(body)
+        if zone_list:
+            e['ListHostedZonesResponse']['HostedZones'].extend(zone_list)
+        while 'NextMarker' in e['ListHostedZonesResponse']:
+            next_marker = e['ListHostedZonesResponse']['NextMarker']
+            zone_list = e['ListHostedZonesResponse']['HostedZones']
+            e = self.get_all_hosted_zones(next_marker, zone_list)
+        return e
+
+    def get_hosted_zone(self, hosted_zone_id):
+        """
+        Get detailed information about a particular Hosted Zone.
+
+        :type hosted_zone_id: str
+        :param hosted_zone_id: The unique identifier for the Hosted Zone
+
+        """
+        uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
+        response = self.make_request('GET', uri)
+        body = response.read()
+        boto.log.debug(body)
+        if response.status >= 300:
+            raise exception.DNSServerError(response.status,
+                                           response.reason,
+                                           body)
+        e = boto.jsonresponse.Element(list_marker='NameServers',
+                                      item_marker=('NameServer',))
+        h = boto.jsonresponse.XmlHandler(e, None)
+        h.parse(body)
+        return e
+
+    def get_hosted_zone_by_name(self, hosted_zone_name):
+        """
+        Get detailed information about a particular Hosted Zone.
+
+        :type hosted_zone_name: str
+        :param hosted_zone_name: The fully qualified domain name for the
+            Hosted Zone
+
+        """
+        if hosted_zone_name[-1] != '.':
+            hosted_zone_name += '.'
+        all_hosted_zones = self.get_all_hosted_zones()
+        for zone in all_hosted_zones['ListHostedZonesResponse']['HostedZones']:
+            # check that they gave us the FQDN for their zone
+            if zone['Name'] == hosted_zone_name:
+                return self.get_hosted_zone(zone['Id'].split('/')[-1])
+
+    def create_hosted_zone(self, domain_name, caller_ref=None, comment='',
+                           private_zone=False, vpc_id=None, vpc_region=None):
+        """
+        Create a new Hosted Zone. Returns a Python data structure with
+        information about the newly created Hosted Zone.
+
+        :type domain_name: str
+        :param domain_name: The name of the domain. This should be a
+            fully-specified domain, and should end with a final period
+            as the last label indication. If you omit the final period,
+            Amazon Route 53 assumes the domain is relative to the root.
+            This is the name you have registered with your DNS registrar.
+            It is also the name you will delegate from your registrar to
+            the Amazon Route 53 delegation servers returned in
+            response to this request.
+
+        :type caller_ref: str
+        :param caller_ref: A unique string that identifies the request
+            and that allows failed CreateHostedZone requests to be retried
+            without the risk of executing the operation twice. If you don't
+            provide a value for this, boto will generate a Type 4 UUID and
+            use that.
+
+        :type comment: str
+        :param comment: Any comments you want to include about the hosted
+            zone.
+
+        :type private_zone: bool
+        :param private_zone: Set True if creating a private hosted zone.
+
+        :type vpc_id: str
+        :param vpc_id: When creating a private hosted zone, the VPC Id to
+            associate to is required.
+
+        :type vpc_region: str
+        :param vpc_region: When creating a private hosted zone, the region
+            of the associated VPC is required.
+
+        """
+        if caller_ref is None:
+            caller_ref = str(uuid.uuid4())
+        if private_zone:
+            params = {'name': domain_name,
+                      'caller_ref': caller_ref,
+                      'comment': comment,
+                      'vpc_id': vpc_id,
+                      'vpc_region': vpc_region,
+                      'xmlns': self.XMLNameSpace}
+            xml_body = HZPXML % params
+        else:
+            params = {'name': domain_name,
+                      'caller_ref': caller_ref,
+                      'comment': comment,
+                      'xmlns': self.XMLNameSpace}
+            xml_body = HZXML % params
+        uri = '/%s/hostedzone' % self.Version
+        response = self.make_request('POST', uri,
+                                     {'Content-Type': 'text/xml'}, xml_body)
+        body = response.read()
+        boto.log.debug(body)
+        if response.status == 201:
+            e = boto.jsonresponse.Element(list_marker='NameServers',
+                                          item_marker=('NameServer',))
+            h = boto.jsonresponse.XmlHandler(e, None)
+            h.parse(body)
+            return e
+        else:
+            raise exception.DNSServerError(response.status,
+                                           response.reason,
+                                           body)
+
+    def delete_hosted_zone(self, hosted_zone_id):
+        """
+        Delete the hosted zone specified by the given id.
+
+        :type hosted_zone_id: str
+        :param hosted_zone_id: The hosted zone's id
+
+        """
+        uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
+        response = self.make_request('DELETE', uri)
+        body = response.read()
+        boto.log.debug(body)
+        if response.status not in (200, 204):
+            raise exception.DNSServerError(response.status,
+                                           response.reason,
+                                           body)
+        e = boto.jsonresponse.Element()
+        h = boto.jsonresponse.XmlHandler(e, None)
+        h.parse(body)
+        return e
+
+    # Health checks
+
+    POSTHCXMLBody = """<CreateHealthCheckRequest xmlns="%(xmlns)s">
+        <CallerReference>%(caller_ref)s</CallerReference>
+        %(health_check)s
+    </CreateHealthCheckRequest>"""
+
+    def create_health_check(self, health_check, caller_ref=None):
+        """
+        Create a new Health Check
+
+        :type health_check: HealthCheck
+        :param health_check: HealthCheck object
+
+        :type caller_ref: str
+        :param caller_ref: A unique string that identifies the request
+            and that allows failed CreateHealthCheckRequest requests to be
+            retried without the risk of executing the operation twice. If
+            you don't provide a value for this, boto will generate a Type 4
+            UUID and use that.
+ + """ + if caller_ref is None: + caller_ref = str(uuid.uuid4()) + uri = '/%s/healthcheck' % self.Version + params = {'xmlns': self.XMLNameSpace, + 'caller_ref': caller_ref, + 'health_check': health_check.to_xml() + } + xml_body = self.POSTHCXMLBody % params + response = self.make_request('POST', uri, {'Content-Type': 'text/xml'}, xml_body) + body = response.read() + boto.log.debug(body) + if response.status == 201: + e = boto.jsonresponse.Element() + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e + else: + raise exception.DNSServerError(response.status, response.reason, body) + + def get_list_health_checks(self, maxitems=None, marker=None): + """ + Return a list of health checks + + :type maxitems: int + :param maxitems: Maximum number of items to return + + :type marker: str + :param marker: marker to get next set of items to list + + """ + + params = {} + if maxitems is not None: + params['maxitems'] = maxitems + if marker is not None: + params['marker'] = marker + + uri = '/%s/healthcheck' % (self.Version, ) + response = self.make_request('GET', uri, params=params) + body = response.read() + boto.log.debug(body) + if response.status >= 300: + raise exception.DNSServerError(response.status, + response.reason, + body) + e = boto.jsonresponse.Element(list_marker='HealthChecks', + item_marker=('HealthCheck',)) + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e + + def get_checker_ip_ranges(self): + """ + Return a list of Route53 healthcheck IP ranges + """ + uri = '/%s/checkeripranges' % self.Version + response = self.make_request('GET', uri) + body = response.read() + boto.log.debug(body) + if response.status >= 300: + raise exception.DNSServerError(response.status, + response.reason, + body) + e = boto.jsonresponse.Element(list_marker='CheckerIpRanges', item_marker=('member',)) + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e + + def delete_health_check(self, health_check_id): + """ + Delete a health check + + :type health_check_id: str + :param health_check_id: ID of the health check to delete + + """ + uri = '/%s/healthcheck/%s' % (self.Version, health_check_id) + response = self.make_request('DELETE', uri) + body = response.read() + boto.log.debug(body) + if response.status not in (200, 204): + raise exception.DNSServerError(response.status, + response.reason, + body) + e = boto.jsonresponse.Element() + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e + + # Resource Record Sets + + def get_all_rrsets(self, hosted_zone_id, type=None, + name=None, identifier=None, maxitems=None): + """ + Retrieve the Resource Record Sets defined for this Hosted Zone. + Returns the raw XML data returned by the Route53 call. + + :type hosted_zone_id: str + :param hosted_zone_id: The unique identifier for the Hosted Zone + + :type type: str + :param type: The type of resource record set to begin the record + listing from. 
Valid choices are: + + * A + * AAAA + * CNAME + * MX + * NS + * PTR + * SOA + * SPF + * SRV + * TXT + + Valid values for weighted resource record sets: + + * A + * AAAA + * CNAME + * TXT + + Valid values for Zone Apex Aliases: + + * A + * AAAA + + :type name: str + :param name: The first name in the lexicographic ordering of domain + names to be retrieved + + :type identifier: str + :param identifier: In a hosted zone that includes weighted resource + record sets (multiple resource record sets with the same DNS + name and type that are differentiated only by SetIdentifier), + if results were truncated for a given DNS name and type, + the value of SetIdentifier for the next resource record + set that has the current DNS name and type + + :type maxitems: int + :param maxitems: The maximum number of records + + """ + params = {'type': type, 'name': name, + 'identifier': identifier, 'maxitems': maxitems} + uri = '/%s/hostedzone/%s/rrset' % (self.Version, hosted_zone_id) + response = self.make_request('GET', uri, params=params) + body = response.read() + boto.log.debug(body) + if response.status >= 300: + raise exception.DNSServerError(response.status, + response.reason, + body) + rs = ResourceRecordSets(connection=self, hosted_zone_id=hosted_zone_id) + h = handler.XmlHandler(rs, self) + xml.sax.parseString(body, h) + return rs + + def change_rrsets(self, hosted_zone_id, xml_body): + """ + Create or change the authoritative DNS information for this + Hosted Zone. + Returns a Python data structure with information about the set of + changes, including the Change ID. + + :type hosted_zone_id: str + :param hosted_zone_id: The unique identifier for the Hosted Zone + + :type xml_body: str + :param xml_body: The list of changes to be made, defined in the + XML schema defined by the Route53 service. + + """ + uri = '/%s/hostedzone/%s/rrset' % (self.Version, hosted_zone_id) + response = self.make_request('POST', uri, + {'Content-Type': 'text/xml'}, + xml_body) + body = response.read() + boto.log.debug(body) + if response.status >= 300: + raise exception.DNSServerError(response.status, + response.reason, + body) + e = boto.jsonresponse.Element() + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e + + def get_change(self, change_id): + """ + Get information about a proposed set of changes, as submitted + by the change_rrsets method. + Returns a Python data structure with status information about the + changes. + + :type change_id: str + :param change_id: The unique identifier for the set of changes. + This ID is returned in the response to the change_rrsets method. + + """ + uri = '/%s/change/%s' % (self.Version, change_id) + response = self.make_request('GET', uri) + body = response.read() + boto.log.debug(body) + if response.status >= 300: + raise exception.DNSServerError(response.status, + response.reason, + body) + e = boto.jsonresponse.Element() + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e + + def create_zone(self, name, private_zone=False, + vpc_id=None, vpc_region=None): + """ + Create a new Hosted Zone. Returns a Zone object for the newly + created Hosted Zone. + + :type name: str + :param name: The name of the domain. This should be a + fully-specified domain, and should end with a final period + as the last label indication. If you omit the final period, + Amazon Route 53 assumes the domain is relative to the root. + This is the name you have registered with your DNS registrar. 
+            It is also the name you will delegate from your registrar to
+            the Amazon Route 53 delegation servers returned in
+            response to this request.
+
+        :type private_zone: bool
+        :param private_zone: Set True if creating a private hosted zone.
+
+        :type vpc_id: str
+        :param vpc_id: When creating a private hosted zone, the VPC Id to
+            associate to is required.
+
+        :type vpc_region: str
+        :param vpc_region: When creating a private hosted zone, the region
+            of the associated VPC is required.
+        """
+        zone = self.create_hosted_zone(name, private_zone=private_zone,
+                                       vpc_id=vpc_id, vpc_region=vpc_region)
+        return Zone(self, zone['CreateHostedZoneResponse']['HostedZone'])
+
+    def get_zone(self, name):
+        """
+        Returns a Zone object for the specified Hosted Zone.
+
+        :param name: The name of the domain. This should be a
+            fully-specified domain, and should end with a final period
+            as the last label indication.
+        """
+        name = self._make_qualified(name)
+        for zone in self.get_zones():
+            if name == zone.name:
+                return zone
+
+    def get_zones(self):
+        """
+        Returns a list of Zone objects, one for each of the Hosted
+        Zones defined for the AWS account.
+
+        :rtype: list
+        :returns: A list of Zone objects.
+
+        """
+        zones = self.get_all_hosted_zones()
+        return [Zone(self, zone) for zone in
+                zones['ListHostedZonesResponse']['HostedZones']]
+
+    def _make_qualified(self, value):
+        """
+        Ensure passed domain names end in a period (.) character.
+        This will usually make a domain fully qualified.
+        """
+        if type(value) in [list, tuple, set]:
+            new_list = []
+            for record in value:
+                if record and not record[-1] == '.':
+                    new_list.append("%s." % record)
+                else:
+                    new_list.append(record)
+            return new_list
+        else:
+            value = value.strip()
+            if value and not value[-1] == '.':
+                value = "%s." % value
+            return value
+
+    def _retry_handler(self, response, i, next_sleep):
+        status = None
+        boto.log.debug("Saw HTTP status: %s" % response.status)
+
+        if response.status == 400:
+            code = response.getheader('Code')
+
+            if code:
+                # This is a case where we need to ignore a 400 error, as
+                # Route53 returns this. See
+                # http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html
+                if 'PriorRequestNotComplete' in code:
+                    error = 'PriorRequestNotComplete'
+                elif 'Throttling' in code:
+                    error = 'Throttling'
+                else:
+                    return status
+                msg = "%s, retry attempt %s" % (
+                    error,
+                    i
+                )
+                next_sleep = min(random.random() * (2 ** i),
+                                 boto.config.get('Boto', 'max_retry_delay', 60))
+                i += 1
+                status = (msg, i, next_sleep)
+
+        return status
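
A sketch of the typical record-change round trip against this connection class (editorial example, not part of the patch; the zone and record names are placeholders):

```python
import boto.route53
from boto.route53.record import ResourceRecordSets

conn = boto.route53.connect_to_region('universal')
zone = conn.create_zone('example.com.')

# Batch one CREATE into a change set and submit it.
changes = ResourceRecordSets(conn, zone.id)
record = changes.add_change('CREATE', 'www.example.com.', 'A', ttl=300)
record.add_value('192.0.2.10')
result = changes.commit()        # POSTs through change_rrsets()
print(result)
```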
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/route53/domains/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/route53/domains/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ecd07210d507fcdb2a3e28d78a9e0a122e6fd298
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/route53/domains/__init__.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.regioninfo import RegionInfo, get_regions
+
+
+def regions():
+    """
+    Get all available regions for the Amazon Route 53 Domains service.
+
+    :rtype: list
+    :return: A list of :class:`boto.regioninfo.RegionInfo`
+    """
+    from boto.route53.domains.layer1 import Route53DomainsConnection
+    return get_regions('route53domains',
+                       connection_cls=Route53DomainsConnection)
+
+
+def connect_to_region(region_name, **kw_params):
+    for region in regions():
+        if region.name == region_name:
+            return region.connect(**kw_params)
+    return None
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/route53/domains/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/route53/domains/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..51eb6730036503b45bba7d41811545c3395bc29f
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/route53/domains/exceptions.py
@@ -0,0 +1,46 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+# +from boto.exception import BotoServerError + + +class DuplicateRequest(BotoServerError): + pass + + +class DomainLimitExceeded(BotoServerError): + pass + + +class InvalidInput(BotoServerError): + pass + + +class OperationLimitExceeded(BotoServerError): + pass + + +class UnsupportedTLD(BotoServerError): + pass + + +class TLDRulesViolation(BotoServerError): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/route53/domains/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/route53/domains/layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..537948f7b7572affa97b71a5a137d1b729e247e5 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/route53/domains/layer1.py @@ -0,0 +1,868 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.route53.domains import exceptions + + +class Route53DomainsConnection(AWSQueryConnection): + """ + + """ + APIVersion = "2014-05-15" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "route53domains.us-east-1.amazonaws.com" + ServiceName = "Route53Domains" + TargetPrefix = "Route53Domains_v20140515" + ResponseError = JSONResponseError + + _faults = { + "DuplicateRequest": exceptions.DuplicateRequest, + "DomainLimitExceeded": exceptions.DomainLimitExceeded, + "InvalidInput": exceptions.InvalidInput, + "OperationLimitExceeded": exceptions.OperationLimitExceeded, + "UnsupportedTLD": exceptions.UnsupportedTLD, + "TLDRulesViolation": exceptions.TLDRulesViolation, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(Route53DomainsConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def check_domain_availability(self, domain_name, idn_lang_code=None): + """ + This operation checks the availability of one domain name. You + can access this API without authenticating. Note that if the + availability status of a domain is pending, you must submit + another request to determine the availability of the domain + name. 
+ + :type domain_name: string + :param domain_name: The name of a domain. + Type: String + + Default: None + + Constraints: The domain name can contain only the letters a through z, + the numbers 0 through 9, and hyphen (-). Internationalized Domain + Names are not supported. + + Required: Yes + + :type idn_lang_code: string + :param idn_lang_code: Reserved for future use. + + """ + params = {'DomainName': domain_name, } + if idn_lang_code is not None: + params['IdnLangCode'] = idn_lang_code + return self.make_request(action='CheckDomainAvailability', + body=json.dumps(params)) + + def disable_domain_transfer_lock(self, domain_name): + """ + This operation removes the transfer lock on the domain + (specifically the `clientTransferProhibited` status) to allow + domain transfers. We recommend you refrain from performing + this action unless you intend to transfer the domain to a + different registrar. Successful submission returns an + operation ID that you can use to track the progress and + completion of the action. If the request is not completed + successfully, the domain registrant will be notified by email. + + :type domain_name: string + :param domain_name: The name of a domain. + Type: String + + Default: None + + Constraints: The domain name can contain only the letters a through z, + the numbers 0 through 9, and hyphen (-). Internationalized Domain + Names are not supported. + + Required: Yes + + """ + params = {'DomainName': domain_name, } + return self.make_request(action='DisableDomainTransferLock', + body=json.dumps(params)) + + def enable_domain_transfer_lock(self, domain_name): + """ + This operation sets the transfer lock on the domain + (specifically the `clientTransferProhibited` status) to + prevent domain transfers. Successful submission returns an + operation ID that you can use to track the progress and + completion of the action. If the request is not completed + successfully, the domain registrant will be notified by email. + + :type domain_name: string + :param domain_name: The name of a domain. + Type: String + + Default: None + + Constraints: The domain name can contain only the letters a through z, + the numbers 0 through 9, and hyphen (-). Internationalized Domain + Names are not supported. + + Required: Yes + + """ + params = {'DomainName': domain_name, } + return self.make_request(action='EnableDomainTransferLock', + body=json.dumps(params)) + + def get_domain_detail(self, domain_name): + """ + This operation returns detailed information about the domain. + The domain's contact information is also returned as part of + the output. + + :type domain_name: string + :param domain_name: The name of a domain. + Type: String + + Default: None + + Constraints: The domain name can contain only the letters a through z, + the numbers 0 through 9, and hyphen (-). Internationalized Domain + Names are not supported. + + Required: Yes + + """ + params = {'DomainName': domain_name, } + return self.make_request(action='GetDomainDetail', + body=json.dumps(params)) + + def get_operation_detail(self, operation_id): + """ + This operation returns the current status of an operation that + is not completed. + + :type operation_id: string + :param operation_id: The identifier for the operation for which you + want to get the status. Amazon Route 53 returned the identifier in + the response to the original request. 
+            Type: String
+
+        Default: None
+
+        Required: Yes
+
+        """
+        params = {'OperationId': operation_id, }
+        return self.make_request(action='GetOperationDetail',
+                                 body=json.dumps(params))
+
+    def list_domains(self, marker=None, max_items=None):
+        """
+        This operation returns all the domain names registered with
+        Amazon Route 53 for the current AWS account.
+
+        :type marker: string
+        :param marker: For an initial request for a list of domains, omit this
+            element. If the number of domains that are associated with the
+            current AWS account is greater than the value that you specified
+            for `MaxItems`, you can use `Marker` to return additional domains.
+            Get the value of `NextPageMarker` from the previous response, and
+            submit another request that includes the value of `NextPageMarker`
+            in the `Marker` element.
+            Type: String
+
+        Default: None
+
+        Constraints: The marker must match the value specified in the previous
+            request.
+
+        Required: No
+
+        :type max_items: integer
+        :param max_items: Number of domains to be returned.
+            Type: Integer
+
+        Default: 20
+
+        Constraints: A value between 1 and 100.
+
+        Required: No
+
+        """
+        params = {}
+        if marker is not None:
+            params['Marker'] = marker
+        if max_items is not None:
+            params['MaxItems'] = max_items
+        return self.make_request(action='ListDomains',
+                                 body=json.dumps(params))
+
+    def list_operations(self, marker=None, max_items=None):
+        """
+        This operation returns the operation IDs of operations that
+        are not yet complete.
+
+        :type marker: string
+        :param marker: For an initial request for a list of operations, omit
+            this element. If the number of operations that are not yet complete
+            is greater than the value that you specified for `MaxItems`, you
+            can use `Marker` to return additional operations. Get the value of
+            `NextPageMarker` from the previous response, and submit another
+            request that includes the value of `NextPageMarker` in the `Marker`
+            element.
+            Type: String
+
+        Default: None
+
+        Required: No
+
+        :type max_items: integer
+        :param max_items: Number of operations to be returned.
+            Type: Integer
+
+        Default: 20
+
+        Constraints: A value between 1 and 100.
+
+        Required: No
+
+        """
+        params = {}
+        if marker is not None:
+            params['Marker'] = marker
+        if max_items is not None:
+            params['MaxItems'] = max_items
+        return self.make_request(action='ListOperations',
+                                 body=json.dumps(params))
+
+    def register_domain(self, domain_name, duration_in_years, admin_contact,
+                        registrant_contact, tech_contact, idn_lang_code=None,
+                        auto_renew=None, privacy_protect_admin_contact=None,
+                        privacy_protect_registrant_contact=None,
+                        privacy_protect_tech_contact=None):
+        """
+        This operation registers a domain. Domains are registered by
+        the AWS registrar partner, Gandi. For some top-level domains
+        (TLDs), this operation requires extra parameters.
+
+        When you register a domain, Amazon Route 53 does the
+        following:
+
+
+        + Creates an Amazon Route 53 hosted zone that has the same name
+          as the domain. Amazon Route 53 assigns four name servers to
+          your hosted zone and automatically updates your domain
+          registration with the names of these name servers.
+        + Enables autorenew, so your domain registration will renew
+          automatically each year. We'll notify you in advance of the
+          renewal date so you can choose whether to renew the
+          registration.
+        + Optionally enables privacy protection, so WHOIS queries
+          return contact information for our registrar partner, Gandi,
+          instead of the information you entered for registrant, admin,
+          and tech contacts.
+ + If registration is successful, returns an operation ID that + you can use to track the progress and completion of the + action. If the request is not completed successfully, the + domain registrant is notified by email. + + Charges your AWS account an amount based on the top-level + domain. For more information, see `Amazon Route 53 Pricing`_. + + :type domain_name: string + :param domain_name: The name of a domain. + Type: String + + Default: None + + Constraints: The domain name can contain only the letters a through z, + the numbers 0 through 9, and hyphen (-). Internationalized Domain + Names are not supported. + + Required: Yes + + :type idn_lang_code: string + :param idn_lang_code: Reserved for future use. + + :type duration_in_years: integer + :param duration_in_years: The number of years the domain will be + registered. Domains are registered for a minimum of one year. The + maximum period depends on the top-level domain. + Type: Integer + + Default: 1 + + Valid values: Integer from 1 to 10 + + Required: Yes + + :type auto_renew: boolean + :param auto_renew: Indicates whether the domain will be automatically + renewed ( `True`) or not ( `False`). Autorenewal only takes effect + after the account is charged. + Type: Boolean + + Valid values: `True` | `False` + + Default: `True` + + Required: No + + :type admin_contact: dict + :param admin_contact: Provides detailed contact information. + Type: Complex + + Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`, + `OrganizationName`, `AddressLine1`, `AddressLine2`, `City`, + `State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`, + `ExtraParams` + + Required: Yes + + :type registrant_contact: dict + :param registrant_contact: Provides detailed contact information. + Type: Complex + + Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`, + `OrganizationName`, `AddressLine1`, `AddressLine2`, `City`, + `State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`, + `ExtraParams` + + Required: Yes + + :type tech_contact: dict + :param tech_contact: Provides detailed contact information. + Type: Complex + + Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`, + `OrganizationName`, `AddressLine1`, `AddressLine2`, `City`, + `State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`, + `ExtraParams` + + Required: Yes + + :type privacy_protect_admin_contact: boolean + :param privacy_protect_admin_contact: Whether you want to conceal + contact information from WHOIS queries. If you specify true, WHOIS + ("who is") queries will return contact information for our + registrar partner, Gandi, instead of the contact information that + you enter. + Type: Boolean + + Default: `True` + + Valid values: `True` | `False` + + Required: No + + :type privacy_protect_registrant_contact: boolean + :param privacy_protect_registrant_contact: Whether you want to conceal + contact information from WHOIS queries. If you specify true, WHOIS + ("who is") queries will return contact information for our + registrar partner, Gandi, instead of the contact information that + you enter. + Type: Boolean + + Default: `True` + + Valid values: `True` | `False` + + Required: No + + :type privacy_protect_tech_contact: boolean + :param privacy_protect_tech_contact: Whether you want to conceal + contact information from WHOIS queries. If you specify true, WHOIS + ("who is") queries will return contact information for our + registrar partner, Gandi, instead of the contact information that + you enter. 
+            Type: Boolean
+
+        Default: `True`
+
+        Valid values: `True` | `False`
+
+        Required: No
+
+        """
+        params = {
+            'DomainName': domain_name,
+            'DurationInYears': duration_in_years,
+            'AdminContact': admin_contact,
+            'RegistrantContact': registrant_contact,
+            'TechContact': tech_contact,
+        }
+        if idn_lang_code is not None:
+            params['IdnLangCode'] = idn_lang_code
+        if auto_renew is not None:
+            params['AutoRenew'] = auto_renew
+        if privacy_protect_admin_contact is not None:
+            params['PrivacyProtectAdminContact'] = privacy_protect_admin_contact
+        if privacy_protect_registrant_contact is not None:
+            params['PrivacyProtectRegistrantContact'] = privacy_protect_registrant_contact
+        if privacy_protect_tech_contact is not None:
+            params['PrivacyProtectTechContact'] = privacy_protect_tech_contact
+        return self.make_request(action='RegisterDomain',
+                                 body=json.dumps(params))
+
+    def retrieve_domain_auth_code(self, domain_name):
+        """
+        This operation returns the AuthCode for the domain. To
+        transfer a domain to another registrar, you provide this value
+        to the new registrar.
+
+        :type domain_name: string
+        :param domain_name: The name of a domain.
+            Type: String
+
+        Default: None
+
+        Constraints: The domain name can contain only the letters a through z,
+            the numbers 0 through 9, and hyphen (-). Internationalized Domain
+            Names are not supported.
+
+        Required: Yes
+
+        """
+        params = {'DomainName': domain_name, }
+        return self.make_request(action='RetrieveDomainAuthCode',
+                                 body=json.dumps(params))
+
+    def transfer_domain(self, domain_name, duration_in_years, nameservers,
+                        admin_contact, registrant_contact, tech_contact,
+                        idn_lang_code=None, auth_code=None, auto_renew=None,
+                        privacy_protect_admin_contact=None,
+                        privacy_protect_registrant_contact=None,
+                        privacy_protect_tech_contact=None):
+        """
+        This operation transfers a domain from another registrar to
+        Amazon Route 53. Domains are registered by the AWS registrar
+        partner, Gandi, upon transfer.
+
+        To transfer a domain, you need to meet all the domain transfer
+        criteria, including the following:
+
+
+        + You must supply nameservers to transfer a domain.
+        + You must disable the domain transfer lock (if any) before
+          transferring the domain.
+        + A minimum of 60 days must have elapsed since the domain's
+          registration or last transfer.
+
+
+        We recommend you use Amazon Route 53 as the DNS service
+        for your domain. You can create a hosted zone in Amazon Route
+        53 for your current domain before transferring your domain.
+
+        Note that upon transfer, the domain duration is extended for a
+        year if not otherwise specified. Autorenew is enabled by
+        default.
+
+        If the transfer is successful, this method returns an
+        operation ID that you can use to track the progress and
+        completion of the action. If the request is not completed
+        successfully, the domain registrant will be notified by email.
+
+        Transferring domains charges your AWS account an amount based
+        on the top-level domain. For more information, see `Amazon
+        Route 53 Pricing`_.
+
+        :type domain_name: string
+        :param domain_name: The name of a domain.
+            Type: String
+
+        Default: None
+
+        Constraints: The domain name can contain only the letters a through z,
+            the numbers 0 through 9, and hyphen (-). Internationalized Domain
+            Names are not supported.
+
+        Required: Yes
+
+        :type idn_lang_code: string
+        :param idn_lang_code: Reserved for future use.
+
+        :type duration_in_years: integer
+        :param duration_in_years: The number of years the domain will be
+            registered.
Domains are registered for a minimum of one year. The + maximum period depends on the top-level domain. + Type: Integer + + Default: 1 + + Valid values: Integer from 1 to 10 + + Required: Yes + + :type nameservers: list + :param nameservers: Contains details for the host and glue IP + addresses. + Type: Complex + + Children: `GlueIps`, `Name` + + :type auth_code: string + :param auth_code: The authorization code for the domain. You get this + value from the current registrar. + Type: String + + Required: Yes + + :type auto_renew: boolean + :param auto_renew: Indicates whether the domain will be automatically + renewed (true) or not (false). Autorenewal only takes effect after + the account is charged. + Type: Boolean + + Valid values: `True` | `False` + + Default: true + + Required: No + + :type admin_contact: dict + :param admin_contact: Provides detailed contact information. + Type: Complex + + Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`, + `OrganizationName`, `AddressLine1`, `AddressLine2`, `City`, + `State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`, + `ExtraParams` + + Required: Yes + + :type registrant_contact: dict + :param registrant_contact: Provides detailed contact information. + Type: Complex + + Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`, + `OrganizationName`, `AddressLine1`, `AddressLine2`, `City`, + `State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`, + `ExtraParams` + + Required: Yes + + :type tech_contact: dict + :param tech_contact: Provides detailed contact information. + Type: Complex + + Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`, + `OrganizationName`, `AddressLine1`, `AddressLine2`, `City`, + `State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`, + `ExtraParams` + + Required: Yes + + :type privacy_protect_admin_contact: boolean + :param privacy_protect_admin_contact: Whether you want to conceal + contact information from WHOIS queries. If you specify true, WHOIS + ("who is") queries will return contact information for our + registrar partner, Gandi, instead of the contact information that + you enter. + Type: Boolean + + Default: `True` + + Valid values: `True` | `False` + + Required: No + + :type privacy_protect_registrant_contact: boolean + :param privacy_protect_registrant_contact: Whether you want to conceal + contact information from WHOIS queries. If you specify true, WHOIS + ("who is") queries will return contact information for our + registrar partner, Gandi, instead of the contact information that + you enter. + Type: Boolean + + Default: `True` + + Valid values: `True` | `False` + + Required: No + + :type privacy_protect_tech_contact: boolean + :param privacy_protect_tech_contact: Whether you want to conceal + contact information from WHOIS queries. If you specify true, WHOIS + ("who is") queries will return contact information for our + registrar partner, Gandi, instead of the contact information that + you enter. 
+            Type: Boolean
+
+        Default: `True`
+
+        Valid values: `True` | `False`
+
+        Required: No
+
+        """
+        params = {
+            'DomainName': domain_name,
+            'DurationInYears': duration_in_years,
+            'Nameservers': nameservers,
+            'AdminContact': admin_contact,
+            'RegistrantContact': registrant_contact,
+            'TechContact': tech_contact,
+        }
+        if idn_lang_code is not None:
+            params['IdnLangCode'] = idn_lang_code
+        if auth_code is not None:
+            params['AuthCode'] = auth_code
+        if auto_renew is not None:
+            params['AutoRenew'] = auto_renew
+        if privacy_protect_admin_contact is not None:
+            params['PrivacyProtectAdminContact'] = privacy_protect_admin_contact
+        if privacy_protect_registrant_contact is not None:
+            params['PrivacyProtectRegistrantContact'] = privacy_protect_registrant_contact
+        if privacy_protect_tech_contact is not None:
+            params['PrivacyProtectTechContact'] = privacy_protect_tech_contact
+        return self.make_request(action='TransferDomain',
+                                 body=json.dumps(params))
+
+    def update_domain_contact(self, domain_name, admin_contact=None,
+                              registrant_contact=None, tech_contact=None):
+        """
+        This operation updates the contact information for a
+        particular domain. Information for at least one contact
+        (registrant, administrator, or technical) must be supplied for
+        update.
+
+        If the update is successful, this method returns an operation
+        ID that you can use to track the progress and completion of
+        the action. If the request is not completed successfully, the
+        domain registrant will be notified by email.
+
+        :type domain_name: string
+        :param domain_name: The name of a domain.
+            Type: String
+
+        Default: None
+
+        Constraints: The domain name can contain only the letters a through z,
+            the numbers 0 through 9, and hyphen (-). Internationalized Domain
+            Names are not supported.
+
+        Required: Yes
+
+        :type admin_contact: dict
+        :param admin_contact: Provides detailed contact information.
+            Type: Complex
+
+        Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`,
+            `OrganizationName`, `AddressLine1`, `AddressLine2`, `City`,
+            `State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`,
+            `ExtraParams`
+
+        Required: No (at least one of the three contacts is required)
+
+        :type registrant_contact: dict
+        :param registrant_contact: Provides detailed contact information.
+            Type: Complex
+
+        Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`,
+            `OrganizationName`, `AddressLine1`, `AddressLine2`, `City`,
+            `State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`,
+            `ExtraParams`
+
+        Required: No (at least one of the three contacts is required)
+
+        :type tech_contact: dict
+        :param tech_contact: Provides detailed contact information.
+            Type: Complex
+
+        Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`,
+            `OrganizationName`, `AddressLine1`, `AddressLine2`, `City`,
+            `State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`,
+            `ExtraParams`
+
+        Required: No (at least one of the three contacts is required)
+
+        """
+        params = {'DomainName': domain_name, }
+        if admin_contact is not None:
+            params['AdminContact'] = admin_contact
+        if registrant_contact is not None:
+            params['RegistrantContact'] = registrant_contact
+        if tech_contact is not None:
+            params['TechContact'] = tech_contact
+        return self.make_request(action='UpdateDomainContact',
+                                 body=json.dumps(params))
+
+    def update_domain_contact_privacy(self, domain_name, admin_privacy=None,
+                                      registrant_privacy=None,
+                                      tech_privacy=None):
+        """
+        This operation updates the specified domain contact's privacy
+        setting. When the privacy option is enabled, personal
+        information such as postal or email address is hidden from the
+        results of a public WHOIS query.
The privacy services are + provided by the AWS registrar, Gandi. For more information, + see the `Gandi privacy features`_. + + This operation only affects the privacy of the specified + contact type (registrant, administrator, or tech). Successful + acceptance returns an operation ID that you can use with + GetOperationDetail to track the progress and completion of the + action. If the request is not completed successfully, the + domain registrant will be notified by email. + + :type domain_name: string + :param domain_name: The name of a domain. + Type: String + + Default: None + + Constraints: The domain name can contain only the letters a through z, + the numbers 0 through 9, and hyphen (-). Internationalized Domain + Names are not supported. + + Required: Yes + + :type admin_privacy: boolean + :param admin_privacy: Whether you want to conceal contact information + from WHOIS queries. If you specify true, WHOIS ("who is") queries + will return contact information for our registrar partner, Gandi, + instead of the contact information that you enter. + Type: Boolean + + Default: None + + Valid values: `True` | `False` + + Required: No + + :type registrant_privacy: boolean + :param registrant_privacy: Whether you want to conceal contact + information from WHOIS queries. If you specify true, WHOIS ("who + is") queries will return contact information for our registrar + partner, Gandi, instead of the contact information that you enter. + Type: Boolean + + Default: None + + Valid values: `True` | `False` + + Required: No + + :type tech_privacy: boolean + :param tech_privacy: Whether you want to conceal contact information + from WHOIS queries. If you specify true, WHOIS ("who is") queries + will return contact information for our registrar partner, Gandi, + instead of the contact information that you enter. + Type: Boolean + + Default: None + + Valid values: `True` | `False` + + Required: No + + """ + params = {'DomainName': domain_name, } + if admin_privacy is not None: + params['AdminPrivacy'] = admin_privacy + if registrant_privacy is not None: + params['RegistrantPrivacy'] = registrant_privacy + if tech_privacy is not None: + params['TechPrivacy'] = tech_privacy + return self.make_request(action='UpdateDomainContactPrivacy', + body=json.dumps(params)) + + def update_domain_nameservers(self, domain_name, nameservers): + """ + This operation replaces the current set of name servers for + the domain with the specified set of name servers. If you use + Amazon Route 53 as your DNS service, specify the four name + servers in the delegation set for the hosted zone for the + domain. + + If successful, this operation returns an operation ID that you + can use to track the progress and completion of the action. If + the request is not completed successfully, the domain + registrant will be notified by email. + + :type domain_name: string + :param domain_name: The name of a domain. + Type: String + + Default: None + + Constraints: The domain name can contain only the letters a through z, + the numbers 0 through 9, and hyphen (-). Internationalized Domain + Names are not supported. + + Required: Yes + + :type nameservers: list + :param nameservers: A list of new name servers for the domain. 
+ Type: Complex + + Children: `Name`, `GlueIps` + + Required: Yes + + """ + params = { + 'DomainName': domain_name, + 'Nameservers': nameservers, + } + return self.make_request(action='UpdateDomainNameservers', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/route53/exception.py b/desktop/core/ext-py/boto-2.38.0/boto/route53/exception.py new file mode 100644 index 0000000000000000000000000000000000000000..61b33b0c57a2a60b19618ed095e02e8c440af209 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/route53/exception.py @@ -0,0 +1,27 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
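For orientation: unlike the XML-based Route53Connection earlier in this patch, the layer1 client above speaks AWS's JSON protocol. Every public method funnels through make_request(), which sets the X-Amz-Target header and maps a JSON '__type' fault onto the classes in exceptions.py via the _faults table. A sketch of the round trip (illustrative only; the domain names are placeholders):

    from boto.route53.domains import connect_to_region
    from boto.route53.domains.exceptions import UnsupportedTLD

    conn = connect_to_region('us-east-1')

    # A 200 response comes back as the parsed JSON body (a dict).
    result = conn.check_domain_availability('example.com')

    # A non-200 response raises the exception looked up in _faults.
    try:
        conn.check_domain_availability('example.nosuchtld')
    except UnsupportedTLD as e:
        print(e.body)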
+
+from boto.exception import BotoServerError
+
+
+class DNSServerError(BotoServerError):
+    pass
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/route53/healthcheck.py b/desktop/core/ext-py/boto-2.38.0/boto/route53/healthcheck.py
new file mode 100644
index 0000000000000000000000000000000000000000..43fdf1792428f2c0b81b6d5189832f9019973344
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/route53/healthcheck.py
@@ -0,0 +1,146 @@
+# Copyright (c) 2014 Tellybug, Matt Millar
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+
+"""
+From http://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateHealthCheck.html
+
+POST /2013-04-01/healthcheck HTTP/1.1
+
+<?xml version="1.0" encoding="UTF-8"?>
+<CreateHealthCheckRequest xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
+   <CallerReference>unique description</CallerReference>
+   <HealthCheckConfig>
+      <IPAddress>IP address of the endpoint to check</IPAddress>
+      <Port>port on the endpoint to check</Port>
+      <Type>HTTP | HTTPS | HTTP_STR_MATCH | HTTPS_STR_MATCH | TCP</Type>
+      <ResourcePath>path of the file that
+         you want Amazon Route 53 to request</ResourcePath>
+      <FullyQualifiedDomainName>domain name of the
+         endpoint to check</FullyQualifiedDomainName>
+      <SearchString>if Type is HTTP_STR_MATCH or HTTPS_STR_MATCH,
+         the string to search for in the response body
+         from the specified resource</SearchString>
+      <RequestInterval>10 | 30</RequestInterval>
+      <FailureThreshold>integer between 1 and 10</FailureThreshold>
+   </HealthCheckConfig>
+</CreateHealthCheckRequest>
+"""
+
+
+class HealthCheck(object):
+
+    """An individual health check"""
+
+    POSTXMLBody = """
+        <HealthCheckConfig>
+            %(ip_addr_part)s
+            <Port>%(port)s</Port>
+            <Type>%(type)s</Type>
+            <ResourcePath>%(resource_path)s</ResourcePath>
+            %(fqdn_part)s
+            %(string_match_part)s
+            %(request_interval)s
+            <FailureThreshold>%(failure_threshold)s</FailureThreshold>
+        </HealthCheckConfig>
+    """
+
+    XMLIpAddrPart = """<IPAddress>%(ip_addr)s</IPAddress>"""
+
+    XMLFQDNPart = """<FullyQualifiedDomainName>%(fqdn)s</FullyQualifiedDomainName>"""
+
+    XMLStringMatchPart = """<SearchString>%(string_match)s</SearchString>"""
+
+    XMLRequestIntervalPart = """<RequestInterval>%(request_interval)d</RequestInterval>"""
+
+    valid_request_intervals = (10, 30)
+
+    def __init__(self, ip_addr, port, hc_type, resource_path, fqdn=None, string_match=None, request_interval=30, failure_threshold=3):
+        """
+        HealthCheck object
+
+        :type ip_addr: str
+        :param ip_addr: Optional IP Address
+
+        :type port: int
+        :param port: Port to check
+
+        :type hc_type: str
+        :param hc_type: One of HTTP | HTTPS | HTTP_STR_MATCH | HTTPS_STR_MATCH | TCP
+
+        :type resource_path: str
+        :param resource_path: Path to check
+
+        :type fqdn: str
+        :param fqdn: domain name of the endpoint to check
+
+        :type string_match: str
+        :param string_match: if hc_type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string to search for in the response body from the specified resource
+
+        :type request_interval: int
+        :param request_interval: The number of seconds between the time that Amazon Route 53 gets a response from your endpoint and the time that it sends the next health-check request.
+ + :type failure_threshold: int + :param failure_threshold: The number of consecutive health checks that an endpoint must pass or fail for Amazon Route 53 to change the current status of the endpoint from unhealthy to healthy or vice versa. + + """ + self.ip_addr = ip_addr + self.port = port + self.hc_type = hc_type + self.resource_path = resource_path + self.fqdn = fqdn + self.string_match = string_match + self.failure_threshold = failure_threshold + + if request_interval in self.valid_request_intervals: + self.request_interval = request_interval + else: + raise AttributeError( + "Valid values for request_interval are: %s" % + ",".join(str(i) for i in self.valid_request_intervals)) + + if failure_threshold < 1 or failure_threshold > 10: + raise AttributeError( + 'Valid values for failure_threshold are 1 - 10.') + + def to_xml(self): + params = { + 'ip_addr_part': '', + 'port': self.port, + 'type': self.hc_type, + 'resource_path': self.resource_path, + 'fqdn_part': "", + 'string_match_part': "", + 'request_interval': (self.XMLRequestIntervalPart % + {'request_interval': self.request_interval}), + 'failure_threshold': self.failure_threshold, + } + if self.fqdn is not None: + params['fqdn_part'] = self.XMLFQDNPart % {'fqdn': self.fqdn} + + if self.ip_addr: + params['ip_addr_part'] = self.XMLIpAddrPart % {'ip_addr': self.ip_addr} + + if self.string_match is not None: + params['string_match_part'] = self.XMLStringMatchPart % {'string_match': self.string_match} + + return self.POSTXMLBody % params diff --git a/desktop/core/ext-py/boto-2.38.0/boto/route53/hostedzone.py b/desktop/core/ext-py/boto-2.38.0/boto/route53/hostedzone.py new file mode 100644 index 0000000000000000000000000000000000000000..93215382663232d9d09bd7023fadbf391ca6bb6c --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/route53/hostedzone.py @@ -0,0 +1,51 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
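The HealthCheck class above is a plain XML-template builder: to_xml() substitutes the constructor arguments into POSTXMLBody, leaving the optional parts empty unless set, and the connection layer wraps the fragment in a CreateHealthCheckRequest. A small sketch of what a caller would do (the address and path are example values):

    from boto.route53.healthcheck import HealthCheck

    # String-match check: Route 53 fetches /status over HTTP and looks
    # for the literal text 'OK' in the response body.
    hc = HealthCheck(ip_addr='192.0.2.10',      # example (RFC 5737) address
                     port=80,
                     hc_type='HTTP_STR_MATCH',
                     resource_path='/status',
                     string_match='OK',
                     request_interval=30,       # must be 10 or 30
                     failure_threshold=3)       # must be 1..10
    print(hc.to_xml())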
+# + + +class HostedZone(object): + + def __init__(self, id=None, name=None, owner=None, version=None, + caller_reference=None): + self.id = id + self.name = name + self.owner = owner + self.version = version + self.caller_reference = caller_reference + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Id': + self.id = value + elif name == 'Name': + self.name = value + elif name == 'Owner': + self.owner = value + elif name == 'Version': + self.version = value + elif name == 'CallerReference': + self.caller_reference = value + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/route53/record.py b/desktop/core/ext-py/boto-2.38.0/boto/route53/record.py new file mode 100644 index 0000000000000000000000000000000000000000..05cddce6a281cc87546b83ab2f3f1768772cbebd --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/route53/record.py @@ -0,0 +1,374 @@ +# Copyright (c) 2010 Chris Moyer http://coredumped.org/ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +RECORD_TYPES = ['A', 'AAAA', 'TXT', 'CNAME', 'MX', 'PTR', 'SRV', 'SPF'] + +from boto.resultset import ResultSet + + +class ResourceRecordSets(ResultSet): + """ + A list of resource records. + + :ivar hosted_zone_id: The ID of the hosted zone. + :ivar comment: A comment that will be stored with the change. + :ivar changes: A list of changes. 
+ """ + + ChangeResourceRecordSetsBody = """ + + + %(comment)s + %(changes)s + + """ + + ChangeXML = """ + %(action)s + %(record)s + """ + + def __init__(self, connection=None, hosted_zone_id=None, comment=None): + self.connection = connection + self.hosted_zone_id = hosted_zone_id + self.comment = comment + self.changes = [] + self.next_record_name = None + self.next_record_type = None + self.next_record_identifier = None + super(ResourceRecordSets, self).__init__([('ResourceRecordSet', Record)]) + + def __repr__(self): + if self.changes: + record_list = ','.join([c.__repr__() for c in self.changes]) + else: + record_list = ','.join([record.__repr__() for record in self]) + return '%s""" + + XMLBody = """ + %(name)s + %(type)s + %(weight)s + %(body)s + %(health_check)s + """ + + WRRBody = """ + %(identifier)s + %(weight)s + """ + + RRRBody = """ + %(identifier)s + %(region)s + """ + + FailoverBody = """ + %(identifier)s + %(failover)s + """ + + ResourceRecordsBody = """ + %(ttl)s + + %(records)s + """ + + ResourceRecordBody = """ + %s + """ + + AliasBody = """ + %(hosted_zone_id)s + %(dns_name)s + %(eval_target_health)s + """ + + EvaluateTargetHealth = """%s""" + + def __init__(self, name=None, type=None, ttl=600, resource_records=None, + alias_hosted_zone_id=None, alias_dns_name=None, identifier=None, + weight=None, region=None, alias_evaluate_target_health=None, + health_check=None, failover=None): + self.name = name + self.type = type + self.ttl = ttl + if resource_records is None: + resource_records = [] + self.resource_records = resource_records + self.alias_hosted_zone_id = alias_hosted_zone_id + self.alias_dns_name = alias_dns_name + self.identifier = identifier + self.weight = weight + self.region = region + self.alias_evaluate_target_health = alias_evaluate_target_health + self.health_check = health_check + self.failover = failover + + def __repr__(self): + return '' % (self.name, self.type, self.to_print()) + + def add_value(self, value): + """Add a resource record value""" + self.resource_records.append(value) + + def set_alias(self, alias_hosted_zone_id, alias_dns_name, + alias_evaluate_target_health=False): + """Make this an alias resource record set""" + self.alias_hosted_zone_id = alias_hosted_zone_id + self.alias_dns_name = alias_dns_name + self.alias_evaluate_target_health = alias_evaluate_target_health + + def to_xml(self): + """Spit this resource record set out as XML""" + if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None: + # Use alias + if self.alias_evaluate_target_health is not None: + eval_target_health = self.EvaluateTargetHealth % ('true' if self.alias_evaluate_target_health else 'false') + else: + eval_target_health = "" + + body = self.AliasBody % {"hosted_zone_id": self.alias_hosted_zone_id, + "dns_name": self.alias_dns_name, + "eval_target_health": eval_target_health} + else: + # Use resource record(s) + records = "" + + for r in self.resource_records: + records += self.ResourceRecordBody % r + + body = self.ResourceRecordsBody % { + "ttl": self.ttl, + "records": records, + } + + weight = "" + + if self.identifier is not None and self.weight is not None: + weight = self.WRRBody % {"identifier": self.identifier, + "weight": self.weight} + elif self.identifier is not None and self.region is not None: + weight = self.RRRBody % {"identifier": self.identifier, + "region": self.region} + elif self.identifier is not None and self.failover is not None: + weight = self.FailoverBody % {"identifier": self.identifier, + "failover": 
self.failover} + + health_check = "" + if self.health_check is not None: + health_check = self.HealthCheckBody % (self.health_check) + + params = { + "name": self.name, + "type": self.type, + "weight": weight, + "body": body, + "health_check": health_check + } + return self.XMLBody % params + + def to_print(self): + rr = "" + if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None: + # Show alias + rr = 'ALIAS ' + self.alias_hosted_zone_id + ' ' + self.alias_dns_name + if self.alias_evaluate_target_health is not None: + rr += ' (EvalTarget %s)' % self.alias_evaluate_target_health + else: + # Show resource record(s) + rr = ",".join(self.resource_records) + + if self.identifier is not None and self.weight is not None: + rr += ' (WRR id=%s, w=%s)' % (self.identifier, self.weight) + elif self.identifier is not None and self.region is not None: + rr += ' (LBR id=%s, region=%s)' % (self.identifier, self.region) + elif self.identifier is not None and self.failover is not None: + rr += ' (FAILOVER id=%s, failover=%s)' % (self.identifier, self.failover) + + return rr + + def endElement(self, name, value, connection): + if name == 'Name': + self.name = value + elif name == 'Type': + self.type = value + elif name == 'TTL': + self.ttl = value + elif name == 'Value': + self.resource_records.append(value) + elif name == 'HostedZoneId': + self.alias_hosted_zone_id = value + elif name == 'DNSName': + self.alias_dns_name = value + elif name == 'SetIdentifier': + self.identifier = value + elif name == 'EvaluateTargetHealth': + self.alias_evaluate_target_health = value.lower() == 'true' + elif name == 'Weight': + self.weight = value + elif name == 'Region': + self.region = value + elif name == 'Failover': + self.failover = value + elif name == 'HealthCheckId': + self.health_check = value + + def startElement(self, name, attrs, connection): + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/route53/status.py b/desktop/core/ext-py/boto-2.38.0/boto/route53/status.py new file mode 100644 index 0000000000000000000000000000000000000000..782372a811bc9445bd77b776d36355905bea4771 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/route53/status.py @@ -0,0 +1,42 @@ +# Copyright (c) 2011 Blue Pines Technologies LLC, Brad Carleton +# www.bluepines.org +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
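The usual flow for the record classes above: a ResourceRecordSets accumulates (action, Record) pairs, renders them through ChangeXML and XMLBody, and commit() POSTs the ChangeResourceRecordSetsRequest batch. Sketch only; the zone ID and addresses are placeholders:

    import boto
    from boto.route53.record import ResourceRecordSets

    conn = boto.connect_route53()
    changes = ResourceRecordSets(conn, 'Z3EXAMPLE', comment='add web hosts')

    change = changes.add_change('CREATE', 'www.example.com.', 'A', ttl=300)
    change.add_value('192.0.2.10')
    change.add_value('192.0.2.11')   # second value for round-robin

    result = changes.commit()        # parsed ChangeResourceRecordSetsResponse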
+
+
+class Status(object):
+    def __init__(self, route53connection, change_dict):
+        self.route53connection = route53connection
+        for key in change_dict:
+            if key == 'Id':
+                self.__setattr__(key.lower(),
+                                 change_dict[key].replace('/change/', ''))
+            else:
+                self.__setattr__(key.lower(), change_dict[key])
+
+    def update(self):
+        """ Update the status of this request."""
+        status = self.route53connection.get_change(self.id)['GetChangeResponse']['ChangeInfo']['Status']
+        self.status = status
+        return status
+
+    def __repr__(self):
+        return '<Status:%s>' % self.status
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/route53/zone.py b/desktop/core/ext-py/boto-2.38.0/boto/route53/zone.py
new file mode 100644
index 0000000000000000000000000000000000000000..b21c8de409f4976d3e579087235053bd76eebbda
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/route53/zone.py
@@ -0,0 +1,419 @@
+# Copyright (c) 2011 Blue Pines Technologies LLC, Brad Carleton
+# www.bluepines.org
+# Copyright (c) 2012 42 Lines Inc., Jim Browne
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+default_ttl = 60
+
+import copy
+from boto.exception import TooManyRecordsException
+from boto.route53.record import ResourceRecordSets
+from boto.route53.status import Status
+
+
+class Zone(object):
+    """
+    A Route53 Zone.
+
+    :ivar route53connection: A :class:`boto.route53.connection.Route53Connection` connection
+    :ivar id: The ID of the hosted zone
+    """
+    def __init__(self, route53connection, zone_dict):
+        self.route53connection = route53connection
+        for key in zone_dict:
+            if key == 'Id':
+                self.id = zone_dict['Id'].replace('/hostedzone/', '')
+            else:
+                self.__setattr__(key.lower(), zone_dict[key])
+
+    def __repr__(self):
+        return '<Zone:%s>' % self.name
+
+    def _commit(self, changes):
+        """
+        Commit a set of changes and return the ChangeInfo portion of
+        the response.
+
+        :type changes: ResourceRecordSets
+        :param changes: changes to be committed
+        """
+        response = changes.commit()
+        return response['ChangeResourceRecordSetsResponse']['ChangeInfo']
+
+    def _new_record(self, changes, resource_type, name, value, ttl, identifier,
+                    comment=""):
+        """
+        Add a CREATE change record to an existing ResourceRecordSets
+
+        :type changes: ResourceRecordSets
+        :param changes: change set to append to
+
+        :type name: str
+        :param name: The name of the resource record you want to
+            perform the action on.
+
+        :type resource_type: str
+        :param resource_type: The DNS record type
+
+        :param value: Appropriate value for resource_type
+
+        :type ttl: int
+        :param ttl: The resource record cache time to live (TTL), in seconds.
+
+        :type identifier: tuple
+        :param identifier: A tuple for setting WRR or LBR attributes. Valid
+            forms are:
+
+            * (str, int): WRR record [e.g. ('foo',10)]
+            * (str, str): LBR record [e.g. ('foo','us-east-1')]
+
+        :type comment: str
+        :param comment: A comment that will be stored with the change.
+        """
+        weight = None
+        region = None
+        if identifier is not None:
+            try:
+                int(identifier[1])
+                weight = identifier[1]
+                identifier = identifier[0]
+            except ValueError:
+                region = identifier[1]
+                identifier = identifier[0]
+        change = changes.add_change("CREATE", name, resource_type, ttl,
+                                    identifier=identifier, weight=weight,
+                                    region=region)
+        if type(value) in [list, tuple, set]:
+            for record in value:
+                change.add_value(record)
+        else:
+            change.add_value(value)
+
+    def add_record(self, resource_type, name, value, ttl=60, identifier=None,
+                   comment=""):
+        """
+        Add a new record to this Zone. See _new_record for parameter
+        documentation. Returns a Status object.
+        """
+        changes = ResourceRecordSets(self.route53connection, self.id, comment)
+        self._new_record(changes, resource_type, name, value, ttl, identifier,
+                         comment)
+        return Status(self.route53connection, self._commit(changes))
+
+    def update_record(self, old_record, new_value, new_ttl=None,
+                      new_identifier=None, comment=""):
+        """
+        Update an existing record in this Zone. Returns a Status object.
+
+        :type old_record: ResourceRecord
+        :param old_record: A ResourceRecord (e.g. returned by find_records)
+
+        See _new_record for additional parameter documentation.
+        """
+        new_ttl = new_ttl or default_ttl
+        record = copy.copy(old_record)
+        changes = ResourceRecordSets(self.route53connection, self.id, comment)
+        changes.add_change_record("DELETE", record)
+        self._new_record(changes, record.type, record.name,
+                         new_value, new_ttl, new_identifier, comment)
+        return Status(self.route53connection, self._commit(changes))
+
+    def delete_record(self, record, comment=""):
+        """
+        Delete one or more records from this Zone. Returns a Status object.
+
+        :param record: A ResourceRecord (e.g. returned by
+            find_records) or list, tuple, or set of ResourceRecords.
+
+        :type comment: str
+        :param comment: A comment that will be stored with the change.
+        """
+        changes = ResourceRecordSets(self.route53connection, self.id, comment)
+        if type(record) in [list, tuple, set]:
+            for r in record:
+                changes.add_change_record("DELETE", r)
+        else:
+            changes.add_change_record("DELETE", record)
+        return Status(self.route53connection, self._commit(changes))
+
+    def add_cname(self, name, value, ttl=None, identifier=None, comment=""):
+        """
+        Add a new CNAME record to this Zone. See _new_record for
+        parameter documentation. Returns a Status object.
+        """
+        ttl = ttl or default_ttl
+        name = self.route53connection._make_qualified(name)
+        value = self.route53connection._make_qualified(value)
+        return self.add_record(resource_type='CNAME',
+                               name=name,
+                               value=value,
+                               ttl=ttl,
+                               identifier=identifier,
+                               comment=comment)
+
+    def add_a(self, name, value, ttl=None, identifier=None, comment=""):
+        """
+        Add a new A record to this Zone. See _new_record for
+        parameter documentation. Returns a Status object.
+ """ + ttl = ttl or default_ttl + name = self.route53connection._make_qualified(name) + return self.add_record(resource_type='A', + name=name, + value=value, + ttl=ttl, + identifier=identifier, + comment=comment) + + def add_mx(self, name, records, ttl=None, identifier=None, comment=""): + """ + Add a new MX record to this Zone. See _new_record for + parameter documentation. Returns a Status object. + """ + ttl = ttl or default_ttl + records = self.route53connection._make_qualified(records) + return self.add_record(resource_type='MX', + name=name, + value=records, + ttl=ttl, + identifier=identifier, + comment=comment) + + def find_records(self, name, type, desired=1, all=False, identifier=None): + """ + Search this Zone for records that match given parameters. + Returns None if no results, a ResourceRecord if one result, or + a ResourceRecordSets if more than one result. + + :type name: str + :param name: The name of the records should match this parameter + + :type type: str + :param type: The type of the records should match this parameter + + :type desired: int + :param desired: The number of desired results. If the number of + matching records in the Zone exceeds the value of this parameter, + throw TooManyRecordsException + + :type all: Boolean + :param all: If true return all records that match name, type, and + identifier parameters + + :type identifier: Tuple + :param identifier: A tuple specifying WRR or LBR attributes. Valid + forms are: + + * (str, int): WRR record [e.g. ('foo',10)] + * (str, str): LBR record [e.g. ('foo','us-east-1') + + """ + name = self.route53connection._make_qualified(name) + returned = self.route53connection.get_all_rrsets(self.id, name=name, + type=type) + + # name/type for get_all_rrsets sets the starting record; they + # are not a filter + results = [] + for r in returned: + if r.name == name and r.type == type: + results.append(r) + # Is at the end of the list of matched records. No need to continue + # since the records are sorted by name and type. + else: + break + + weight = None + region = None + if identifier is not None: + try: + int(identifier[1]) + weight = identifier[1] + except: + region = identifier[1] + + if weight is not None: + results = [r for r in results if (r.weight == weight and + r.identifier == identifier[0])] + if region is not None: + results = [r for r in results if (r.region == region and + r.identifier == identifier[0])] + + if ((not all) and (len(results) > desired)): + message = "Search: name %s type %s" % (name, type) + message += "\nFound: " + message += ", ".join(["%s %s %s" % (r.name, r.type, r.to_print()) + for r in results]) + raise TooManyRecordsException(message) + elif len(results) > 1: + return results + elif len(results) == 1: + return results[0] + else: + return None + + def get_cname(self, name, all=False): + """ + Search this Zone for CNAME records that match name. + + Returns a ResourceRecord. + + If there is more than one match return all as a + ResourceRecordSets if all is True, otherwise throws + TooManyRecordsException. + """ + return self.find_records(name, 'CNAME', all=all) + + def get_a(self, name, all=False): + """ + Search this Zone for A records that match name. + + Returns a ResourceRecord. + + If there is more than one match return all as a + ResourceRecordSets if all is True, otherwise throws + TooManyRecordsException. + """ + return self.find_records(name, 'A', all=all) + + def get_mx(self, name, all=False): + """ + Search this Zone for MX records that match name. + + Returns a ResourceRecord. 
+
+        If there is more than one match return all as a
+        ResourceRecordSets if all is True, otherwise throws
+        TooManyRecordsException.
+        """
+        return self.find_records(name, 'MX', all=all)
+
+    def update_cname(self, name, value, ttl=None, identifier=None, comment=""):
+        """
+        Update the given CNAME record in this Zone to a new value, ttl,
+        and identifier. Returns a Status object.
+
+        Will throw TooManyRecordsException if name, value does not match
+        a single record.
+        """
+        name = self.route53connection._make_qualified(name)
+        value = self.route53connection._make_qualified(value)
+        old_record = self.get_cname(name)
+        ttl = ttl or old_record.ttl
+        return self.update_record(old_record,
+                                  new_value=value,
+                                  new_ttl=ttl,
+                                  new_identifier=identifier,
+                                  comment=comment)
+
+    def update_a(self, name, value, ttl=None, identifier=None, comment=""):
+        """
+        Update the given A record in this Zone to a new value, ttl,
+        and identifier. Returns a Status object.
+
+        Will throw TooManyRecordsException if name, value does not match
+        a single record.
+        """
+        name = self.route53connection._make_qualified(name)
+        old_record = self.get_a(name)
+        ttl = ttl or old_record.ttl
+        return self.update_record(old_record,
+                                  new_value=value,
+                                  new_ttl=ttl,
+                                  new_identifier=identifier,
+                                  comment=comment)
+
+    def update_mx(self, name, value, ttl=None, identifier=None, comment=""):
+        """
+        Update the given MX record in this Zone to a new value, ttl,
+        and identifier. Returns a Status object.
+
+        Will throw TooManyRecordsException if name, value does not match
+        a single record.
+        """
+        name = self.route53connection._make_qualified(name)
+        value = self.route53connection._make_qualified(value)
+        old_record = self.get_mx(name)
+        ttl = ttl or old_record.ttl
+        return self.update_record(old_record,
+                                  new_value=value,
+                                  new_ttl=ttl,
+                                  new_identifier=identifier,
+                                  comment=comment)
+
+    def delete_cname(self, name, identifier=None, all=False):
+        """
+        Delete a CNAME record matching name and identifier from
+        this Zone. Returns a Status object.
+
+        If there is more than one match delete all matching records if
+        all is True, otherwise throws TooManyRecordsException.
+        """
+        name = self.route53connection._make_qualified(name)
+        record = self.find_records(name, 'CNAME', identifier=identifier,
+                                   all=all)
+        return self.delete_record(record)
+
+    def delete_a(self, name, identifier=None, all=False):
+        """
+        Delete an A record matching name and identifier from this
+        Zone. Returns a Status object.
+
+        If there is more than one match delete all matching records if
+        all is True, otherwise throws TooManyRecordsException.
+        """
+        name = self.route53connection._make_qualified(name)
+        record = self.find_records(name, 'A', identifier=identifier,
+                                   all=all)
+        return self.delete_record(record)
+
+    def delete_mx(self, name, identifier=None, all=False):
+        """
+        Delete an MX record matching name and identifier from this
+        Zone. Returns a Status object.
+
+        If there is more than one match delete all matching records if
+        all is True, otherwise throws TooManyRecordsException.
+        """
+        name = self.route53connection._make_qualified(name)
+        record = self.find_records(name, 'MX', identifier=identifier,
+                                   all=all)
+        return self.delete_record(record)
+
+    def get_records(self):
+        """
+        Return a ResourceRecordSets for all of the records in this zone.
+        """
+        return self.route53connection.get_all_rrsets(self.id)
+
+    def delete(self):
+        """
+        Request that this zone be deleted by Amazon.
+ """ + self.route53connection.delete_hosted_zone(self.id) + + def get_nameservers(self): + """ Get the list of nameservers for this zone.""" + ns = self.find_records(self.name, 'NS') + if ns is not None: + ns = ns.resource_records + return ns diff --git a/desktop/core/ext-py/boto-2.38.0/boto/s3/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/s3/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b1994b9de122bece5e54b9e85fda8c5ae5bf5627 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/s3/__init__.py @@ -0,0 +1,74 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# Copyright (c) 2014, Steven Richards +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.regioninfo import RegionInfo, get_regions + + +class S3RegionInfo(RegionInfo): + + def connect(self, **kw_params): + """ + Connect to this Region's endpoint. Returns an connection + object pointing to the endpoint associated with this region. + You may pass any of the arguments accepted by the connection + class's constructor as keyword arguments and they will be + passed along to the connection object. + + :rtype: Connection object + :return: The connection to this regions endpoint + """ + if self.connection_cls: + return self.connection_cls(host=self.endpoint, **kw_params) + + +def regions(): + """ + Get all available regions for the Amazon S3 service. 
+
+
+def regions():
+    """
+    Get all available regions for the Amazon S3 service.
+
+    :rtype: list
+    :return: A list of :class:`boto.regioninfo.RegionInfo`
+    """
+    from boto.s3.connection import S3Connection
+    return get_regions(
+        's3',
+        region_cls=S3RegionInfo,
+        connection_cls=S3Connection
+    )
+
+
+def connect_to_region(region_name, **kw_params):
+    for region in regions():
+        if 'host' in kw_params.keys():
+            # Make sure the specified host is not empty or None.
+            if kw_params['host'] not in ['', None]:
+                region.endpoint = kw_params['host']
+                del kw_params['host']
+                return region.connect(**kw_params)
+            # If it is empty, remove it from kw_params and proceed with
+            # the region's default endpoint.
+            else:
+                del kw_params['host']
+        if region.name == region_name:
+            return region.connect(**kw_params)
+    return None
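+
+# A minimal usage sketch (not part of the upstream source), assuming AWS
+# credentials are available through the usual boto configuration; the
+# bucket name is hypothetical:
+#
+#     from boto.s3 import connect_to_region
+#
+#     conn = connect_to_region('us-west-2')
+#     bucket = conn.get_bucket('my-example-bucket')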
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/s3/acl.py b/desktop/core/ext-py/boto-2.38.0/boto/s3/acl.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d73ddfec9a95ce37f9f37af2f5eaf56e1ea0dc0
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/s3/acl.py
@@ -0,0 +1,171 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.s3.user import User
+
+
+CannedACLStrings = ['private', 'public-read',
+                    'public-read-write', 'authenticated-read',
+                    'bucket-owner-read', 'bucket-owner-full-control',
+                    'log-delivery-write']
+
+
+class Policy(object):
+
+    def __init__(self, parent=None):
+        self.parent = parent
+        self.namespace = None
+        self.acl = None
+
+    def __repr__(self):
+        grants = []
+        for g in self.acl.grants:
+            if g.id == self.owner.id:
+                grants.append("%s (owner) = %s" % (g.display_name, g.permission))
+            else:
+                if g.type == 'CanonicalUser':
+                    u = g.display_name
+                elif g.type == 'Group':
+                    u = g.uri
+                else:
+                    u = g.email_address
+                grants.append("%s = %s" % (u, g.permission))
+        return "<Policy: %s>" % ", ".join(grants)
+
+    def startElement(self, name, attrs, connection):
+        if name == 'AccessControlPolicy':
+            self.namespace = attrs.get('xmlns', None)
+            return None
+        if name == 'Owner':
+            self.owner = User(self)
+            return self.owner
+        elif name == 'AccessControlList':
+            self.acl = ACL(self)
+            return self.acl
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Owner':
+            pass
+        elif name == 'AccessControlList':
+            pass
+        else:
+            setattr(self, name, value)
+
+    def to_xml(self):
+        if self.namespace is not None:
+            s = '<AccessControlPolicy xmlns="{0}">'.format(self.namespace)
+        else:
+            s = '<AccessControlPolicy>'
+        s += self.owner.to_xml()
+        s += self.acl.to_xml()
+        s += '</AccessControlPolicy>'
+        return s
+
+
+class ACL(object):
+
+    def __init__(self, policy=None):
+        self.policy = policy
+        self.grants = []
+
+    def add_grant(self, grant):
+        self.grants.append(grant)
+
+    def add_email_grant(self, permission, email_address):
+        grant = Grant(permission=permission, type='AmazonCustomerByEmail',
+                      email_address=email_address)
+        self.grants.append(grant)
+
+    def add_user_grant(self, permission, user_id, display_name=None):
+        grant = Grant(permission=permission, type='CanonicalUser',
+                      id=user_id, display_name=display_name)
+        self.grants.append(grant)
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Grant':
+            self.grants.append(Grant(self))
+            return self.grants[-1]
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Grant':
+            pass
+        else:
+            setattr(self, name, value)
+
+    def to_xml(self):
+        s = '<AccessControlList>'
+        for grant in self.grants:
+            s += grant.to_xml()
+        s += '</AccessControlList>'
+        return s
+
+
+class Grant(object):
+
+    NameSpace = 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
+
+    def __init__(self, permission=None, type=None, id=None,
+                 display_name=None, uri=None, email_address=None):
+        self.permission = permission
+        self.id = id
+        self.display_name = display_name
+        self.uri = uri
+        self.email_address = email_address
+        self.type = type
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Grantee':
+            self.type = attrs['xsi:type']
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'ID':
+            self.id = value
+        elif name == 'DisplayName':
+            self.display_name = value
+        elif name == 'URI':
+            self.uri = value
+        elif name == 'EmailAddress':
+            self.email_address = value
+        elif name == 'Grantee':
+            pass
+        elif name == 'Permission':
+            self.permission = value
+        else:
+            setattr(self, name, value)
+
+    def to_xml(self):
+        s = '<Grant>'
+        s += '<Grantee %s xsi:type="%s">' % (self.NameSpace, self.type)
+        if self.type == 'CanonicalUser':
+            s += '<ID>%s</ID>' % self.id
+            s += '<DisplayName>%s</DisplayName>' % self.display_name
+        elif self.type == 'Group':
+            s += '<URI>%s</URI>' % self.uri
+        else:
+            s += '<EmailAddress>%s</EmailAddress>' % self.email_address
+        s += '</Grantee>'
+        s += '<Permission>%s</Permission>' % self.permission
+        s += '</Grant>'
+        return s
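+
+
+# A short illustrative sketch (not part of the upstream source) of how
+# these classes are typically used; the bucket name is hypothetical and
+# `conn` is an S3Connection:
+#
+#     bucket = conn.get_bucket('my-example-bucket')
+#     policy = bucket.get_acl()                    # returns a Policy
+#     policy.acl.add_email_grant('READ', 'user@example.com')
+#     bucket.set_acl(policy)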
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/s3/bucket.py b/desktop/core/ext-py/boto-2.38.0/boto/s3/bucket.py
new file mode 100644
index 0000000000000000000000000000000000000000..504f24f9bbf4b66c2f4b96a2b615d4229f3ad57b
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/s3/bucket.py
@@ -0,0 +1,1876 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import boto
+from boto import handler
+from boto.resultset import ResultSet
+from boto.exception import BotoClientError
+from boto.s3.acl import Policy, CannedACLStrings, Grant
+from boto.s3.key import Key
+from boto.s3.prefix import Prefix
+from boto.s3.deletemarker import DeleteMarker
+from boto.s3.multipart import MultiPartUpload
+from boto.s3.multipart import CompleteMultiPartUpload
+from boto.s3.multidelete import MultiDeleteResult
+from boto.s3.multidelete import Error
+from boto.s3.bucketlistresultset import BucketListResultSet
+from boto.s3.bucketlistresultset import VersionedBucketListResultSet
+from boto.s3.bucketlistresultset import MultiPartUploadListResultSet
+from boto.s3.lifecycle import Lifecycle
+from boto.s3.tagging import Tags
+from boto.s3.cors import CORSConfiguration
+from boto.s3.bucketlogging import BucketLogging
+from boto.s3 import website
+import boto.jsonresponse
+import boto.utils
+import xml.sax
+import xml.sax.saxutils
+import re
+import base64
+from collections import defaultdict
+from boto.compat import BytesIO, six, StringIO, urllib
+
+# as per http://goo.gl/BDuud (02/19/2011)
+
+
+class S3WebsiteEndpointTranslate(object):
+
+    trans_region = defaultdict(lambda: 's3-website-us-east-1')
+    trans_region['eu-west-1'] = 's3-website-eu-west-1'
+    trans_region['us-west-1'] = 's3-website-us-west-1'
+    trans_region['us-west-2'] = 's3-website-us-west-2'
+    trans_region['sa-east-1'] = 's3-website-sa-east-1'
+    trans_region['ap-northeast-1'] = 's3-website-ap-northeast-1'
+    trans_region['ap-southeast-1'] = 's3-website-ap-southeast-1'
+    trans_region['ap-southeast-2'] = 's3-website-ap-southeast-2'
+    trans_region['cn-north-1'] = 's3-website.cn-north-1'
+
+    @classmethod
+    def translate_region(cls, reg):
+        return cls.trans_region[reg]
+
+S3Permissions = ['READ', 'WRITE', 'READ_ACP', 'WRITE_ACP', 'FULL_CONTROL']
+
+
+class Bucket(object):
+
+    LoggingGroup = 'http://acs.amazonaws.com/groups/s3/LogDelivery'
+
+    BucketPaymentBody = """<?xml version="1.0" encoding="UTF-8"?>
+       <RequestPaymentConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+         <Payer>%s</Payer>
+       </RequestPaymentConfiguration>"""
+
+    VersioningBody = """<?xml version="1.0" encoding="UTF-8"?>
+       <VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+         <Status>%s</Status>
+         <MfaDelete>%s</MfaDelete>
+       </VersioningConfiguration>"""
+
+    VersionRE = '<Status>([A-Za-z]+)</Status>'
+    MFADeleteRE = '<MfaDelete>([A-Za-z]+)</MfaDelete>'
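+
+    # Illustrative sketch (not part of the upstream source) of everyday
+    # Bucket usage; names are hypothetical and `conn` is an S3Connection:
+    #
+    #     bucket = conn.get_bucket('my-example-bucket')
+    #     key = bucket.new_key('hello.txt')
+    #     key.set_contents_from_string('hello world')
+    #     print(bucket.get_key('hello.txt').size)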
+
+    def __init__(self, connection=None, name=None, key_class=Key):
+        self.name = name
+        self.connection = connection
+        self.key_class = key_class
+
+    def __repr__(self):
+        return '<Bucket: %s>' % self.name
+
+    def __iter__(self):
+        return iter(BucketListResultSet(self))
+
+    def __contains__(self, key_name):
+        return self.get_key(key_name) is not None
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Name':
+            self.name = value
+        elif name == 'CreationDate':
+            self.creation_date = value
+        else:
+            setattr(self, name, value)
+
+    def set_key_class(self, key_class):
+        """
+        Set the Key class associated with this bucket.  By default, this
+        would be the boto.s3.key.Key class, but if you want to subclass that
+        for some reason, this allows you to associate your new class with a
+        bucket so that when you call bucket.new_key() or when you get a
+        listing of keys in the bucket you will get instances of your key
+        class rather than the default.
+
+        :type key_class: class
+        :param key_class: A subclass of Key that can be more specific
+        """
+        self.key_class = key_class
+
+    def lookup(self, key_name, headers=None):
+        """
+        Deprecated: Please use get_key method.
+
+        :type key_name: string
+        :param key_name: The name of the key to retrieve
+
+        :rtype: :class:`boto.s3.key.Key`
+        :returns: A Key object from this bucket.
+        """
+        return self.get_key(key_name, headers=headers)
+
+    def get_key(self, key_name, headers=None, version_id=None,
+                response_headers=None, validate=True):
+        """
+        Check to see if a particular key exists within the bucket.  This
+        method uses a HEAD request to check for the existence of the key.
+        Returns: An instance of a Key object or None
+
+        :param key_name: The name of the key to retrieve
+        :type key_name: string
+
+        :param headers: The headers to send when retrieving the key
+        :type headers: dict
+
+        :param version_id:
+        :type version_id: string
+
+        :param response_headers: A dictionary containing HTTP
+            headers/values that will override any headers associated
+            with the stored object in the response.  See
+            http://goo.gl/EWOPb for details.
+        :type response_headers: dict
+
+        :param validate: Verifies whether the key exists.  If ``False``, this
+            will not hit the service, constructing an in-memory object.
+            Default is ``True``.
+        :type validate: bool
+
+        :rtype: :class:`boto.s3.key.Key`
+        :returns: A Key object from this bucket.
+        """
+        if validate is False:
+            if headers or version_id or response_headers:
+                raise BotoClientError(
+                    "When providing 'validate=False', no other params "
+                    "are allowed."
+                )
+
+            # This leans on the default behavior of ``new_key`` (not hitting
+            # the service). If that changes, that behavior should migrate here.
+ return self.new_key(key_name) + + query_args_l = [] + if version_id: + query_args_l.append('versionId=%s' % version_id) + if response_headers: + for rk, rv in six.iteritems(response_headers): + query_args_l.append('%s=%s' % (rk, urllib.parse.quote(rv))) + + key, resp = self._get_key_internal(key_name, headers, query_args_l) + return key + + def _get_key_internal(self, key_name, headers, query_args_l): + query_args = '&'.join(query_args_l) or None + response = self.connection.make_request('HEAD', self.name, key_name, + headers=headers, + query_args=query_args) + response.read() + # Allow any success status (2xx) - for example this lets us + # support Range gets, which return status 206: + if response.status / 100 == 2: + k = self.key_class(self) + provider = self.connection.provider + k.metadata = boto.utils.get_aws_metadata(response.msg, provider) + for field in Key.base_fields: + k.__dict__[field.lower().replace('-', '_')] = \ + response.getheader(field) + # the following machinations are a workaround to the fact that + # apache/fastcgi omits the content-length header on HEAD + # requests when the content-length is zero. + # See http://goo.gl/0Tdax for more details. + clen = response.getheader('content-length') + if clen: + k.size = int(response.getheader('content-length')) + else: + k.size = 0 + k.name = key_name + k.handle_version_headers(response) + k.handle_encryption_headers(response) + k.handle_restore_headers(response) + k.handle_addl_headers(response.getheaders()) + return k, response + else: + if response.status == 404: + return None, response + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, '') + + def list(self, prefix='', delimiter='', marker='', headers=None, + encoding_type=None): + """ + List key objects within a bucket. This returns an instance of an + BucketListResultSet that automatically handles all of the result + paging, etc. from S3. You just need to keep iterating until + there are no more results. + + Called with no arguments, this will return an iterator object across + all keys within the bucket. + + The Key objects returned by the iterator are obtained by parsing + the results of a GET on the bucket, also known as the List Objects + request. The XML returned by this request contains only a subset + of the information about each key. Certain metadata fields such + as Content-Type and user metadata are not available in the XML. + Therefore, if you want these additional metadata fields you will + have to do a HEAD request on the Key in the bucket. + + :type prefix: string + :param prefix: allows you to limit the listing to a particular + prefix. For example, if you call the method with + prefix='/foo/' then the iterator will only cycle through + the keys that begin with the string '/foo/'. + + :type delimiter: string + :param delimiter: can be used in conjunction with the prefix + to allow you to organize and browse your keys + hierarchically. See http://goo.gl/Xx63h for more details. + + :type marker: string + :param marker: The "marker" of where you are in the result set + + :param encoding_type: Requests Amazon S3 to encode the response and + specifies the encoding method to use. + + An object key can contain any Unicode character; however, XML 1.0 + parser cannot parse some characters, such as characters with an + ASCII value from 0 to 10. For characters that are not supported in + XML 1.0, you can add this parameter to request that Amazon S3 + encode the keys in the response. 
+ + Valid options: ``url`` + :type encoding_type: string + + :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet` + :return: an instance of a BucketListResultSet that handles paging, etc + """ + return BucketListResultSet(self, prefix, delimiter, marker, headers, + encoding_type=encoding_type) + + def list_versions(self, prefix='', delimiter='', key_marker='', + version_id_marker='', headers=None, encoding_type=None): + """ + List version objects within a bucket. This returns an + instance of an VersionedBucketListResultSet that automatically + handles all of the result paging, etc. from S3. You just need + to keep iterating until there are no more results. Called + with no arguments, this will return an iterator object across + all keys within the bucket. + + :type prefix: string + :param prefix: allows you to limit the listing to a particular + prefix. For example, if you call the method with + prefix='/foo/' then the iterator will only cycle through + the keys that begin with the string '/foo/'. + + :type delimiter: string + :param delimiter: can be used in conjunction with the prefix + to allow you to organize and browse your keys + hierarchically. See: + + http://aws.amazon.com/releasenotes/Amazon-S3/213 + + for more details. + + :type key_marker: string + :param key_marker: The "marker" of where you are in the result set + + :param encoding_type: Requests Amazon S3 to encode the response and + specifies the encoding method to use. + + An object key can contain any Unicode character; however, XML 1.0 + parser cannot parse some characters, such as characters with an + ASCII value from 0 to 10. For characters that are not supported in + XML 1.0, you can add this parameter to request that Amazon S3 + encode the keys in the response. + + Valid options: ``url`` + :type encoding_type: string + + :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet` + :return: an instance of a BucketListResultSet that handles paging, etc + """ + return VersionedBucketListResultSet(self, prefix, delimiter, + key_marker, version_id_marker, + headers, + encoding_type=encoding_type) + + def list_multipart_uploads(self, key_marker='', + upload_id_marker='', + headers=None, encoding_type=None): + """ + List multipart upload objects within a bucket. This returns an + instance of an MultiPartUploadListResultSet that automatically + handles all of the result paging, etc. from S3. You just need + to keep iterating until there are no more results. + + :type key_marker: string + :param key_marker: The "marker" of where you are in the result set + + :type upload_id_marker: string + :param upload_id_marker: The upload identifier + + :param encoding_type: Requests Amazon S3 to encode the response and + specifies the encoding method to use. + + An object key can contain any Unicode character; however, XML 1.0 + parser cannot parse some characters, such as characters with an + ASCII value from 0 to 10. For characters that are not supported in + XML 1.0, you can add this parameter to request that Amazon S3 + encode the keys in the response. 
+ + Valid options: ``url`` + :type encoding_type: string + + :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet` + :return: an instance of a BucketListResultSet that handles paging, etc + """ + return MultiPartUploadListResultSet(self, key_marker, + upload_id_marker, + headers, + encoding_type=encoding_type) + + def _get_all_query_args(self, params, initial_query_string=''): + pairs = [] + + if initial_query_string: + pairs.append(initial_query_string) + + for key, value in sorted(params.items(), key=lambda x: x[0]): + if value is None: + continue + key = key.replace('_', '-') + if key == 'maxkeys': + key = 'max-keys' + if not isinstance(value, six.string_types + (six.binary_type,)): + value = six.text_type(value) + if not isinstance(value, six.binary_type): + value = value.encode('utf-8') + if value: + pairs.append(u'%s=%s' % ( + urllib.parse.quote(key), + urllib.parse.quote(value) + )) + + return '&'.join(pairs) + + def _get_all(self, element_map, initial_query_string='', + headers=None, **params): + query_args = self._get_all_query_args( + params, + initial_query_string=initial_query_string + ) + response = self.connection.make_request('GET', self.name, + headers=headers, + query_args=query_args) + body = response.read() + boto.log.debug(body) + if response.status == 200: + rs = ResultSet(element_map) + h = handler.XmlHandler(rs, self) + if not isinstance(body, bytes): + body = body.encode('utf-8') + xml.sax.parseString(body, h) + return rs + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def validate_kwarg_names(self, kwargs, names): + """ + Checks that all named arguments are in the specified list of names. + + :type kwargs: dict + :param kwargs: Dictionary of kwargs to validate. + + :type names: list + :param names: List of possible named arguments. + """ + for kwarg in kwargs: + if kwarg not in names: + raise TypeError('Invalid argument "%s"!' % kwarg) + + def get_all_keys(self, headers=None, **params): + """ + A lower-level method for listing contents of a bucket. This + closely models the actual S3 API and requires you to manually + handle the paging of results. For a higher-level method that + handles the details of paging for you, you can use the list + method. + + :type max_keys: int + :param max_keys: The maximum number of keys to retrieve + + :type prefix: string + :param prefix: The prefix of the keys you want to retrieve + + :type marker: string + :param marker: The "marker" of where you are in the result set + + :type delimiter: string + :param delimiter: If this optional, Unicode string parameter + is included with your request, then keys that contain the + same string between the prefix and the first occurrence of + the delimiter will be rolled up into a single result + element in the CommonPrefixes collection. These rolled-up + keys are not returned elsewhere in the response. + + :param encoding_type: Requests Amazon S3 to encode the response and + specifies the encoding method to use. + + An object key can contain any Unicode character; however, XML 1.0 + parser cannot parse some characters, such as characters with an + ASCII value from 0 to 10. For characters that are not supported in + XML 1.0, you can add this parameter to request that Amazon S3 + encode the keys in the response. 
+ + Valid options: ``url`` + :type encoding_type: string + + :rtype: ResultSet + :return: The result from S3 listing the keys requested + + """ + self.validate_kwarg_names(params, ['maxkeys', 'max_keys', 'prefix', + 'marker', 'delimiter', + 'encoding_type']) + return self._get_all([('Contents', self.key_class), + ('CommonPrefixes', Prefix)], + '', headers, **params) + + def get_all_versions(self, headers=None, **params): + """ + A lower-level, version-aware method for listing contents of a + bucket. This closely models the actual S3 API and requires + you to manually handle the paging of results. For a + higher-level method that handles the details of paging for + you, you can use the list method. + + :type max_keys: int + :param max_keys: The maximum number of keys to retrieve + + :type prefix: string + :param prefix: The prefix of the keys you want to retrieve + + :type key_marker: string + :param key_marker: The "marker" of where you are in the result set + with respect to keys. + + :type version_id_marker: string + :param version_id_marker: The "marker" of where you are in the result + set with respect to version-id's. + + :type delimiter: string + :param delimiter: If this optional, Unicode string parameter + is included with your request, then keys that contain the + same string between the prefix and the first occurrence of + the delimiter will be rolled up into a single result + element in the CommonPrefixes collection. These rolled-up + keys are not returned elsewhere in the response. + + :param encoding_type: Requests Amazon S3 to encode the response and + specifies the encoding method to use. + + An object key can contain any Unicode character; however, XML 1.0 + parser cannot parse some characters, such as characters with an + ASCII value from 0 to 10. For characters that are not supported in + XML 1.0, you can add this parameter to request that Amazon S3 + encode the keys in the response. + + Valid options: ``url`` + :type encoding_type: string + + :rtype: ResultSet + :return: The result from S3 listing the keys requested + """ + self.validate_get_all_versions_params(params) + return self._get_all([('Version', self.key_class), + ('CommonPrefixes', Prefix), + ('DeleteMarker', DeleteMarker)], + 'versions', headers, **params) + + def validate_get_all_versions_params(self, params): + """ + Validate that the parameters passed to get_all_versions are valid. + Overridden by subclasses that allow a different set of parameters. + + :type params: dict + :param params: Parameters to validate. + """ + self.validate_kwarg_names( + params, ['maxkeys', 'max_keys', 'prefix', 'key_marker', + 'version_id_marker', 'delimiter', 'encoding_type']) + + def get_all_multipart_uploads(self, headers=None, **params): + """ + A lower-level, version-aware method for listing active + MultiPart uploads for a bucket. This closely models the + actual S3 API and requires you to manually handle the paging + of results. For a higher-level method that handles the + details of paging for you, you can use the list method. + + :type max_uploads: int + :param max_uploads: The maximum number of uploads to retrieve. + Default value is 1000. + + :type key_marker: string + :param key_marker: Together with upload_id_marker, this + parameter specifies the multipart upload after which + listing should begin. If upload_id_marker is not + specified, only the keys lexicographically greater than + the specified key_marker will be included in the list. 
+ + If upload_id_marker is specified, any multipart uploads + for a key equal to the key_marker might also be included, + provided those multipart uploads have upload IDs + lexicographically greater than the specified + upload_id_marker. + + :type upload_id_marker: string + :param upload_id_marker: Together with key-marker, specifies + the multipart upload after which listing should begin. If + key_marker is not specified, the upload_id_marker + parameter is ignored. Otherwise, any multipart uploads + for a key equal to the key_marker might be included in the + list only if they have an upload ID lexicographically + greater than the specified upload_id_marker. + + :type encoding_type: string + :param encoding_type: Requests Amazon S3 to encode the response and + specifies the encoding method to use. + + An object key can contain any Unicode character; however, XML 1.0 + parser cannot parse some characters, such as characters with an + ASCII value from 0 to 10. For characters that are not supported in + XML 1.0, you can add this parameter to request that Amazon S3 + encode the keys in the response. + + Valid options: ``url`` + + :type delimiter: string + :param delimiter: Character you use to group keys. + All keys that contain the same string between the prefix, if + specified, and the first occurrence of the delimiter after the + prefix are grouped under a single result element, CommonPrefixes. + If you don't specify the prefix parameter, then the substring + starts at the beginning of the key. The keys that are grouped + under CommonPrefixes result element are not returned elsewhere + in the response. + + :type prefix: string + :param prefix: Lists in-progress uploads only for those keys that + begin with the specified prefix. You can use prefixes to separate + a bucket into different grouping of keys. (You can think of using + prefix to make groups in the same way you'd use a folder in a + file system.) + + :rtype: ResultSet + :return: The result from S3 listing the uploads requested + + """ + self.validate_kwarg_names(params, ['max_uploads', 'key_marker', + 'upload_id_marker', 'encoding_type', + 'delimiter', 'prefix']) + return self._get_all([('Upload', MultiPartUpload), + ('CommonPrefixes', Prefix)], + 'uploads', headers, **params) + + def new_key(self, key_name=None): + """ + Creates a new key + + :type key_name: string + :param key_name: The name of the key to create + + :rtype: :class:`boto.s3.key.Key` or subclass + :returns: An instance of the newly created key object + """ + if not key_name: + raise ValueError('Empty key names are not allowed') + return self.key_class(self, key_name) + + def generate_url(self, expires_in, method='GET', headers=None, + force_http=False, response_headers=None, + expires_in_absolute=False): + return self.connection.generate_url(expires_in, method, self.name, + headers=headers, + force_http=force_http, + response_headers=response_headers, + expires_in_absolute=expires_in_absolute) + + def delete_keys(self, keys, quiet=False, mfa_token=None, headers=None): + """ + Deletes a set of keys using S3's Multi-object delete API. If a + VersionID is specified for that key then that version is removed. + Returns a MultiDeleteResult Object, which contains Deleted + and Error elements for each key you ask to delete. + + :type keys: list + :param keys: A list of either key_names or (key_name, versionid) pairs + or a list of Key instances. 
+
+        :type quiet: boolean
+        :param quiet: In quiet mode the response includes only keys
+            where the delete operation encountered an error.  For a
+            successful deletion, the operation does not return any
+            information about the delete in the response body.
+
+        :type mfa_token: tuple or list of strings
+        :param mfa_token: A tuple or list consisting of the serial
+            number from the MFA device and the current value of the
+            six-digit token associated with the device.  This value is
+            required anytime you are deleting versioned objects from a
+            bucket that has the MFADelete option on the bucket.
+
+        :returns: An instance of MultiDeleteResult
+        """
+        ikeys = iter(keys)
+        result = MultiDeleteResult(self)
+        provider = self.connection.provider
+        query_args = 'delete'
+
+        def delete_keys2(hdrs):
+            hdrs = hdrs or {}
+            data = u"""<?xml version="1.0" encoding="UTF-8"?>"""
+            data += u"<Delete>"
+            if quiet:
+                data += u"<Quiet>true</Quiet>"
+            count = 0
+            while count < 1000:
+                try:
+                    key = next(ikeys)
+                except StopIteration:
+                    break
+                if isinstance(key, six.string_types):
+                    key_name = key
+                    version_id = None
+                elif isinstance(key, tuple) and len(key) == 2:
+                    key_name, version_id = key
+                elif (isinstance(key, Key) or isinstance(key, DeleteMarker)) and key.name:
+                    key_name = key.name
+                    version_id = key.version_id
+                else:
+                    if isinstance(key, Prefix):
+                        key_name = key.name
+                        code = 'PrefixSkipped'   # Don't delete Prefix
+                    else:
+                        key_name = repr(key)   # try to get a string
+                        code = 'InvalidArgument'   # other unknown type
+                    message = 'Invalid. No delete action taken for this object.'
+                    error = Error(key_name, code=code, message=message)
+                    result.errors.append(error)
+                    continue
+                count += 1
+                data += u"<Object><Key>%s</Key>" % xml.sax.saxutils.escape(key_name)
+                if version_id:
+                    data += u"<VersionId>%s</VersionId>" % version_id
+                data += u"</Object>"
+            data += u"</Delete>"
+            if count <= 0:
+                return False  # no more
+            data = data.encode('utf-8')
+            fp = BytesIO(data)
+            md5 = boto.utils.compute_md5(fp)
+            hdrs['Content-MD5'] = md5[1]
+            hdrs['Content-Type'] = 'text/xml'
+            if mfa_token:
+                hdrs[provider.mfa_header] = ' '.join(mfa_token)
+            response = self.connection.make_request('POST', self.name,
+                                                    headers=hdrs,
+                                                    query_args=query_args,
+                                                    data=data)
+            body = response.read()
+            if response.status == 200:
+                h = handler.XmlHandler(result, self)
+                if not isinstance(body, bytes):
+                    body = body.encode('utf-8')
+                xml.sax.parseString(body, h)
+                return count >= 1000  # more?
+            else:
+                raise provider.storage_response_error(response.status,
+                                                      response.reason,
+                                                      body)
+        while delete_keys2(headers):
+            pass
+        return result
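+
+    # Illustrative sketch (not part of the upstream source) of the
+    # multi-object delete API above; the key names and version id are
+    # hypothetical:
+    #
+    #     result = bucket.delete_keys(['a.txt', ('b.txt', 'some-version-id')])
+    #     for err in result.errors:
+    #         print(err.key, err.code)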
+ """ + if not key_name: + raise ValueError('Empty key names are not allowed') + return self._delete_key_internal(key_name, headers=headers, + version_id=version_id, + mfa_token=mfa_token, + query_args_l=None) + + def _delete_key_internal(self, key_name, headers=None, version_id=None, + mfa_token=None, query_args_l=None): + query_args_l = query_args_l or [] + provider = self.connection.provider + if version_id: + query_args_l.append('versionId=%s' % version_id) + query_args = '&'.join(query_args_l) or None + if mfa_token: + if not headers: + headers = {} + headers[provider.mfa_header] = ' '.join(mfa_token) + response = self.connection.make_request('DELETE', self.name, key_name, + headers=headers, + query_args=query_args) + body = response.read() + if response.status != 204: + raise provider.storage_response_error(response.status, + response.reason, body) + else: + # return a key object with information on what was deleted. + k = self.key_class(self) + k.name = key_name + k.handle_version_headers(response) + k.handle_addl_headers(response.getheaders()) + return k + + def copy_key(self, new_key_name, src_bucket_name, + src_key_name, metadata=None, src_version_id=None, + storage_class='STANDARD', preserve_acl=False, + encrypt_key=False, headers=None, query_args=None): + """ + Create a new key in the bucket by copying another existing key. + + :type new_key_name: string + :param new_key_name: The name of the new key + + :type src_bucket_name: string + :param src_bucket_name: The name of the source bucket + + :type src_key_name: string + :param src_key_name: The name of the source key + + :type src_version_id: string + :param src_version_id: The version id for the key. This param + is optional. If not specified, the newest version of the + key will be copied. + + :type metadata: dict + :param metadata: Metadata to be associated with new key. If + metadata is supplied, it will replace the metadata of the + source key being copied. If no metadata is supplied, the + source key's metadata will be copied to the new key. + + :type storage_class: string + :param storage_class: The storage class of the new key. By + default, the new key will use the standard storage class. + Possible values are: STANDARD | REDUCED_REDUNDANCY + + :type preserve_acl: bool + :param preserve_acl: If True, the ACL from the source key will + be copied to the destination key. If False, the + destination key will have the default ACL. Note that + preserving the ACL in the new key object will require two + additional API calls to S3, one to retrieve the current + ACL and one to set that ACL on the new object. If you + don't care about the ACL, a value of False will be + significantly more efficient. + + :type encrypt_key: bool + :param encrypt_key: If True, the new copy of the object will + be encrypted on the server-side by S3 and will be stored + in an encrypted form while at rest in S3. + + :type headers: dict + :param headers: A dictionary of header name/value pairs. 
+ + :type query_args: string + :param query_args: A string of additional querystring arguments + to append to the request + + :rtype: :class:`boto.s3.key.Key` or subclass + :returns: An instance of the newly created key object + """ + headers = headers or {} + provider = self.connection.provider + src_key_name = boto.utils.get_utf8_value(src_key_name) + if preserve_acl: + if self.name == src_bucket_name: + src_bucket = self + else: + src_bucket = self.connection.get_bucket( + src_bucket_name, validate=False) + acl = src_bucket.get_xml_acl(src_key_name) + if encrypt_key: + headers[provider.server_side_encryption_header] = 'AES256' + src = '%s/%s' % (src_bucket_name, urllib.parse.quote(src_key_name)) + if src_version_id: + src += '?versionId=%s' % src_version_id + headers[provider.copy_source_header] = str(src) + # make sure storage_class_header key exists before accessing it + if provider.storage_class_header and storage_class: + headers[provider.storage_class_header] = storage_class + if metadata is not None: + headers[provider.metadata_directive_header] = 'REPLACE' + headers = boto.utils.merge_meta(headers, metadata, provider) + elif not query_args: # Can't use this header with multi-part copy. + headers[provider.metadata_directive_header] = 'COPY' + response = self.connection.make_request('PUT', self.name, new_key_name, + headers=headers, + query_args=query_args) + body = response.read() + if response.status == 200: + key = self.new_key(new_key_name) + h = handler.XmlHandler(key, self) + if not isinstance(body, bytes): + body = body.encode('utf-8') + xml.sax.parseString(body, h) + if hasattr(key, 'Error'): + raise provider.storage_copy_error(key.Code, key.Message, body) + key.handle_version_headers(response) + key.handle_addl_headers(response.getheaders()) + if preserve_acl: + self.set_xml_acl(acl, new_key_name) + return key + else: + raise provider.storage_response_error(response.status, + response.reason, body) + + def set_canned_acl(self, acl_str, key_name='', headers=None, + version_id=None): + assert acl_str in CannedACLStrings + + if headers: + headers[self.connection.provider.acl_header] = acl_str + else: + headers = {self.connection.provider.acl_header: acl_str} + + query_args = 'acl' + if version_id: + query_args += '&versionId=%s' % version_id + response = self.connection.make_request('PUT', self.name, key_name, + headers=headers, query_args=query_args) + body = response.read() + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def get_xml_acl(self, key_name='', headers=None, version_id=None): + query_args = 'acl' + if version_id: + query_args += '&versionId=%s' % version_id + response = self.connection.make_request('GET', self.name, key_name, + query_args=query_args, + headers=headers) + body = response.read() + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + return body + + def set_xml_acl(self, acl_str, key_name='', headers=None, version_id=None, + query_args='acl'): + if version_id: + query_args += '&versionId=%s' % version_id + if not isinstance(acl_str, bytes): + acl_str = acl_str.encode('utf-8') + response = self.connection.make_request('PUT', self.name, key_name, + data=acl_str, + query_args=query_args, + headers=headers) + body = response.read() + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_acl(self, acl_or_str, 
key_name='', headers=None, version_id=None): + if isinstance(acl_or_str, Policy): + self.set_xml_acl(acl_or_str.to_xml(), key_name, + headers, version_id) + else: + self.set_canned_acl(acl_or_str, key_name, + headers, version_id) + + def get_acl(self, key_name='', headers=None, version_id=None): + query_args = 'acl' + if version_id: + query_args += '&versionId=%s' % version_id + response = self.connection.make_request('GET', self.name, key_name, + query_args=query_args, + headers=headers) + body = response.read() + if response.status == 200: + policy = Policy(self) + h = handler.XmlHandler(policy, self) + if not isinstance(body, bytes): + body = body.encode('utf-8') + xml.sax.parseString(body, h) + return policy + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_subresource(self, subresource, value, key_name='', headers=None, + version_id=None): + """ + Set a subresource for a bucket or key. + + :type subresource: string + :param subresource: The subresource to set. + + :type value: string + :param value: The value of the subresource. + + :type key_name: string + :param key_name: The key to operate on, or None to operate on the + bucket. + + :type headers: dict + :param headers: Additional HTTP headers to include in the request. + + :type src_version_id: string + :param src_version_id: Optional. The version id of the key to + operate on. If not specified, operate on the newest + version. + """ + if not subresource: + raise TypeError('set_subresource called with subresource=None') + query_args = subresource + if version_id: + query_args += '&versionId=%s' % version_id + if not isinstance(value, bytes): + value = value.encode('utf-8') + response = self.connection.make_request('PUT', self.name, key_name, + data=value, + query_args=query_args, + headers=headers) + body = response.read() + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def get_subresource(self, subresource, key_name='', headers=None, + version_id=None): + """ + Get a subresource for a bucket or key. + + :type subresource: string + :param subresource: The subresource to get. + + :type key_name: string + :param key_name: The key to operate on, or None to operate on the + bucket. + + :type headers: dict + :param headers: Additional HTTP headers to include in the request. + + :type src_version_id: string + :param src_version_id: Optional. The version id of the key to + operate on. If not specified, operate on the newest + version. + + :rtype: string + :returns: The value of the subresource. + """ + if not subresource: + raise TypeError('get_subresource called with subresource=None') + query_args = subresource + if version_id: + query_args += '&versionId=%s' % version_id + response = self.connection.make_request('GET', self.name, key_name, + query_args=query_args, + headers=headers) + body = response.read() + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + return body + + def make_public(self, recursive=False, headers=None): + self.set_canned_acl('public-read', headers=headers) + if recursive: + for key in self: + self.set_canned_acl('public-read', key.name, headers=headers) + + def add_email_grant(self, permission, email_address, + recursive=False, headers=None): + """ + Convenience method that provides a quick way to add an email grant + to a bucket. 
This method retrieves the current ACL, creates a new
+        grant based on the parameters passed in, adds that grant to the ACL
+        and then PUT's the new ACL back to S3.
+
+        :type permission: string
+        :param permission: The permission being granted.  Should be one of:
+            (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
+
+        :type email_address: string
+        :param email_address: The email address associated with the AWS
+            account you are granting the permission to.
+
+        :type recursive: boolean
+        :param recursive: A boolean value that controls whether the
+            command will apply the grant to all keys within the bucket
+            or not.  The default value is False.  By passing a True
+            value, the call will iterate through all keys in the
+            bucket and apply the same grant to each key.  CAUTION: If
+            you have a lot of keys, this could take a long time!
+        """
+        if permission not in S3Permissions:
+            raise self.connection.provider.storage_permissions_error(
+                'Unknown Permission: %s' % permission)
+        policy = self.get_acl(headers=headers)
+        policy.acl.add_email_grant(permission, email_address)
+        self.set_acl(policy, headers=headers)
+        if recursive:
+            for key in self:
+                key.add_email_grant(permission, email_address, headers=headers)
+
+    def add_user_grant(self, permission, user_id, recursive=False,
+                       headers=None, display_name=None):
+        """
+        Convenience method that provides a quick way to add a canonical
+        user grant to a bucket.  This method retrieves the current ACL,
+        creates a new grant based on the parameters passed in, adds that
+        grant to the ACL and then PUT's the new ACL back to S3.
+
+        :type permission: string
+        :param permission: The permission being granted.  Should be one of:
+            (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
+
+        :type user_id: string
+        :param user_id: The canonical user id associated with the AWS
+            account you are granting the permission to.
+
+        :type recursive: boolean
+        :param recursive: A boolean value that controls whether the
+            command will apply the grant to all keys within the bucket
+            or not.  The default value is False.  By passing a True
+            value, the call will iterate through all keys in the
+            bucket and apply the same grant to each key.  CAUTION: If
+            you have a lot of keys, this could take a long time!
+
+        :type display_name: string
+        :param display_name: An optional string containing the user's
+            Display Name.  Only required on Walrus.
+        """
+        if permission not in S3Permissions:
+            raise self.connection.provider.storage_permissions_error(
+                'Unknown Permission: %s' % permission)
+        policy = self.get_acl(headers=headers)
+        policy.acl.add_user_grant(permission, user_id,
+                                  display_name=display_name)
+        self.set_acl(policy, headers=headers)
+        if recursive:
+            for key in self:
+                key.add_user_grant(permission, user_id, headers=headers,
+                                   display_name=display_name)
+
+    def list_grants(self, headers=None):
+        policy = self.get_acl(headers=headers)
+        return policy.acl.grants
+
+    def get_location(self):
+        """
+        Returns the LocationConstraint for the bucket.
+
+        :rtype: str
+        :return: The LocationConstraint for the bucket or the empty
+            string if no constraint was specified when bucket was created.
+ """ + response = self.connection.make_request('GET', self.name, + query_args='location') + body = response.read() + if response.status == 200: + rs = ResultSet(self) + h = handler.XmlHandler(rs, self) + if not isinstance(body, bytes): + body = body.encode('utf-8') + xml.sax.parseString(body, h) + return rs.LocationConstraint + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_xml_logging(self, logging_str, headers=None): + """ + Set logging on a bucket directly to the given xml string. + + :type logging_str: unicode string + :param logging_str: The XML for the bucketloggingstatus which + will be set. The string will be converted to utf-8 before + it is sent. Usually, you will obtain this XML from the + BucketLogging object. + + :rtype: bool + :return: True if ok or raises an exception. + """ + body = logging_str + if not isinstance(body, bytes): + body = body.encode('utf-8') + response = self.connection.make_request('PUT', self.name, data=body, + query_args='logging', headers=headers) + body = response.read() + if response.status == 200: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def enable_logging(self, target_bucket, target_prefix='', + grants=None, headers=None): + """ + Enable logging on a bucket. + + :type target_bucket: bucket or string + :param target_bucket: The bucket to log to. + + :type target_prefix: string + :param target_prefix: The prefix which should be prepended to the + generated log files written to the target_bucket. + + :type grants: list of Grant objects + :param grants: A list of extra permissions which will be granted on + the log files which are created. + + :rtype: bool + :return: True if ok or raises an exception. + """ + if isinstance(target_bucket, Bucket): + target_bucket = target_bucket.name + blogging = BucketLogging(target=target_bucket, prefix=target_prefix, + grants=grants) + return self.set_xml_logging(blogging.to_xml(), headers=headers) + + def disable_logging(self, headers=None): + """ + Disable logging on a bucket. + + :rtype: bool + :return: True if ok or raises an exception. + """ + blogging = BucketLogging() + return self.set_xml_logging(blogging.to_xml(), headers=headers) + + def get_logging_status(self, headers=None): + """ + Get the logging status for this bucket. + + :rtype: :class:`boto.s3.bucketlogging.BucketLogging` + :return: A BucketLogging object for this bucket. + """ + response = self.connection.make_request('GET', self.name, + query_args='logging', headers=headers) + body = response.read() + if response.status == 200: + blogging = BucketLogging() + h = handler.XmlHandler(blogging, self) + if not isinstance(body, bytes): + body = body.encode('utf-8') + xml.sax.parseString(body, h) + return blogging + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_as_logging_target(self, headers=None): + """ + Setup the current bucket as a logging target by granting the necessary + permissions to the LogDelivery group to write log files to this bucket. 
+ """ + policy = self.get_acl(headers=headers) + g1 = Grant(permission='WRITE', type='Group', uri=self.LoggingGroup) + g2 = Grant(permission='READ_ACP', type='Group', uri=self.LoggingGroup) + policy.acl.add_grant(g1) + policy.acl.add_grant(g2) + self.set_acl(policy, headers=headers) + + def get_request_payment(self, headers=None): + response = self.connection.make_request('GET', self.name, + query_args='requestPayment', headers=headers) + body = response.read() + if response.status == 200: + return body + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_request_payment(self, payer='BucketOwner', headers=None): + body = self.BucketPaymentBody % payer + response = self.connection.make_request('PUT', self.name, data=body, + query_args='requestPayment', headers=headers) + body = response.read() + if response.status == 200: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def configure_versioning(self, versioning, mfa_delete=False, + mfa_token=None, headers=None): + """ + Configure versioning for this bucket. + + ..note:: This feature is currently in beta. + + :type versioning: bool + :param versioning: A boolean indicating whether version is + enabled (True) or disabled (False). + + :type mfa_delete: bool + :param mfa_delete: A boolean indicating whether the + Multi-Factor Authentication Delete feature is enabled + (True) or disabled (False). If mfa_delete is enabled then + all Delete operations will require the token from your MFA + device to be passed in the request. + + :type mfa_token: tuple or list of strings + :param mfa_token: A tuple or list consisting of the serial + number from the MFA device and the current value of the + six-digit token associated with the device. This value is + required when you are changing the status of the MfaDelete + property of the bucket. + """ + if versioning: + ver = 'Enabled' + else: + ver = 'Suspended' + if mfa_delete: + mfa = 'Enabled' + else: + mfa = 'Disabled' + body = self.VersioningBody % (ver, mfa) + if mfa_token: + if not headers: + headers = {} + provider = self.connection.provider + headers[provider.mfa_header] = ' '.join(mfa_token) + response = self.connection.make_request('PUT', self.name, data=body, + query_args='versioning', headers=headers) + body = response.read() + if response.status == 200: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def get_versioning_status(self, headers=None): + """ + Returns the current status of versioning on the bucket. + + :rtype: dict + :returns: A dictionary containing a key named 'Versioning' + that can have a value of either Enabled, Disabled, or + Suspended. Also, if MFADelete has ever been enabled on the + bucket, the dictionary will contain a key named + 'MFADelete' which will have a value of either Enabled or + Suspended. 
+ """ + response = self.connection.make_request('GET', self.name, + query_args='versioning', headers=headers) + body = response.read() + if not isinstance(body, six.string_types): + body = body.decode('utf-8') + boto.log.debug(body) + if response.status == 200: + d = {} + ver = re.search(self.VersionRE, body) + if ver: + d['Versioning'] = ver.group(1) + mfa = re.search(self.MFADeleteRE, body) + if mfa: + d['MfaDelete'] = mfa.group(1) + return d + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def configure_lifecycle(self, lifecycle_config, headers=None): + """ + Configure lifecycle for this bucket. + + :type lifecycle_config: :class:`boto.s3.lifecycle.Lifecycle` + :param lifecycle_config: The lifecycle configuration you want + to configure for this bucket. + """ + xml = lifecycle_config.to_xml() + #xml = xml.encode('utf-8') + fp = StringIO(xml) + md5 = boto.utils.compute_md5(fp) + if headers is None: + headers = {} + headers['Content-MD5'] = md5[1] + headers['Content-Type'] = 'text/xml' + response = self.connection.make_request('PUT', self.name, + data=fp.getvalue(), + query_args='lifecycle', + headers=headers) + body = response.read() + if response.status == 200: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def get_lifecycle_config(self, headers=None): + """ + Returns the current lifecycle configuration on the bucket. + + :rtype: :class:`boto.s3.lifecycle.Lifecycle` + :returns: A LifecycleConfig object that describes all current + lifecycle rules in effect for the bucket. + """ + response = self.connection.make_request('GET', self.name, + query_args='lifecycle', headers=headers) + body = response.read() + boto.log.debug(body) + if response.status == 200: + lifecycle = Lifecycle() + h = handler.XmlHandler(lifecycle, self) + if not isinstance(body, bytes): + body = body.encode('utf-8') + xml.sax.parseString(body, h) + return lifecycle + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def delete_lifecycle_configuration(self, headers=None): + """ + Removes all lifecycle configuration from the bucket. + """ + response = self.connection.make_request('DELETE', self.name, + query_args='lifecycle', + headers=headers) + body = response.read() + boto.log.debug(body) + if response.status == 204: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def configure_website(self, suffix=None, error_key=None, + redirect_all_requests_to=None, + routing_rules=None, + headers=None): + """ + Configure this bucket to act as a website + + :type suffix: str + :param suffix: Suffix that is appended to a request that is for a + "directory" on the website endpoint (e.g. if the suffix is + index.html and you make a request to samplebucket/images/ + the data that is returned will be for the object with the + key name images/index.html). The suffix must not be empty + and must not include a slash character. + + :type error_key: str + :param error_key: The object key name to use when a 4XX class + error occurs. This is optional. + + :type redirect_all_requests_to: :class:`boto.s3.website.RedirectLocation` + :param redirect_all_requests_to: Describes the redirect behavior for + every request to this bucket's website endpoint. If this value is + non None, no other values are considered when configuring the + website configuration for the bucket. 
This is an instance of + ``RedirectLocation``. + + :type routing_rules: :class:`boto.s3.website.RoutingRules` + :param routing_rules: Object which specifies conditions + and redirects that apply when the conditions are met. + + """ + config = website.WebsiteConfiguration( + suffix, error_key, redirect_all_requests_to, + routing_rules) + return self.set_website_configuration(config, headers=headers) + + def set_website_configuration(self, config, headers=None): + """ + :type config: boto.s3.website.WebsiteConfiguration + :param config: Configuration data + """ + return self.set_website_configuration_xml(config.to_xml(), + headers=headers) + + + def set_website_configuration_xml(self, xml, headers=None): + """Upload xml website configuration""" + response = self.connection.make_request('PUT', self.name, data=xml, + query_args='website', + headers=headers) + body = response.read() + if response.status == 200: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def get_website_configuration(self, headers=None): + """ + Returns the current status of website configuration on the bucket. + + :rtype: dict + :returns: A dictionary containing a Python representation + of the XML response from S3. The overall structure is: + + * WebsiteConfiguration + + * IndexDocument + + * Suffix : suffix that is appended to request that + is for a "directory" on the website endpoint + * ErrorDocument + + * Key : name of object to serve when an error occurs + + """ + return self.get_website_configuration_with_xml(headers)[0] + + def get_website_configuration_obj(self, headers=None): + """Get the website configuration as a + :class:`boto.s3.website.WebsiteConfiguration` object. + """ + config_xml = self.get_website_configuration_xml(headers=headers) + config = website.WebsiteConfiguration() + h = handler.XmlHandler(config, self) + xml.sax.parseString(config_xml, h) + return config + + def get_website_configuration_with_xml(self, headers=None): + """ + Returns the current status of website configuration on the bucket as + unparsed XML. + + :rtype: 2-Tuple + :returns: 2-tuple containing: + + 1) A dictionary containing a Python representation \ + of the XML response. The overall structure is: + + * WebsiteConfiguration + + * IndexDocument + + * Suffix : suffix that is appended to request that \ + is for a "directory" on the website endpoint + + * ErrorDocument + + * Key : name of object to serve when an error occurs + + + 2) unparsed XML describing the bucket's website configuration + + """ + + body = self.get_website_configuration_xml(headers=headers) + e = boto.jsonresponse.Element() + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e, body + + def get_website_configuration_xml(self, headers=None): + """Get raw website configuration xml""" + response = self.connection.make_request('GET', self.name, + query_args='website', headers=headers) + body = response.read().decode('utf-8') + boto.log.debug(body) + + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + return body + + def delete_website_configuration(self, headers=None): + """ + Removes all website configuration from the bucket. 
+ """ + response = self.connection.make_request('DELETE', self.name, + query_args='website', headers=headers) + body = response.read() + boto.log.debug(body) + if response.status == 204: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def get_website_endpoint(self): + """ + Returns the fully qualified hostname to use is you want to access this + bucket as a website. This doesn't validate whether the bucket has + been correctly configured as a website or not. + """ + l = [self.name] + l.append(S3WebsiteEndpointTranslate.translate_region(self.get_location())) + l.append('.'.join(self.connection.host.split('.')[-2:])) + return '.'.join(l) + + def get_policy(self, headers=None): + """ + Returns the JSON policy associated with the bucket. The policy + is returned as an uninterpreted JSON string. + """ + response = self.connection.make_request('GET', self.name, + query_args='policy', headers=headers) + body = response.read() + if response.status == 200: + return body + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_policy(self, policy, headers=None): + """ + Add or replace the JSON policy associated with the bucket. + + :type policy: str + :param policy: The JSON policy as a string. + """ + response = self.connection.make_request('PUT', self.name, + data=policy, + query_args='policy', + headers=headers) + body = response.read() + if response.status >= 200 and response.status <= 204: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def delete_policy(self, headers=None): + response = self.connection.make_request('DELETE', self.name, + data='/?policy', + query_args='policy', + headers=headers) + body = response.read() + if response.status >= 200 and response.status <= 204: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_cors_xml(self, cors_xml, headers=None): + """ + Set the CORS (Cross-Origin Resource Sharing) for a bucket. + + :type cors_xml: str + :param cors_xml: The XML document describing your desired + CORS configuration. See the S3 documentation for details + of the exact syntax required. + """ + fp = StringIO(cors_xml) + md5 = boto.utils.compute_md5(fp) + if headers is None: + headers = {} + headers['Content-MD5'] = md5[1] + headers['Content-Type'] = 'text/xml' + response = self.connection.make_request('PUT', self.name, + data=fp.getvalue(), + query_args='cors', + headers=headers) + body = response.read() + if response.status == 200: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_cors(self, cors_config, headers=None): + """ + Set the CORS for this bucket given a boto CORSConfiguration + object. + + :type cors_config: :class:`boto.s3.cors.CORSConfiguration` + :param cors_config: The CORS configuration you want + to configure for this bucket. + """ + return self.set_cors_xml(cors_config.to_xml()) + + def get_cors_xml(self, headers=None): + """ + Returns the current CORS configuration on the bucket as an + XML document. 
+ """ + response = self.connection.make_request('GET', self.name, + query_args='cors', headers=headers) + body = response.read() + boto.log.debug(body) + if response.status == 200: + return body + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def get_cors(self, headers=None): + """ + Returns the current CORS configuration on the bucket. + + :rtype: :class:`boto.s3.cors.CORSConfiguration` + :returns: A CORSConfiguration object that describes all current + CORS rules in effect for the bucket. + """ + body = self.get_cors_xml(headers) + cors = CORSConfiguration() + h = handler.XmlHandler(cors, self) + xml.sax.parseString(body, h) + return cors + + def delete_cors(self, headers=None): + """ + Removes all CORS configuration from the bucket. + """ + response = self.connection.make_request('DELETE', self.name, + query_args='cors', + headers=headers) + body = response.read() + boto.log.debug(body) + if response.status == 204: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def initiate_multipart_upload(self, key_name, headers=None, + reduced_redundancy=False, + metadata=None, encrypt_key=False, + policy=None): + """ + Start a multipart upload operation. + + .. note:: + + Note: After you initiate multipart upload and upload one or more + parts, you must either complete or abort multipart upload in order + to stop getting charged for storage of the uploaded parts. Only + after you either complete or abort multipart upload, Amazon S3 + frees up the parts storage and stops charging you for the parts + storage. + + :type key_name: string + :param key_name: The name of the key that will ultimately + result from this multipart upload operation. This will be + exactly as the key appears in the bucket after the upload + process has been completed. + + :type headers: dict + :param headers: Additional HTTP headers to send and store with the + resulting key in S3. + + :type reduced_redundancy: boolean + :param reduced_redundancy: In multipart uploads, the storage + class is specified when initiating the upload, not when + uploading individual parts. So if you want the resulting + key to use the reduced redundancy storage class set this + flag when you initiate the upload. + + :type metadata: dict + :param metadata: Any metadata that you would like to set on the key + that results from the multipart upload. + + :type encrypt_key: bool + :param encrypt_key: If True, the new copy of the object will + be encrypted on the server-side by S3 and will be stored + in an encrypted form while at rest in S3. + + :type policy: :class:`boto.s3.acl.CannedACLStrings` + :param policy: A canned ACL policy that will be applied to the + new key (once completed) in S3. + """ + query_args = 'uploads' + provider = self.connection.provider + headers = headers or {} + if policy: + headers[provider.acl_header] = policy + if reduced_redundancy: + storage_class_header = provider.storage_class_header + if storage_class_header: + headers[storage_class_header] = 'REDUCED_REDUNDANCY' + # TODO: what if the provider doesn't support reduced redundancy? 
+            # (see boto.s3.key.Key.set_contents_from_file)
+        if encrypt_key:
+            headers[provider.server_side_encryption_header] = 'AES256'
+        if metadata is None:
+            metadata = {}
+
+        headers = boto.utils.merge_meta(headers, metadata,
+                                        self.connection.provider)
+        response = self.connection.make_request('POST', self.name, key_name,
+                                                query_args=query_args,
+                                                headers=headers)
+        body = response.read()
+        boto.log.debug(body)
+        if response.status == 200:
+            resp = MultiPartUpload(self)
+            h = handler.XmlHandler(resp, self)
+            if not isinstance(body, bytes):
+                body = body.encode('utf-8')
+            xml.sax.parseString(body, h)
+            return resp
+        else:
+            raise self.connection.provider.storage_response_error(
+                response.status, response.reason, body)
+
+    def complete_multipart_upload(self, key_name, upload_id,
+                                  xml_body, headers=None):
+        """
+        Complete a multipart upload operation.
+        """
+        query_args = 'uploadId=%s' % upload_id
+        if headers is None:
+            headers = {}
+        headers['Content-Type'] = 'text/xml'
+        response = self.connection.make_request('POST', self.name, key_name,
+                                                query_args=query_args,
+                                                headers=headers, data=xml_body)
+        contains_error = False
+        body = response.read().decode('utf-8')
+        # Some errors will be reported in the body of the response
+        # even though the HTTP response code is 200.  This check
+        # does a quick and dirty peek in the body for an error element.
+        if body.find('<Error>') > 0:
+            contains_error = True
+        boto.log.debug(body)
+        if response.status == 200 and not contains_error:
+            resp = CompleteMultiPartUpload(self)
+            h = handler.XmlHandler(resp, self)
+            if not isinstance(body, bytes):
+                body = body.encode('utf-8')
+            xml.sax.parseString(body, h)
+            # Use a dummy key to parse various response headers
+            # for versioning, encryption info and then explicitly
+            # set the completed MPU object values from key.
+            k = self.key_class(self)
+            k.handle_version_headers(response)
+            k.handle_encryption_headers(response)
+            resp.version_id = k.version_id
+            resp.encrypted = k.encrypted
+            return resp
+        else:
+            raise self.connection.provider.storage_response_error(
+                response.status, response.reason, body)
+
+    def cancel_multipart_upload(self, key_name, upload_id, headers=None):
+        """
+        To verify that all parts have been removed, so you don't get charged
+        for the part storage, you should call the List Parts operation and
+        ensure the parts list is empty.
+ """ + query_args = 'uploadId=%s' % upload_id + response = self.connection.make_request('DELETE', self.name, key_name, + query_args=query_args, + headers=headers) + body = response.read() + boto.log.debug(body) + if response.status != 204: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def delete(self, headers=None): + return self.connection.delete_bucket(self.name, headers=headers) + + def get_tags(self): + response = self.get_xml_tags() + tags = Tags() + h = handler.XmlHandler(tags, self) + if not isinstance(response, bytes): + response = response.encode('utf-8') + xml.sax.parseString(response, h) + return tags + + def get_xml_tags(self): + response = self.connection.make_request('GET', self.name, + query_args='tagging', + headers=None) + body = response.read() + if response.status == 200: + return body + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_xml_tags(self, tag_str, headers=None, query_args='tagging'): + if headers is None: + headers = {} + md5 = boto.utils.compute_md5(StringIO(tag_str)) + headers['Content-MD5'] = md5[1] + headers['Content-Type'] = 'text/xml' + if not isinstance(tag_str, bytes): + tag_str = tag_str.encode('utf-8') + response = self.connection.make_request('PUT', self.name, + data=tag_str, + query_args=query_args, + headers=headers) + body = response.read() + if response.status != 204: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + return True + + def set_tags(self, tags, headers=None): + return self.set_xml_tags(tags.to_xml(), headers=headers) + + def delete_tags(self, headers=None): + response = self.connection.make_request('DELETE', self.name, + query_args='tagging', + headers=headers) + body = response.read() + boto.log.debug(body) + if response.status == 204: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/s3/bucketlistresultset.py b/desktop/core/ext-py/boto-2.38.0/boto/s3/bucketlistresultset.py new file mode 100644 index 0000000000000000000000000000000000000000..ab9c65e4568f4a70ec1f7dbc928b355e173c5a22 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/s3/bucketlistresultset.py @@ -0,0 +1,156 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+
+from boto.compat import urllib, six
+
+def bucket_lister(bucket, prefix='', delimiter='', marker='', headers=None,
+                  encoding_type=None):
+    """
+    A generator function for listing keys in a bucket.
+    """
+    more_results = True
+    k = None
+    while more_results:
+        rs = bucket.get_all_keys(prefix=prefix, marker=marker,
+                                 delimiter=delimiter, headers=headers,
+                                 encoding_type=encoding_type)
+        for k in rs:
+            yield k
+        if k:
+            marker = rs.next_marker or k.name
+        if marker and encoding_type == "url":
+            if isinstance(marker, six.text_type):
+                marker = marker.encode('utf-8')
+            marker = urllib.parse.unquote(marker)
+        more_results = rs.is_truncated
+
+class BucketListResultSet(object):
+    """
+    A resultset for listing keys within a bucket. Uses the bucket_lister
+    generator function and implements the iterator interface. This
+    transparently handles the results paging from S3 so even if you have
+    many thousands of keys within the bucket you can iterate over all
+    keys in a reasonably efficient manner.
+    """
+
+    def __init__(self, bucket=None, prefix='', delimiter='', marker='',
+                 headers=None, encoding_type=None):
+        self.bucket = bucket
+        self.prefix = prefix
+        self.delimiter = delimiter
+        self.marker = marker
+        self.headers = headers
+        self.encoding_type = encoding_type
+
+    def __iter__(self):
+        return bucket_lister(self.bucket, prefix=self.prefix,
+                             delimiter=self.delimiter, marker=self.marker,
+                             headers=self.headers,
+                             encoding_type=self.encoding_type)
+
+def versioned_bucket_lister(bucket, prefix='', delimiter='',
+                            key_marker='', version_id_marker='', headers=None,
+                            encoding_type=None):
+    """
+    A generator function for listing versions in a bucket.
+    """
+    more_results = True
+    k = None
+    while more_results:
+        rs = bucket.get_all_versions(prefix=prefix, key_marker=key_marker,
+                                     version_id_marker=version_id_marker,
+                                     delimiter=delimiter, headers=headers,
+                                     max_keys=999, encoding_type=encoding_type)
+        for k in rs:
+            yield k
+        key_marker = rs.next_key_marker
+        version_id_marker = rs.next_version_id_marker
+        more_results = rs.is_truncated
+
+class VersionedBucketListResultSet(object):
+    """
+    A resultset for listing versions within a bucket. Uses the bucket_lister
+    generator function and implements the iterator interface. This
+    transparently handles the results paging from S3 so even if you have
+    many thousands of keys within the bucket you can iterate over all
+    keys in a reasonably efficient manner.
+    """
+
+    def __init__(self, bucket=None, prefix='', delimiter='', key_marker='',
+                 version_id_marker='', headers=None, encoding_type=None):
+        self.bucket = bucket
+        self.prefix = prefix
+        self.delimiter = delimiter
+        self.key_marker = key_marker
+        self.version_id_marker = version_id_marker
+        self.headers = headers
+        self.encoding_type = encoding_type
+
+    def __iter__(self):
+        return versioned_bucket_lister(self.bucket, prefix=self.prefix,
+                                       delimiter=self.delimiter,
+                                       key_marker=self.key_marker,
+                                       version_id_marker=self.version_id_marker,
+                                       headers=self.headers,
+                                       encoding_type=self.encoding_type)
+
+def multipart_upload_lister(bucket, key_marker='',
+                            upload_id_marker='',
+                            headers=None, encoding_type=None):
+    """
+    A generator function for listing multipart uploads in a bucket.
+ """ + more_results = True + k = None + while more_results: + rs = bucket.get_all_multipart_uploads(key_marker=key_marker, + upload_id_marker=upload_id_marker, + headers=headers, + encoding_type=encoding_type) + for k in rs: + yield k + key_marker = rs.next_key_marker + upload_id_marker = rs.next_upload_id_marker + more_results= rs.is_truncated + +class MultiPartUploadListResultSet(object): + """ + A resultset for listing multipart uploads within a bucket. + Uses the multipart_upload_lister generator function and + implements the iterator interface. This + transparently handles the results paging from S3 so even if you have + many thousands of uploads within the bucket you can iterate over all + keys in a reasonably efficient manner. + """ + def __init__(self, bucket=None, key_marker='', + upload_id_marker='', headers=None, encoding_type=None): + self.bucket = bucket + self.key_marker = key_marker + self.upload_id_marker = upload_id_marker + self.headers = headers + self.encoding_type = encoding_type + + def __iter__(self): + return multipart_upload_lister(self.bucket, + key_marker=self.key_marker, + upload_id_marker=self.upload_id_marker, + headers=self.headers, + encoding_type=self.encoding_type) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/s3/bucketlogging.py b/desktop/core/ext-py/boto-2.38.0/boto/s3/bucketlogging.py new file mode 100644 index 0000000000000000000000000000000000000000..38cef1140e22cfaaafa0ada22f4f2edc9b175c87 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/s3/bucketlogging.py @@ -0,0 +1,83 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+
+import xml.sax.saxutils
+from boto.s3.acl import Grant
+
+class BucketLogging(object):
+
+    def __init__(self, target=None, prefix=None, grants=None):
+        self.target = target
+        self.prefix = prefix
+        if grants is None:
+            self.grants = []
+        else:
+            self.grants = grants
+
+    def __repr__(self):
+        if self.target is None:
+            return "<BucketLoggingStatus: Disabled>"
+        grants = []
+        for g in self.grants:
+            if g.type == 'CanonicalUser':
+                u = g.display_name
+            elif g.type == 'Group':
+                u = g.uri
+            else:
+                u = g.email_address
+            grants.append("%s = %s" % (u, g.permission))
+        return "<BucketLoggingStatus: %s, %s (%s)>" % (self.target, self.prefix, ", ".join(grants))
+
+    def add_grant(self, grant):
+        self.grants.append(grant)
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Grant':
+            self.grants.append(Grant())
+            return self.grants[-1]
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'TargetBucket':
+            self.target = value
+        elif name == 'TargetPrefix':
+            self.prefix = value
+        else:
+            setattr(self, name, value)
+
+    def to_xml(self):
+        # caller is responsible to encode to utf-8
+        s = u'<?xml version="1.0" encoding="UTF-8"?>'
+        s += u'<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01">'
+        if self.target is not None:
+            s += u'<LoggingEnabled>'
+            s += u'<TargetBucket>%s</TargetBucket>' % self.target
+            prefix = self.prefix or ''
+            s += u'<TargetPrefix>%s</TargetPrefix>' % xml.sax.saxutils.escape(prefix)
+            if self.grants:
+                s += '<TargetGrants>'
+                for grant in self.grants:
+                    s += grant.to_xml()
+                s += '</TargetGrants>'
+            s += u'</LoggingEnabled>'
+        s += u'</BucketLoggingStatus>'
+        return s
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/s3/connection.py b/desktop/core/ext-py/boto-2.38.0/boto/s3/connection.py
new file mode 100644
index 0000000000000000000000000000000000000000..0fcc1f5957fc9fbd7981f35baefb2eca006ff9bb
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/s3/connection.py
@@ -0,0 +1,665 @@
+# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import xml.sax
+import base64
+from boto.compat import six, urllib
+import time
+
+from boto.auth import detect_potential_s3sigv4
+import boto.utils
+from boto.connection import AWSAuthConnection
+from boto import handler
+from boto.s3.bucket import Bucket
+from boto.s3.key import Key
+from boto.resultset import ResultSet
+from boto.exception import BotoClientError, S3ResponseError
+
+
+def check_lowercase_bucketname(n):
+    """
+    Bucket names must not contain uppercase characters. We check for
+    this by appending a lowercase character and testing with islower().
+ Note this also covers cases like numeric bucket names with dashes. + + >>> check_lowercase_bucketname("Aaaa") + Traceback (most recent call last): + ... + BotoClientError: S3Error: Bucket names cannot contain upper-case + characters when using either the sub-domain or virtual hosting calling + format. + + >>> check_lowercase_bucketname("1234-5678-9123") + True + >>> check_lowercase_bucketname("abcdefg1234") + True + """ + if not (n + 'a').islower(): + raise BotoClientError("Bucket names cannot contain upper-case " \ + "characters when using either the sub-domain or virtual " \ + "hosting calling format.") + return True + + +def assert_case_insensitive(f): + def wrapper(*args, **kwargs): + if len(args) == 3 and check_lowercase_bucketname(args[2]): + pass + return f(*args, **kwargs) + return wrapper + + +class _CallingFormat(object): + + def get_bucket_server(self, server, bucket): + return '' + + def build_url_base(self, connection, protocol, server, bucket, key=''): + url_base = '%s://' % protocol + url_base += self.build_host(server, bucket) + url_base += connection.get_path(self.build_path_base(bucket, key)) + return url_base + + def build_host(self, server, bucket): + if bucket == '': + return server + else: + return self.get_bucket_server(server, bucket) + + def build_auth_path(self, bucket, key=''): + key = boto.utils.get_utf8_value(key) + path = '' + if bucket != '': + path = '/' + bucket + return path + '/%s' % urllib.parse.quote(key) + + def build_path_base(self, bucket, key=''): + key = boto.utils.get_utf8_value(key) + return '/%s' % urllib.parse.quote(key) + + +class SubdomainCallingFormat(_CallingFormat): + + @assert_case_insensitive + def get_bucket_server(self, server, bucket): + return '%s.%s' % (bucket, server) + + +class VHostCallingFormat(_CallingFormat): + + @assert_case_insensitive + def get_bucket_server(self, server, bucket): + return bucket + + +class OrdinaryCallingFormat(_CallingFormat): + + def get_bucket_server(self, server, bucket): + return server + + def build_path_base(self, bucket, key=''): + key = boto.utils.get_utf8_value(key) + path_base = '/' + if bucket: + path_base += "%s/" % bucket + return path_base + urllib.parse.quote(key) + + +class ProtocolIndependentOrdinaryCallingFormat(OrdinaryCallingFormat): + + def build_url_base(self, connection, protocol, server, bucket, key=''): + url_base = '//' + url_base += self.build_host(server, bucket) + url_base += connection.get_path(self.build_path_base(bucket, key)) + return url_base + + +class Location(object): + + DEFAULT = '' # US Classic Region + EU = 'EU' + USWest = 'us-west-1' + USWest2 = 'us-west-2' + SAEast = 'sa-east-1' + APNortheast = 'ap-northeast-1' + APSoutheast = 'ap-southeast-1' + APSoutheast2 = 'ap-southeast-2' + CNNorth1 = 'cn-north-1' + + +class NoHostProvided(object): + # An identifying object to help determine whether the user provided a + # ``host`` or not. Never instantiated. 
+    pass
+
+
+class HostRequiredError(BotoClientError):
+    pass
+
+
+class S3Connection(AWSAuthConnection):
+
+    DefaultHost = boto.config.get('s3', 'host', 's3.amazonaws.com')
+    DefaultCallingFormat = boto.config.get('s3', 'calling_format', 'boto.s3.connection.SubdomainCallingFormat')
+    QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s'
+
+    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+                 is_secure=True, port=None, proxy=None, proxy_port=None,
+                 proxy_user=None, proxy_pass=None,
+                 host=NoHostProvided, debug=0, https_connection_factory=None,
+                 calling_format=DefaultCallingFormat, path='/',
+                 provider='aws', bucket_class=Bucket, security_token=None,
+                 suppress_consec_slashes=True, anon=False,
+                 validate_certs=None, profile_name=None):
+        no_host_provided = False
+        if host is NoHostProvided:
+            no_host_provided = True
+            host = self.DefaultHost
+        if isinstance(calling_format, six.string_types):
+            calling_format = boto.utils.find_class(calling_format)()
+        self.calling_format = calling_format
+        self.bucket_class = bucket_class
+        self.anon = anon
+        super(S3Connection, self).__init__(host,
+                aws_access_key_id, aws_secret_access_key,
+                is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
+                debug=debug, https_connection_factory=https_connection_factory,
+                path=path, provider=provider, security_token=security_token,
+                suppress_consec_slashes=suppress_consec_slashes,
+                validate_certs=validate_certs, profile_name=profile_name)
+        # We need to delay until after the call to ``super`` before checking
+        # to see if SigV4 is in use.
+        if no_host_provided:
+            if 'hmac-v4-s3' in self._required_auth_capability():
+                raise HostRequiredError(
+                    "When using SigV4, you must specify a 'host' parameter."
+                )
+
+    @detect_potential_s3sigv4
+    def _required_auth_capability(self):
+        if self.anon:
+            return ['anon']
+        else:
+            return ['s3']
+
+    def __iter__(self):
+        for bucket in self.get_all_buckets():
+            yield bucket
+
+    def __contains__(self, bucket_name):
+        return not (self.lookup(bucket_name) is None)
+
+    def set_bucket_class(self, bucket_class):
+        """
+        Set the Bucket class associated with this bucket. By default, this
+        would be the boto.s3.bucket.Bucket class but if you want to subclass
+        that for some reason this allows you to associate your new class.
+
+        :type bucket_class: class
+        :param bucket_class: A subclass of Bucket that can be more specific
+        """
+        self.bucket_class = bucket_class
+
+    def build_post_policy(self, expiration_time, conditions):
+        """
+        Taken from the AWS book Python examples and modified for use with boto
+        """
+        assert isinstance(expiration_time, time.struct_time), \
+            'Policy document must include a valid expiration Time object'
+
+        # Convert conditions object mappings to condition statements
+
+        return '{"expiration": "%s",\n"conditions": [%s]}' % \
+            (time.strftime(boto.utils.ISO8601, expiration_time), ",".join(conditions))
+
+    def build_post_form_args(self, bucket_name, key, expires_in=6000,
+                             acl=None, success_action_redirect=None,
+                             max_content_length=None,
+                             http_method='http', fields=None,
+                             conditions=None, storage_class='STANDARD',
+                             server_side_encryption=None):
+        """
+        Taken from the AWS book Python examples and modified for use with boto
+        This only returns the arguments required for the post form, not the
+        actual form.
This does not return the file input field which also + needs to be added + + :type bucket_name: string + :param bucket_name: Bucket to submit to + + :type key: string + :param key: Key name, optionally add ${filename} to the end to + attach the submitted filename + + :type expires_in: integer + :param expires_in: Time (in seconds) before this expires, defaults + to 6000 + + :type acl: string + :param acl: A canned ACL. One of: + * private + * public-read + * public-read-write + * authenticated-read + * bucket-owner-read + * bucket-owner-full-control + + :type success_action_redirect: string + :param success_action_redirect: URL to redirect to on success + + :type max_content_length: integer + :param max_content_length: Maximum size for this file + + :type http_method: string + :param http_method: HTTP Method to use, "http" or "https" + + :type storage_class: string + :param storage_class: Storage class to use for storing the object. + Valid values: STANDARD | REDUCED_REDUNDANCY + + :type server_side_encryption: string + :param server_side_encryption: Specifies server-side encryption + algorithm to use when Amazon S3 creates an object. + Valid values: None | AES256 + + :rtype: dict + :return: A dictionary containing field names/values as well as + a url to POST to + + .. code-block:: python + + + """ + if fields is None: + fields = [] + if conditions is None: + conditions = [] + expiration = time.gmtime(int(time.time() + expires_in)) + + # Generate policy document + conditions.append('{"bucket": "%s"}' % bucket_name) + if key.endswith("${filename}"): + conditions.append('["starts-with", "$key", "%s"]' % key[:-len("${filename}")]) + else: + conditions.append('{"key": "%s"}' % key) + if acl: + conditions.append('{"acl": "%s"}' % acl) + fields.append({"name": "acl", "value": acl}) + if success_action_redirect: + conditions.append('{"success_action_redirect": "%s"}' % success_action_redirect) + fields.append({"name": "success_action_redirect", "value": success_action_redirect}) + if max_content_length: + conditions.append('["content-length-range", 0, %i]' % max_content_length) + + if self.provider.security_token: + fields.append({'name': 'x-amz-security-token', + 'value': self.provider.security_token}) + conditions.append('{"x-amz-security-token": "%s"}' % self.provider.security_token) + + if storage_class: + fields.append({'name': 'x-amz-storage-class', + 'value': storage_class}) + conditions.append('{"x-amz-storage-class": "%s"}' % storage_class) + + if server_side_encryption: + fields.append({'name': 'x-amz-server-side-encryption', + 'value': server_side_encryption}) + conditions.append('{"x-amz-server-side-encryption": "%s"}' % server_side_encryption) + + policy = self.build_post_policy(expiration, conditions) + + # Add the base64-encoded policy document as the 'policy' field + policy_b64 = base64.b64encode(policy) + fields.append({"name": "policy", "value": policy_b64}) + + # Add the AWS access key as the 'AWSAccessKeyId' field + fields.append({"name": "AWSAccessKeyId", + "value": self.aws_access_key_id}) + + # Add signature for encoded policy document as the + # 'signature' field + signature = self._auth_handler.sign_string(policy_b64) + fields.append({"name": "signature", "value": signature}) + fields.append({"name": "key", "value": key}) + + # HTTPS protocol will be used if the secure HTTP option is enabled. 
+ url = '%s://%s/' % (http_method, + self.calling_format.build_host(self.server_name(), + bucket_name)) + + return {"action": url, "fields": fields} + + def generate_url_sigv4(self, expires_in, method, bucket='', key='', + headers=None, force_http=False, + response_headers=None, version_id=None, + iso_date=None): + path = self.calling_format.build_path_base(bucket, key) + auth_path = self.calling_format.build_auth_path(bucket, key) + host = self.calling_format.build_host(self.server_name(), bucket) + + # For presigned URLs we should ignore the port if it's HTTPS + if host.endswith(':443'): + host = host[:-4] + + params = {} + if version_id is not None: + params['VersionId'] = version_id + + http_request = self.build_base_http_request(method, path, auth_path, + headers=headers, host=host, + params=params) + + return self._auth_handler.presign(http_request, expires_in, + iso_date=iso_date) + + def generate_url(self, expires_in, method, bucket='', key='', headers=None, + query_auth=True, force_http=False, response_headers=None, + expires_in_absolute=False, version_id=None): + if self._auth_handler.capability[0] == 'hmac-v4-s3': + # Handle the special sigv4 case + return self.generate_url_sigv4(expires_in, method, bucket=bucket, + key=key, headers=headers, force_http=force_http, + response_headers=response_headers, version_id=version_id) + + headers = headers or {} + if expires_in_absolute: + expires = int(expires_in) + else: + expires = int(time.time() + expires_in) + auth_path = self.calling_format.build_auth_path(bucket, key) + auth_path = self.get_path(auth_path) + # optional version_id and response_headers need to be added to + # the query param list. + extra_qp = [] + if version_id is not None: + extra_qp.append("versionId=%s" % version_id) + if response_headers: + for k, v in response_headers.items(): + extra_qp.append("%s=%s" % (k, urllib.parse.quote(v))) + if self.provider.security_token: + headers['x-amz-security-token'] = self.provider.security_token + if extra_qp: + delimiter = '?' if '?' not in auth_path else '&' + auth_path += delimiter + '&'.join(extra_qp) + c_string = boto.utils.canonical_string(method, auth_path, headers, + expires, self.provider) + b64_hmac = self._auth_handler.sign_string(c_string) + encoded_canonical = urllib.parse.quote(b64_hmac, safe='') + self.calling_format.build_path_base(bucket, key) + if query_auth: + query_part = '?' + self.QueryString % (encoded_canonical, expires, + self.aws_access_key_id) + else: + query_part = '' + if headers: + hdr_prefix = self.provider.header_prefix + for k, v in headers.items(): + if k.startswith(hdr_prefix): + # headers used for sig generation must be + # included in the url also. + extra_qp.append("%s=%s" % (k, urllib.parse.quote(v))) + if extra_qp: + delimiter = '?' 
if not query_part else '&'
+            query_part += delimiter + '&'.join(extra_qp)
+        if force_http:
+            protocol = 'http'
+            port = 80
+        else:
+            protocol = self.protocol
+            port = self.port
+        return self.calling_format.build_url_base(self, protocol,
+                                                  self.server_name(port),
+                                                  bucket, key) + query_part
+
+    def get_all_buckets(self, headers=None):
+        response = self.make_request('GET', headers=headers)
+        body = response.read()
+        if response.status > 300:
+            raise self.provider.storage_response_error(
+                response.status, response.reason, body)
+        rs = ResultSet([('Bucket', self.bucket_class)])
+        h = handler.XmlHandler(rs, self)
+        if not isinstance(body, bytes):
+            body = body.encode('utf-8')
+        xml.sax.parseString(body, h)
+        return rs
+
+    def get_canonical_user_id(self, headers=None):
+        """
+        Convenience method that returns the "CanonicalUserID" of the
+        user whose credentials are associated with the connection.
+        The only way to get this value is to do a GET request on the
+        service which returns all buckets associated with the account.
+        As part of that response, the canonical userid is returned.
+        This method simply does all of that and then returns just the
+        user id.
+
+        :rtype: string
+        :return: A string containing the canonical user id.
+        """
+        rs = self.get_all_buckets(headers=headers)
+        return rs.owner.id
+
+    def get_bucket(self, bucket_name, validate=True, headers=None):
+        """
+        Retrieves a bucket by name.
+
+        If the bucket does not exist, an ``S3ResponseError`` will be raised. If
+        you are unsure if the bucket exists or not, you can use the
+        ``S3Connection.lookup`` method, which will either return a valid bucket
+        or ``None``.
+
+        If ``validate=False`` is passed, no request is made to the service (no
+        charge/communication delay). This is only safe to do if you are **sure**
+        the bucket exists.
+
+        If the default ``validate=True`` is passed, a request is made to the
+        service to ensure the bucket exists. Prior to Boto v2.25.0, this fetched
+        a list of keys (but with a max limit set to ``0``, always returning an empty
+        list) in the bucket (& included better error messages), at an
+        increased expense. As of Boto v2.25.0, this now performs a HEAD request
+        (less expensive but worse error messages).
+
+        If you were relying on parsing the error message before, you should call
+        something like::
+
+            bucket = conn.get_bucket('<bucket_name>', validate=False)
+            bucket.get_all_keys(maxkeys=0)
+
+        :type bucket_name: string
+        :param bucket_name: The name of the bucket
+
+        :type headers: dict
+        :param headers: Additional headers to pass along with the request to
+            AWS.
+
+        :type validate: boolean
+        :param validate: If ``True``, it will try to verify the bucket exists
+            on the service-side. (Default: ``True``)
+        """
+        if validate:
+            return self.head_bucket(bucket_name, headers=headers)
+        else:
+            return self.bucket_class(self, bucket_name)
+
+    def head_bucket(self, bucket_name, headers=None):
+        """
+        Determines if a bucket exists by name.
+
+        If the bucket does not exist, an ``S3ResponseError`` will be raised.
+
+        :type bucket_name: string
+        :param bucket_name: The name of the bucket
+
+        :type headers: dict
+        :param headers: Additional headers to pass along with the request to
+            AWS.
+
+        :returns: A <Bucket> object
+        """
+        response = self.make_request('HEAD', bucket_name, headers=headers)
+        body = response.read()
+        if response.status == 200:
+            return self.bucket_class(self, bucket_name)
+        elif response.status == 403:
+            # For backward-compatibility, we'll populate part of the exception
+            # with the most-common default.
+            err = self.provider.storage_response_error(
+                response.status,
+                response.reason,
+                body
+            )
+            err.error_code = 'AccessDenied'
+            err.error_message = 'Access Denied'
+            raise err
+        elif response.status == 404:
+            # For backward-compatibility, we'll populate part of the exception
+            # with the most-common default.
+            err = self.provider.storage_response_error(
+                response.status,
+                response.reason,
+                body
+            )
+            err.error_code = 'NoSuchBucket'
+            err.error_message = 'The specified bucket does not exist'
+            raise err
+        else:
+            raise self.provider.storage_response_error(
+                response.status, response.reason, body)
+
+    def lookup(self, bucket_name, validate=True, headers=None):
+        """
+        Attempts to get a bucket from S3.
+
+        Works identically to ``S3Connection.get_bucket``, save for that it
+        will return ``None`` if the bucket does not exist instead of throwing
+        an exception.
+
+        :type bucket_name: string
+        :param bucket_name: The name of the bucket
+
+        :type headers: dict
+        :param headers: Additional headers to pass along with the request to
+            AWS.
+
+        :type validate: boolean
+        :param validate: If ``True``, it will try to fetch all keys within the
+            given bucket. (Default: ``True``)
+        """
+        try:
+            bucket = self.get_bucket(bucket_name, validate, headers=headers)
+        except:
+            bucket = None
+        return bucket
+
+    def create_bucket(self, bucket_name, headers=None,
+                      location=Location.DEFAULT, policy=None):
+        """
+        Creates a new located bucket. By default it's in the USA. You can pass
+        Location.EU to create a European bucket (S3) or European Union bucket
+        (GCS).
+
+        :type bucket_name: string
+        :param bucket_name: The name of the new bucket
+
+        :type headers: dict
+        :param headers: Additional headers to pass along with the request to AWS.
+
+        :type location: str
+        :param location: The location of the new bucket. You can use one of the
+            constants in :class:`boto.s3.connection.Location` (e.g. Location.EU,
+            Location.USWest, etc.).
+
+        :type policy: :class:`boto.s3.acl.CannedACLStrings`
+        :param policy: A canned ACL policy that will be applied to the
+            new key in S3.
+
+        """
+        check_lowercase_bucketname(bucket_name)
+
+        if policy:
+            if headers:
+                headers[self.provider.acl_header] = policy
+            else:
+                headers = {self.provider.acl_header: policy}
+        if location == Location.DEFAULT:
+            data = ''
+        else:
+            data = '<CreateBucketConfiguration><LocationConstraint>' + \
+                    location + '</LocationConstraint></CreateBucketConfiguration>'
+        response = self.make_request('PUT', bucket_name, headers=headers,
+                                     data=data)
+        body = response.read()
+        if response.status == 409:
+            raise self.provider.storage_create_error(
+                response.status, response.reason, body)
+        if response.status == 200:
+            return self.bucket_class(self, bucket_name)
+        else:
+            raise self.provider.storage_response_error(
+                response.status, response.reason, body)
+
+    def delete_bucket(self, bucket, headers=None):
+        """
+        Removes an S3 bucket.
+
+        In order to remove the bucket, it must first be empty. If the bucket is
+        not empty, an ``S3ResponseError`` will be raised.
+
+        :type bucket_name: string
+        :param bucket_name: The name of the bucket
+
+        :type headers: dict
+        :param headers: Additional headers to pass along with the request to
+            AWS.
+ """ + response = self.make_request('DELETE', bucket, headers=headers) + body = response.read() + if response.status != 204: + raise self.provider.storage_response_error( + response.status, response.reason, body) + + def make_request(self, method, bucket='', key='', headers=None, data='', + query_args=None, sender=None, override_num_retries=None, + retry_handler=None): + if isinstance(bucket, self.bucket_class): + bucket = bucket.name + if isinstance(key, Key): + key = key.name + path = self.calling_format.build_path_base(bucket, key) + boto.log.debug('path=%s' % path) + auth_path = self.calling_format.build_auth_path(bucket, key) + boto.log.debug('auth_path=%s' % auth_path) + host = self.calling_format.build_host(self.server_name(), bucket) + if query_args: + path += '?' + query_args + boto.log.debug('path=%s' % path) + auth_path += '?' + query_args + boto.log.debug('auth_path=%s' % auth_path) + return super(S3Connection, self).make_request( + method, path, headers, + data, host, auth_path, sender, + override_num_retries=override_num_retries, + retry_handler=retry_handler + ) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/s3/cors.py b/desktop/core/ext-py/boto-2.38.0/boto/s3/cors.py new file mode 100644 index 0000000000000000000000000000000000000000..d97ee890aabe0701a387d54c161943b947f40b77 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/s3/cors.py @@ -0,0 +1,210 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + +class CORSRule(object): + """ + CORS rule for a bucket. + + :ivar id: A unique identifier for the rule. The ID value can be + up to 255 characters long. The IDs help you find a rule in + the configuration. + + :ivar allowed_methods: An HTTP method that you want to allow the + origin to execute. Each CORSRule must identify at least one + origin and one method. Valid values are: + GET|PUT|HEAD|POST|DELETE + + :ivar allowed_origin: An origin that you want to allow cross-domain + requests from. This can contain at most one * wild character. + Each CORSRule must identify at least one origin and one method. + The origin value can include at most one '*' wild character. + For example, "http://*.example.com". You can also specify + only * as the origin value allowing all origins cross-domain access. + + :ivar allowed_header: Specifies which headers are allowed in a + pre-flight OPTIONS request via the + Access-Control-Request-Headers header. 
Each header name
+        specified in the Access-Control-Request-Headers header must
+        have a corresponding entry in the rule. Amazon S3 will send
+        only the allowed headers in a response that were requested.
+        This can contain at most one * wild character.
+
+    :ivar max_age_seconds: The time in seconds that your browser is to
+        cache the preflight response for the specified resource.
+
+    :ivar expose_header: One or more headers in the response that you
+        want customers to be able to access from their applications
+        (for example, from a JavaScript XMLHttpRequest object). You
+        add one ExposeHeader element in the rule for each header.
+    """
+
+    def __init__(self, allowed_method=None, allowed_origin=None,
+                 id=None, allowed_header=None, max_age_seconds=None,
+                 expose_header=None):
+        if allowed_method is None:
+            allowed_method = []
+        self.allowed_method = allowed_method
+        if allowed_origin is None:
+            allowed_origin = []
+        self.allowed_origin = allowed_origin
+        self.id = id
+        if allowed_header is None:
+            allowed_header = []
+        self.allowed_header = allowed_header
+        self.max_age_seconds = max_age_seconds
+        if expose_header is None:
+            expose_header = []
+        self.expose_header = expose_header
+
+    def __repr__(self):
+        return '<Rule: %s>' % self.id
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'ID':
+            self.id = value
+        elif name == 'AllowedMethod':
+            self.allowed_method.append(value)
+        elif name == 'AllowedOrigin':
+            self.allowed_origin.append(value)
+        elif name == 'AllowedHeader':
+            self.allowed_header.append(value)
+        elif name == 'MaxAgeSeconds':
+            self.max_age_seconds = int(value)
+        elif name == 'ExposeHeader':
+            self.expose_header.append(value)
+        else:
+            setattr(self, name, value)
+
+    def to_xml(self):
+        s = '<CORSRule>'
+        for allowed_method in self.allowed_method:
+            s += '<AllowedMethod>%s</AllowedMethod>' % allowed_method
+        for allowed_origin in self.allowed_origin:
+            s += '<AllowedOrigin>%s</AllowedOrigin>' % allowed_origin
+        for allowed_header in self.allowed_header:
+            s += '<AllowedHeader>%s</AllowedHeader>' % allowed_header
+        for expose_header in self.expose_header:
+            s += '<ExposeHeader>%s</ExposeHeader>' % expose_header
+        if self.max_age_seconds:
+            s += '<MaxAgeSeconds>%d</MaxAgeSeconds>' % self.max_age_seconds
+        if self.id:
+            s += '<ID>%s</ID>' % self.id
+        s += '</CORSRule>'
+        return s
+
+
+class CORSConfiguration(list):
+    """
+    A container for the rules associated with a CORS configuration.
+    """
+
+    def startElement(self, name, attrs, connection):
+        if name == 'CORSRule':
+            rule = CORSRule()
+            self.append(rule)
+            return rule
+        return None
+
+    def endElement(self, name, value, connection):
+        setattr(self, name, value)
+
+    def to_xml(self):
+        """
+        Returns a string containing the XML version of the CORS
+        configuration as defined by S3.
+        """
+        s = '<CORSConfiguration>'
+        for rule in self:
+            s += rule.to_xml()
+        s += '</CORSConfiguration>'
+        return s
+
+    def add_rule(self, allowed_method, allowed_origin,
+                 id=None, allowed_header=None, max_age_seconds=None,
+                 expose_header=None):
+        """
+        Add a rule to this CORS configuration. This only adds
+        the rule to the local copy. To install the new rule(s) on
+        the bucket, you need to pass this CORS config object
+        to the set_cors method of the Bucket object.
+
+        :type allowed_method: list of str
+        :param allowed_method: An HTTP method that you want to allow the
+            origin to execute. Each CORSRule must identify at least one
+            origin and one method. Valid values are:
+            GET|PUT|HEAD|POST|DELETE
+
+        :type allowed_origin: list of str
+        :param allowed_origin: An origin that you want to allow cross-domain
+            requests from. This can contain at most one * wild character.
+            Each CORSRule must identify at least one origin and one method.
+            The origin value can include at most one '*' wild character.
+            For example, "http://*.example.com". You can also specify
+            only * as the origin value allowing all origins
+            cross-domain access.
+
+        :type id: str
+        :param id: A unique identifier for the rule. The ID value can be
+            up to 255 characters long. The IDs help you find a rule in
+            the configuration.
+
+        :type allowed_header: list of str
+        :param allowed_header: Specifies which headers are allowed in a
+            pre-flight OPTIONS request via the
+            Access-Control-Request-Headers header. Each header name
+            specified in the Access-Control-Request-Headers header must
+            have a corresponding entry in the rule. Amazon S3 will send
+            only the allowed headers in a response that were requested.
+            This can contain at most one * wild character.
+
+        :type max_age_seconds: int
+        :param max_age_seconds: The time in seconds that your browser is to
+            cache the preflight response for the specified resource.
+
+        :type expose_header: list of str
+        :param expose_header: One or more headers in the response that you
+            want customers to be able to access from their applications
+            (for example, from a JavaScript XMLHttpRequest object). You
+            add one ExposeHeader element in the rule for each header.
+        """
+        if not isinstance(allowed_method, (list, tuple)):
+            allowed_method = [allowed_method]
+        if not isinstance(allowed_origin, (list, tuple)):
+            allowed_origin = [allowed_origin]
+        if not isinstance(allowed_header, (list, tuple)):
+            if allowed_header is None:
+                allowed_header = []
+            else:
+                allowed_header = [allowed_header]
+        if not isinstance(expose_header, (list, tuple)):
+            if expose_header is None:
+                expose_header = []
+            else:
+                expose_header = [expose_header]
+        rule = CORSRule(allowed_method, allowed_origin, id, allowed_header,
+                        max_age_seconds, expose_header)
+        self.append(rule)
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/s3/deletemarker.py b/desktop/core/ext-py/boto-2.38.0/boto/s3/deletemarker.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8e7cc8b58cb548c1738a88b31c0fd2e67c3e6e4
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/s3/deletemarker.py
@@ -0,0 +1,55 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
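Tying cors.py back to the Bucket.set_cors()/get_cors() methods earlier in this patch, a configuration round-trip might look like the following sketch (not part of the vendored source; the bucket name and origin are illustrative):

    from boto.s3.connection import S3Connection
    from boto.s3.cors import CORSConfiguration

    conn = S3Connection()
    bucket = conn.get_bucket('my-bucket')  # hypothetical name

    # Build the rule list locally, then push it to S3 as one XML PUT.
    cors = CORSConfiguration()
    cors.add_rule(['GET', 'HEAD'], 'http://*.example.com',
                  allowed_header='*', max_age_seconds=3000)
    bucket.set_cors(cors)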
+ +from boto.s3.user import User + +class DeleteMarker(object): + def __init__(self, bucket=None, name=None): + self.bucket = bucket + self.name = name + self.version_id = None + self.is_latest = False + self.last_modified = None + self.owner = None + + def startElement(self, name, attrs, connection): + if name == 'Owner': + self.owner = User(self) + return self.owner + else: + return None + + def endElement(self, name, value, connection): + if name == 'Key': + self.name = value + elif name == 'IsLatest': + if value == 'true': + self.is_latest = True + else: + self.is_latest = False + elif name == 'LastModified': + self.last_modified = value + elif name == 'Owner': + pass + elif name == 'VersionId': + self.version_id = value + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/s3/key.py b/desktop/core/ext-py/boto-2.38.0/boto/s3/key.py new file mode 100644 index 0000000000000000000000000000000000000000..194c6b6e9334d0b25e16d15ba7e0699628615e0c --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/s3/key.py @@ -0,0 +1,1921 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011, Nexenta Systems Inc. +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import email.utils +import errno +import hashlib +import mimetypes +import os +import re +import base64 +import binascii +import math +from hashlib import md5 +import boto.utils +from boto.compat import BytesIO, six, urllib, encodebytes + +from boto.exception import BotoClientError +from boto.exception import StorageDataError +from boto.exception import PleaseRetryException +from boto.provider import Provider +from boto.s3.keyfile import KeyFile +from boto.s3.user import User +from boto import UserAgent +from boto.utils import compute_md5, compute_hash +from boto.utils import find_matching_headers +from boto.utils import merge_headers_by_name + + +class Key(object): + """ + Represents a key (object) in an S3 bucket. + + :ivar bucket: The parent :class:`boto.s3.bucket.Bucket`. + :ivar name: The name of this Key object. + :ivar metadata: A dictionary containing user metadata that you + wish to store with the object or that has been retrieved from + an existing object. + :ivar cache_control: The value of the `Cache-Control` HTTP header. + :ivar content_type: The value of the `Content-Type` HTTP header. + :ivar content_encoding: The value of the `Content-Encoding` HTTP header. 
+ :ivar content_disposition: The value of the `Content-Disposition` HTTP
+ header.
+ :ivar content_language: The value of the `Content-Language` HTTP header.
+ :ivar etag: The `etag` associated with this object.
+ :ivar last_modified: The string timestamp representing the last
+ time this object was modified in S3.
+ :ivar owner: The ID of the owner of this object.
+ :ivar storage_class: The storage class of the object. Currently, one of:
+ STANDARD | REDUCED_REDUNDANCY | GLACIER
+ :ivar md5: The MD5 hash of the contents of the object.
+ :ivar size: The size, in bytes, of the object.
+ :ivar version_id: The version ID of this object, if it is a versioned
+ object.
+ :ivar encrypted: Whether the object is encrypted while at rest on
+ the server.
+ """
+
+ DefaultContentType = 'application/octet-stream'
+
+ RestoreBody = """<?xml version="1.0" encoding="UTF-8"?>
+ <RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01">
+ <Days>%s</Days>
+ </RestoreRequest>"""
+
+ BufferSize = boto.config.getint('Boto', 'key_buffer_size', 8192)
+
+ # The object metadata fields a user can set, other than custom metadata
+ # fields (i.e., those beginning with a provider-specific prefix like
+ # x-amz-meta).
+ base_user_settable_fields = set(["cache-control", "content-disposition",
+ "content-encoding", "content-language",
+ "content-md5", "content-type",
+ "x-robots-tag", "expires"])
+ _underscore_base_user_settable_fields = set()
+ for f in base_user_settable_fields:
+ _underscore_base_user_settable_fields.add(f.replace('-', '_'))
+ # Metadata fields, whether user-settable or not, other than custom
+ # metadata fields (i.e., those beginning with a provider specific prefix
+ # like x-amz-meta).
+ base_fields = (base_user_settable_fields |
+ set(["last-modified", "content-length", "date", "etag"]))
+
+ def __init__(self, bucket=None, name=None):
+ self.bucket = bucket
+ self.name = name
+ self.metadata = {}
+ self.cache_control = None
+ self.content_type = self.DefaultContentType
+ self.content_encoding = None
+ self.content_disposition = None
+ self.content_language = None
+ self.filename = None
+ self.etag = None
+ self.is_latest = False
+ self.last_modified = None
+ self.owner = None
+ self._storage_class = None
+ self.path = None
+ self.resp = None
+ self.mode = None
+ self.size = None
+ self.version_id = None
+ self.source_version_id = None
+ self.delete_marker = False
+ self.encrypted = None
+ # If the object is being restored, this attribute will be set to True.
+ # If the object is restored, it will be set to False. Otherwise this
+ # value will be None. If the restore is completed (ongoing_restore =
+ # False), the expiry_date will be populated with the expiry date of the
+ # restored object.
+ self.ongoing_restore = None
+ self.expiry_date = None
+ self.local_hashes = {}
+
+ def __repr__(self):
+ if self.bucket:
+ name = u'<Key: %s,%s>' % (self.bucket.name, self.name)
+ else:
+ name = u'<Key: None,%s>' % self.name
+
+ # Encode to bytes for Python 2 to prevent display decoding issues
+ if not isinstance(name, str):
+ name = name.encode('utf-8')
+
+ return name
+
+ def __iter__(self):
+ return self
+
+ @property
+ def provider(self):
+ provider = None
+ if self.bucket and self.bucket.connection:
+ provider = self.bucket.connection.provider
+ return provider
+
+ def _get_key(self):
+ return self.name
+
+ def _set_key(self, value):
+ self.name = value
+
+ key = property(_get_key, _set_key)
+
+ def _get_md5(self):
+ if 'md5' in self.local_hashes and self.local_hashes['md5']:
+ return binascii.b2a_hex(self.local_hashes['md5'])
+
+ def _set_md5(self, value):
+ if value:
+ self.local_hashes['md5'] = binascii.a2b_hex(value)
+ elif 'md5' in self.local_hashes:
+ self.local_hashes.pop('md5', None)
+
+ md5 = property(_get_md5, _set_md5)
+
+ def _get_base64md5(self):
+ if 'md5' in self.local_hashes and self.local_hashes['md5']:
+ md5 = self.local_hashes['md5']
+ if not isinstance(md5, bytes):
+ md5 = md5.encode('utf-8')
+ return binascii.b2a_base64(md5).decode('utf-8').rstrip('\n')
+
+ def _set_base64md5(self, value):
+ if value:
+ if not isinstance(value, six.string_types):
+ value = value.decode('utf-8')
+ self.local_hashes['md5'] = binascii.a2b_base64(value)
+ elif 'md5' in self.local_hashes:
+ del self.local_hashes['md5']
+
+ base64md5 = property(_get_base64md5, _set_base64md5)
+
+ def _get_storage_class(self):
+ if self._storage_class is None and self.bucket:
+ # Attempt to fetch storage class
+ list_items = list(self.bucket.list(self.name.encode('utf-8')))
+ if len(list_items) and getattr(list_items[0], '_storage_class',
+ None):
+ self._storage_class = list_items[0]._storage_class
+ else:
+ # Key is not yet saved? Just use default...
+ self._storage_class = 'STANDARD'
+
+ return self._storage_class
+
+ def _set_storage_class(self, value):
+ self._storage_class = value
+
+ storage_class = property(_get_storage_class, _set_storage_class)
+
+ def get_md5_from_hexdigest(self, md5_hexdigest):
+ """
+ A utility function to create the 2-tuple (md5hexdigest, base64md5)
+ from just having a precalculated md5_hexdigest.
+ """
+ digest = binascii.unhexlify(md5_hexdigest)
+ base64md5 = encodebytes(digest)
+ if base64md5[-1] == '\n':
+ base64md5 = base64md5[0:-1]
+ return (md5_hexdigest, base64md5)
+
+ def handle_encryption_headers(self, resp):
+ provider = self.bucket.connection.provider
+ if provider.server_side_encryption_header:
+ self.encrypted = resp.getheader(
+ provider.server_side_encryption_header, None)
+ else:
+ self.encrypted = None
+
+ def handle_version_headers(self, resp, force=False):
+ provider = self.bucket.connection.provider
+ # If the Key object already has a version_id attribute value, it
+ # means that it represents an explicit version and the user is
+ # doing a get_contents_*(version_id=<id>) to retrieve another
+ # version of the Key. In that case, we don't really want to
+ # overwrite the version_id in this Key object. Comprende?
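+ # A short sketch of the behavior preserved here (version ids are
+ # hypothetical):
+ #
+ # key = bucket.get_key('obj', version_id='v1')  # key.version_id == 'v1'
+ # key.get_contents_as_string(version_id='v2')   # served from 'v2', but
+ #                                               # key.version_id stays 'v1'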
+ if self.version_id is None or force:
+ self.version_id = resp.getheader(provider.version_id, None)
+ self.source_version_id = resp.getheader(provider.copy_source_version_id,
+ None)
+ if resp.getheader(provider.delete_marker, 'false') == 'true':
+ self.delete_marker = True
+ else:
+ self.delete_marker = False
+
+ def handle_restore_headers(self, response):
+ provider = self.bucket.connection.provider
+ header = response.getheader(provider.restore_header)
+ if header is None:
+ return
+ parts = header.split(',', 1)
+ for part in parts:
+ key, val = [i.strip() for i in part.split('=')]
+ val = val.replace('"', '')
+ if key == 'ongoing-request':
+ self.ongoing_restore = (val.lower() == 'true')
+ elif key == 'expiry-date':
+ self.expiry_date = val
+
+ def handle_addl_headers(self, headers):
+ """
+ Used by Key subclasses to do additional, provider-specific
+ processing of response headers. No-op for this base class.
+ """
+ pass
+
+ def open_read(self, headers=None, query_args='',
+ override_num_retries=None, response_headers=None):
+ """
+ Open this key for reading.
+
+ :type headers: dict
+ :param headers: Headers to pass in the web request
+
+ :type query_args: string
+ :param query_args: Arguments to pass in the query string
+ (i.e., 'torrent')
+
+ :type override_num_retries: int
+ :param override_num_retries: If not None, will override configured
+ num_retries parameter for underlying GET.
+
+ :type response_headers: dict
+ :param response_headers: A dictionary containing HTTP
+ headers/values that will override any headers associated
+ with the stored object in the response. See
+ http://goo.gl/EWOPb for details.
+ """
+ if self.resp is None:
+ self.mode = 'r'
+
+ provider = self.bucket.connection.provider
+ self.resp = self.bucket.connection.make_request(
+ 'GET', self.bucket.name, self.name, headers,
+ query_args=query_args,
+ override_num_retries=override_num_retries)
+ if self.resp.status < 199 or self.resp.status > 299:
+ body = self.resp.read()
+ raise provider.storage_response_error(self.resp.status,
+ self.resp.reason, body)
+ response_headers = self.resp.msg
+ self.metadata = boto.utils.get_aws_metadata(response_headers,
+ provider)
+ for name, value in response_headers.items():
+ # To get correct size for Range GETs, use Content-Range
+ # header if one was returned. If not, use Content-Length
+ # header.
+ if (name.lower() == 'content-length' and
+ 'Content-Range' not in response_headers):
+ self.size = int(value)
+ elif name.lower() == 'content-range':
+ end_range = re.sub('.*/(.*)', '\\1', value)
+ self.size = int(end_range)
+ elif name.lower() in Key.base_fields:
+ self.__dict__[name.lower().replace('-', '_')] = value
+ self.handle_version_headers(self.resp)
+ self.handle_encryption_headers(self.resp)
+ self.handle_restore_headers(self.resp)
+ self.handle_addl_headers(self.resp.getheaders())
+
+ def open_write(self, headers=None, override_num_retries=None):
+ """
+ Open this key for writing.
+ Not yet implemented.
+
+ :type headers: dict
+ :param headers: Headers to pass in the write request
+
+ :type override_num_retries: int
+ :param override_num_retries: If not None, will override configured
+ num_retries parameter for underlying PUT.
+ """ + raise BotoClientError('Not Implemented') + + def open(self, mode='r', headers=None, query_args=None, + override_num_retries=None): + if mode == 'r': + self.mode = 'r' + self.open_read(headers=headers, query_args=query_args, + override_num_retries=override_num_retries) + elif mode == 'w': + self.mode = 'w' + self.open_write(headers=headers, + override_num_retries=override_num_retries) + else: + raise BotoClientError('Invalid mode: %s' % mode) + + closed = False + + def close(self, fast=False): + """ + Close this key. + + :type fast: bool + :param fast: True if you want the connection to be closed without first + reading the content. This should only be used in cases where subsequent + calls don't need to return the content from the open HTTP connection. + Note: As explained at + http://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.getresponse, + callers must read the whole response before sending a new request to the + server. Calling Key.close(fast=True) and making a subsequent request to + the server will work because boto will get an httplib exception and + close/reopen the connection. + + """ + if self.resp and not fast: + self.resp.read() + self.resp = None + self.mode = None + self.closed = True + + def next(self): + """ + By providing a next method, the key object supports use as an iterator. + For example, you can now say: + + for bytes in key: + write bytes to a file or whatever + + All of the HTTP connection stuff is handled for you. + """ + self.open_read() + data = self.resp.read(self.BufferSize) + if not data: + self.close() + raise StopIteration + return data + + # Python 3 iterator support + __next__ = next + + def read(self, size=0): + self.open_read() + if size == 0: + data = self.resp.read() + else: + data = self.resp.read(size) + if not data: + self.close() + return data + + def change_storage_class(self, new_storage_class, dst_bucket=None, + validate_dst_bucket=True): + """ + Change the storage class of an existing key. + Depending on whether a different destination bucket is supplied + or not, this will either move the item within the bucket, preserving + all metadata and ACL info bucket changing the storage class or it + will copy the item to the provided destination bucket, also + preserving metadata and ACL info. + + :type new_storage_class: string + :param new_storage_class: The new storage class for the Key. + Possible values are: + * STANDARD + * REDUCED_REDUNDANCY + + :type dst_bucket: string + :param dst_bucket: The name of a destination bucket. If not + provided the current bucket of the key will be used. + + :type validate_dst_bucket: bool + :param validate_dst_bucket: If True, will validate the dst_bucket + by using an extra list request. + """ + bucket_name = dst_bucket or self.bucket.name + if new_storage_class == 'STANDARD': + return self.copy(bucket_name, self.name, + reduced_redundancy=False, preserve_acl=True, + validate_dst_bucket=validate_dst_bucket) + elif new_storage_class == 'REDUCED_REDUNDANCY': + return self.copy(bucket_name, self.name, + reduced_redundancy=True, preserve_acl=True, + validate_dst_bucket=validate_dst_bucket) + else: + raise BotoClientError('Invalid storage class: %s' % + new_storage_class) + + def copy(self, dst_bucket, dst_key, metadata=None, + reduced_redundancy=False, preserve_acl=False, + encrypt_key=False, validate_dst_bucket=True): + """ + Copy this Key to another bucket. 
+
+ :type dst_bucket: string
+ :param dst_bucket: The name of the destination bucket
+
+ :type dst_key: string
+ :param dst_key: The name of the destination key
+
+ :type metadata: dict
+ :param metadata: Metadata to be associated with the new key. If
+ metadata is supplied, it will replace the metadata of the
+ source key being copied. If no metadata is supplied, the
+ source key's metadata will be copied to the new key.
+
+ :type reduced_redundancy: bool
+ :param reduced_redundancy: If True, this will force the
+ storage class of the new Key to be REDUCED_REDUNDANCY
+ regardless of the storage class of the key being copied.
+ The Reduced Redundancy Storage (RRS) feature of S3
+ provides lower redundancy at lower storage cost.
+
+ :type preserve_acl: bool
+ :param preserve_acl: If True, the ACL from the source key will
+ be copied to the destination key. If False, the
+ destination key will have the default ACL. Note that
+ preserving the ACL in the new key object will require two
+ additional API calls to S3, one to retrieve the current
+ ACL and one to set that ACL on the new object. If you
+ don't care about the ACL, a value of False will be
+ significantly more efficient.
+
+ :type encrypt_key: bool
+ :param encrypt_key: If True, the new copy of the object will
+ be encrypted on the server-side by S3 and will be stored
+ in an encrypted form while at rest in S3.
+
+ :type validate_dst_bucket: bool
+ :param validate_dst_bucket: If True, will validate the dst_bucket
+ by using an extra list request.
+
+ :rtype: :class:`boto.s3.key.Key` or subclass
+ :returns: An instance of the newly created key object
+ """
+ dst_bucket = self.bucket.connection.lookup(dst_bucket,
+ validate_dst_bucket)
+ if reduced_redundancy:
+ storage_class = 'REDUCED_REDUNDANCY'
+ else:
+ storage_class = self.storage_class
+ return dst_bucket.copy_key(dst_key, self.bucket.name,
+ self.name, metadata,
+ storage_class=storage_class,
+ preserve_acl=preserve_acl,
+ encrypt_key=encrypt_key,
+ src_version_id=self.version_id)
+
+ def startElement(self, name, attrs, connection):
+ if name == 'Owner':
+ self.owner = User(self)
+ return self.owner
+ else:
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'Key':
+ self.name = value
+ elif name == 'ETag':
+ self.etag = value
+ elif name == 'IsLatest':
+ if value == 'true':
+ self.is_latest = True
+ else:
+ self.is_latest = False
+ elif name == 'LastModified':
+ self.last_modified = value
+ elif name == 'Size':
+ self.size = int(value)
+ elif name == 'StorageClass':
+ self.storage_class = value
+ elif name == 'Owner':
+ pass
+ elif name == 'VersionId':
+ self.version_id = value
+ else:
+ setattr(self, name, value)
+
+ def exists(self, headers=None):
+ """
+ Returns True if the key exists
+
+ :rtype: bool
+ :return: Whether the key exists on S3
+ """
+ return bool(self.bucket.lookup(self.name, headers=headers))
+
+ def delete(self, headers=None):
+ """
+ Delete this key from S3
+ """
+ return self.bucket.delete_key(self.name, version_id=self.version_id,
+ headers=headers)
+
+ def get_metadata(self, name):
+ return self.metadata.get(name)
+
+ def set_metadata(self, name, value):
+ # Ensure that metadata that is vital to signing is in the correct
+ # case. Applies to ``Content-Type`` & ``Content-MD5``.
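+ # A short sketch of the normalization (values hypothetical):
+ #
+ # key.set_metadata('content-type', 'text/plain')
+ # key.metadata -> {'Content-Type': 'text/plain'}
+ # key.content_type -> 'text/plain'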
+ if name.lower() == 'content-type':
+ self.metadata['Content-Type'] = value
+ elif name.lower() == 'content-md5':
+ self.metadata['Content-MD5'] = value
+ else:
+ self.metadata[name] = value
+ if name.lower() in Key.base_user_settable_fields:
+ self.__dict__[name.lower().replace('-', '_')] = value
+
+ def update_metadata(self, d):
+ self.metadata.update(d)
+
+ # convenience methods for setting/getting ACL
+ def set_acl(self, acl_str, headers=None):
+ if self.bucket is not None:
+ self.bucket.set_acl(acl_str, self.name, headers=headers)
+
+ def get_acl(self, headers=None):
+ if self.bucket is not None:
+ return self.bucket.get_acl(self.name, headers=headers)
+
+ def get_xml_acl(self, headers=None):
+ if self.bucket is not None:
+ return self.bucket.get_xml_acl(self.name, headers=headers)
+
+ def set_xml_acl(self, acl_str, headers=None):
+ if self.bucket is not None:
+ return self.bucket.set_xml_acl(acl_str, self.name, headers=headers)
+
+ def set_canned_acl(self, acl_str, headers=None):
+ return self.bucket.set_canned_acl(acl_str, self.name, headers)
+
+ def get_redirect(self):
+ """Return the redirect location configured for this key.
+
+ If no redirect is configured (via set_redirect), then None
+ will be returned.
+
+ """
+ response = self.bucket.connection.make_request(
+ 'HEAD', self.bucket.name, self.name)
+ if response.status == 200:
+ return response.getheader('x-amz-website-redirect-location')
+ else:
+ raise self.provider.storage_response_error(
+ response.status, response.reason, response.read())
+
+ def set_redirect(self, redirect_location, headers=None):
+ """Configure this key to redirect to another location.
+
+ When the bucket associated with this key is accessed from the website
+ endpoint, a 301 redirect will be issued to the specified
+ `redirect_location`.
+
+ :type redirect_location: string
+ :param redirect_location: The location to redirect.
+
+ """
+ if headers is None:
+ headers = {}
+ else:
+ headers = headers.copy()
+
+ headers['x-amz-website-redirect-location'] = redirect_location
+ response = self.bucket.connection.make_request('PUT', self.bucket.name,
+ self.name, headers)
+ if response.status == 200:
+ return True
+ else:
+ raise self.provider.storage_response_error(
+ response.status, response.reason, response.read())
+
+ def make_public(self, headers=None):
+ return self.bucket.set_canned_acl('public-read', self.name, headers)
+
+ def generate_url(self, expires_in, method='GET', headers=None,
+ query_auth=True, force_http=False, response_headers=None,
+ expires_in_absolute=False, version_id=None,
+ policy=None, reduced_redundancy=False, encrypt_key=False):
+ """
+ Generate a URL to access this key.
+
+ :type expires_in: int
+ :param expires_in: How long the url is valid for, in seconds
+
+ :type method: string
+ :param method: The method to use for retrieving the file
+ (default is GET)
+
+ :type headers: dict
+ :param headers: Any headers to pass along in the request
+
+ :type query_auth: bool
+ :param query_auth: If True (the default), the generated URL will
+ include query-string authentication parameters; if False,
+ an unsigned URL is returned.
+
+ :type force_http: bool
+ :param force_http: If True, http will be used instead of https.
+
+ :type response_headers: dict
+ :param response_headers: A dictionary containing HTTP
+ headers/values that will override any headers associated
+ with the stored object in the response. See
+ http://goo.gl/EWOPb for details.
+
+ :type expires_in_absolute: bool
+ :param expires_in_absolute: If True, ``expires_in`` is treated as
+ an absolute epoch timestamp rather than a number of seconds
+ from now.
+
+ :type version_id: string
+ :param version_id: The version_id of the object to GET. If specified
+ this overrides any value in the key.
+
+ :type policy: :class:`boto.s3.acl.CannedACLStrings`
+ :param policy: A canned ACL policy that will be applied to the
+ new key in S3.
+
+ :type reduced_redundancy: bool
+ :param reduced_redundancy: If True, this will set the storage
+ class of the new Key to be REDUCED_REDUNDANCY. The Reduced
+ Redundancy Storage (RRS) feature of S3 provides lower
+ redundancy at lower storage cost.
+
+ :type encrypt_key: bool
+ :param encrypt_key: If True, the new copy of the object will
+ be encrypted on the server-side by S3 and will be stored
+ in an encrypted form while at rest in S3.
+
+ :rtype: string
+ :return: The URL to access the key
+ """
+ provider = self.bucket.connection.provider
+ version_id = version_id or self.version_id
+ if headers is None:
+ headers = {}
+ else:
+ headers = headers.copy()
+
+ # add headers accordingly (usually PUT case)
+ if policy:
+ headers[provider.acl_header] = policy
+ if reduced_redundancy:
+ self.storage_class = 'REDUCED_REDUNDANCY'
+ if provider.storage_class_header:
+ headers[provider.storage_class_header] = self.storage_class
+ if encrypt_key:
+ headers[provider.server_side_encryption_header] = 'AES256'
+ headers = boto.utils.merge_meta(headers, self.metadata, provider)
+
+ return self.bucket.connection.generate_url(expires_in, method,
+ self.bucket.name, self.name,
+ headers, query_auth,
+ force_http,
+ response_headers,
+ expires_in_absolute,
+ version_id)
+
+ def send_file(self, fp, headers=None, cb=None, num_cb=10,
+ query_args=None, chunked_transfer=False, size=None):
+ """
+ Upload a file to a key into a bucket on S3.
+
+ :type fp: file
+ :param fp: The file pointer to upload. The file pointer must
+ point at the offset from which you wish to upload.
+ i.e. if uploading the full file, it should point at the
+ start of the file. Normally when a file is opened for
+ reading, the fp will point at the first byte. See the
+ bytes parameter below for more info.
+
+ :type headers: dict
+ :param headers: The headers to pass along with the PUT request
+
+ :type num_cb: int
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter this parameter determines the granularity of
+ the callback by defining the maximum number of times the
+ callback will be called during the file
+ transfer. Providing a negative integer will cause your
+ callback to be called with each buffer read.
+
+ :type query_args: string
+ :param query_args: (optional) Arguments to pass in the query string.
+
+ :type chunked_transfer: boolean
+ :param chunked_transfer: (optional) If true, we use chunked
+ Transfer-Encoding.
+
+ :type size: int
+ :param size: (optional) The maximum number of bytes to read
+ from the file pointer (fp). This is useful when uploading
+ a file in multiple parts where you are splitting the file
+ up into different ranges to be uploaded. If not specified,
+ the default behaviour is to read all bytes from the file
+ pointer. Fewer bytes may be available.
+ """
+ self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
+ query_args=query_args,
+ chunked_transfer=chunked_transfer, size=size)
+
+ def _send_file_internal(self, fp, headers=None, cb=None, num_cb=10,
+ query_args=None, chunked_transfer=False, size=None,
+ hash_algs=None):
+ provider = self.bucket.connection.provider
+ try:
+ spos = fp.tell()
+ except IOError:
+ spos = None
+ self.read_from_stream = False
+
+ # If hash_algs is unset and the MD5 hasn't already been computed,
+ # default to an MD5 hash_alg to hash the data on-the-fly.
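+ # Sketch: a caller could pass e.g. hash_algs={'sha1': hashlib.sha1}
+ # (a hypothetical choice) to have additional digests collected into
+ # self.local_hashes by the chunk loop in sender() below; only the
+ # MD5 default below is boto's own behavior.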
+ if hash_algs is None and not self.md5: + hash_algs = {'md5': md5} + digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {}) + + def sender(http_conn, method, path, data, headers): + # This function is called repeatedly for temporary retries + # so we must be sure the file pointer is pointing at the + # start of the data. + if spos is not None and spos != fp.tell(): + fp.seek(spos) + elif spos is None and self.read_from_stream: + # if seek is not supported, and we've read from this + # stream already, then we need to abort retries to + # avoid setting bad data. + raise provider.storage_data_error( + 'Cannot retry failed request. fp does not support seeking.') + + # If the caller explicitly specified host header, tell putrequest + # not to add a second host header. Similarly for accept-encoding. + skips = {} + if boto.utils.find_matching_headers('host', headers): + skips['skip_host'] = 1 + if boto.utils.find_matching_headers('accept-encoding', headers): + skips['skip_accept_encoding'] = 1 + http_conn.putrequest(method, path, **skips) + for key in headers: + http_conn.putheader(key, headers[key]) + http_conn.endheaders() + + save_debug = self.bucket.connection.debug + self.bucket.connection.debug = 0 + # If the debuglevel < 4 we don't want to show connection + # payload, so turn off HTTP connection-level debug output (to + # be restored below). + # Use the getattr approach to allow this to work in AppEngine. + if getattr(http_conn, 'debuglevel', 0) < 4: + http_conn.set_debuglevel(0) + + data_len = 0 + if cb: + if size: + cb_size = size + elif self.size: + cb_size = self.size + else: + cb_size = 0 + if chunked_transfer and cb_size == 0: + # For chunked Transfer, we call the cb for every 1MB + # of data transferred, except when we know size. + cb_count = (1024 * 1024) / self.BufferSize + elif num_cb > 1: + cb_count = int( + math.ceil(cb_size / self.BufferSize / (num_cb - 1.0))) + elif num_cb < 0: + cb_count = -1 + else: + cb_count = 0 + i = 0 + cb(data_len, cb_size) + + bytes_togo = size + if bytes_togo and bytes_togo < self.BufferSize: + chunk = fp.read(bytes_togo) + else: + chunk = fp.read(self.BufferSize) + + if not isinstance(chunk, bytes): + chunk = chunk.encode('utf-8') + + if spos is None: + # read at least something from a non-seekable fp. 
+ self.read_from_stream = True
+ while chunk:
+ chunk_len = len(chunk)
+ data_len += chunk_len
+ if chunked_transfer:
+ http_conn.send('%x;\r\n' % chunk_len)
+ http_conn.send(chunk)
+ http_conn.send('\r\n')
+ else:
+ http_conn.send(chunk)
+ for alg in digesters:
+ digesters[alg].update(chunk)
+ if bytes_togo:
+ bytes_togo -= chunk_len
+ if bytes_togo <= 0:
+ break
+ if cb:
+ i += 1
+ if i == cb_count or cb_count == -1:
+ cb(data_len, cb_size)
+ i = 0
+ if bytes_togo and bytes_togo < self.BufferSize:
+ chunk = fp.read(bytes_togo)
+ else:
+ chunk = fp.read(self.BufferSize)
+
+ if not isinstance(chunk, bytes):
+ chunk = chunk.encode('utf-8')
+
+ self.size = data_len
+
+ for alg in digesters:
+ self.local_hashes[alg] = digesters[alg].digest()
+
+ if chunked_transfer:
+ http_conn.send('0\r\n')
+ # http_conn.send("Content-MD5: %s\r\n" % self.base64md5)
+ http_conn.send('\r\n')
+
+ if cb and (cb_count <= 1 or i > 0) and data_len > 0:
+ cb(data_len, cb_size)
+
+ http_conn.set_debuglevel(save_debug)
+ self.bucket.connection.debug = save_debug
+ response = http_conn.getresponse()
+ body = response.read()
+
+ if not self.should_retry(response, chunked_transfer):
+ raise provider.storage_response_error(
+ response.status, response.reason, body)
+
+ return response
+
+ if not headers:
+ headers = {}
+ else:
+ headers = headers.copy()
+ # Overwrite user-supplied user-agent.
+ for header in find_matching_headers('User-Agent', headers):
+ del headers[header]
+ headers['User-Agent'] = UserAgent
+ # If storage_class is None, then a user has not explicitly requested
+ # a storage class, so we can assume STANDARD here
+ if self._storage_class not in [None, 'STANDARD']:
+ headers[provider.storage_class_header] = self.storage_class
+ if find_matching_headers('Content-Encoding', headers):
+ self.content_encoding = merge_headers_by_name(
+ 'Content-Encoding', headers)
+ if find_matching_headers('Content-Language', headers):
+ self.content_language = merge_headers_by_name(
+ 'Content-Language', headers)
+ content_type_headers = find_matching_headers('Content-Type', headers)
+ if content_type_headers:
+ # Some use cases need to suppress sending of the Content-Type
+ # header and depend on the receiving server to set the content
+ # type. This can be achieved by setting headers['Content-Type']
+ # to None when calling this method.
+ if (len(content_type_headers) == 1 and
+ headers[content_type_headers[0]] is None):
+ # Delete null Content-Type value to skip sending that header.
+ del headers[content_type_headers[0]]
+ else:
+ self.content_type = merge_headers_by_name(
+ 'Content-Type', headers)
+ elif self.path:
+ self.content_type = mimetypes.guess_type(self.path)[0]
+ if self.content_type is None:
+ self.content_type = self.DefaultContentType
+ headers['Content-Type'] = self.content_type
+ else:
+ headers['Content-Type'] = self.content_type
+ if self.base64md5:
+ headers['Content-MD5'] = self.base64md5
+ if chunked_transfer:
+ headers['Transfer-Encoding'] = 'chunked'
+ #if not self.base64md5:
+ # headers['Trailer'] = "Content-MD5"
+ else:
+ headers['Content-Length'] = str(self.size)
+ # This is terrible. We need a SHA256 of the body for SigV4, but to do
+ # the chunked ``sender`` behavior above, the ``fp`` isn't available to
+ # the auth mechanism (because closures). Detect if it's SigV4 & embellish
+ # while we can before the auth calculations occur.
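+ # Sketch of when this branch fires (the config stanza below is boto's
+ # documented opt-in for SigV4 with S3; shown here only as context):
+ #
+ # [s3]
+ # use-sigv4 = True
+ #
+ # With that set, _required_auth_capability() reports 'hmac-v4-s3'.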
+ if 'hmac-v4-s3' in self.bucket.connection._required_auth_capability():
+ kwargs = {'fp': fp, 'hash_algorithm': hashlib.sha256}
+ if size is not None:
+ kwargs['size'] = size
+ headers['_sha256'] = compute_hash(**kwargs)[0]
+ headers['Expect'] = '100-Continue'
+ headers = boto.utils.merge_meta(headers, self.metadata, provider)
+ resp = self.bucket.connection.make_request(
+ 'PUT',
+ self.bucket.name,
+ self.name,
+ headers,
+ sender=sender,
+ query_args=query_args
+ )
+ self.handle_version_headers(resp, force=True)
+ self.handle_addl_headers(resp.getheaders())
+
+ def should_retry(self, response, chunked_transfer=False):
+ provider = self.bucket.connection.provider
+
+ if not chunked_transfer:
+ if response.status in [500, 503]:
+ # 500 & 503 can be plain retries.
+ return True
+
+ if response.getheader('location'):
+ # If there's a redirect, plain retry.
+ return True
+
+ if 200 <= response.status <= 299:
+ self.etag = response.getheader('etag')
+ md5 = self.md5
+ if isinstance(md5, bytes):
+ md5 = md5.decode('utf-8')
+
+ # If you use customer-provided encryption keys, the ETag value that
+ # Amazon S3 returns in the response will not be the MD5 of the
+ # object.
+ server_side_encryption_customer_algorithm = response.getheader(
+ 'x-amz-server-side-encryption-customer-algorithm', None)
+ if server_side_encryption_customer_algorithm is None:
+ if self.etag != '"%s"' % md5:
+ raise provider.storage_data_error(
+ 'ETag from S3 did not match computed MD5. '
+ '%s vs. %s' % (self.etag, self.md5))
+
+ return True
+
+ if response.status == 400:
+ # The 400 must be trapped so the retry handler can check to
+ # see if it was a timeout.
+ # If ``RequestTimeout`` is present, we'll retry. Otherwise, bomb
+ # out.
+ body = response.read()
+ err = provider.storage_response_error(
+ response.status,
+ response.reason,
+ body
+ )
+
+ if err.error_code in ['RequestTimeout']:
+ raise PleaseRetryException(
+ "Saw %s, retrying" % err.error_code,
+ response=response
+ )
+
+ return False
+
+ def compute_md5(self, fp, size=None):
+ """
+ :type fp: file
+ :param fp: File pointer to the file to MD5 hash. The file
+ pointer will be reset to the same position before the
+ method returns.
+
+ :type size: int
+ :param size: (optional) The maximum number of bytes to read
+ from the file pointer (fp). This is useful when uploading
+ a file in multiple parts where the file is being split
+ in place into different parts. Fewer bytes may be available.
+ """
+ hex_digest, b64_digest, data_size = compute_md5(fp, size=size)
+ # Returned values are MD5 hash, base64 encoded MD5 hash, and data size.
+ # The internal implementation of compute_md5() needs to return the
+ # data size but we don't want to return that value to the external
+ # caller because it changes the class interface (i.e. it might
+ # break some code) so we consume the third tuple value here and
+ # return the remainder of the tuple to the caller, thereby preserving
+ # the existing interface.
+ self.size = data_size
+ return (hex_digest, b64_digest)
+
+ def set_contents_from_stream(self, fp, headers=None, replace=True,
+ cb=None, num_cb=10, policy=None,
+ reduced_redundancy=False, query_args=None,
+ size=None):
+ """
+ Store an object using the name of the Key object as the key in
+ the cloud and the contents of the data stream pointed to by 'fp' as
+ the contents.
+
+ The stream object is not seekable and total size is not known.
+ This has the implication that we can't specify the
+ Content-Length and Content-MD5 in the header.
So for huge
+ uploads, the delay in calculating MD5 is avoided, at the cost
+ of being unable to verify the integrity of the uploaded
+ data.
+
+ :type fp: file
+ :param fp: the file whose contents are to be uploaded
+
+ :type headers: dict
+ :param headers: additional HTTP headers to be sent with the
+ PUT request.
+
+ :type replace: bool
+ :param replace: If this parameter is False, the method will first check
+ to see if an object exists in the bucket with the same key. If it
+ does, it won't overwrite it. The default value is True which will
+ overwrite the object.
+
+ :type cb: function
+ :param cb: a callback function that will be called to report
+ progress on the upload. The callback should accept two integer
+ parameters, the first representing the number of bytes that have
+ been successfully transmitted to GS and the second representing the
+ total number of bytes that need to be transmitted.
+
+ :type num_cb: int
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter, this parameter determines the granularity of
+ the callback by defining the maximum number of times the
+ callback will be called during the file transfer.
+
+ :type policy: :class:`boto.gs.acl.CannedACLStrings`
+ :param policy: A canned ACL policy that will be applied to the new key
+ in GS.
+
+ :type reduced_redundancy: bool
+ :param reduced_redundancy: If True, this will set the storage
+ class of the new Key to be REDUCED_REDUNDANCY. The Reduced
+ Redundancy Storage (RRS) feature of S3 provides lower
+ redundancy at lower storage cost.
+
+ :type size: int
+ :param size: (optional) The maximum number of bytes to read from
+ the file pointer (fp). This is useful when uploading a
+ file in multiple parts where you are splitting the file up
+ into different ranges to be uploaded. If not specified,
+ the default behaviour is to read all bytes from the file
+ pointer. Fewer bytes may be available.
+ """
+
+ provider = self.bucket.connection.provider
+ if not provider.supports_chunked_transfer():
+ raise BotoClientError('%s does not support chunked transfer'
+ % provider.get_provider_name())
+
+ # Name of the Object should be specified explicitly for Streams.
+ if not self.name or self.name == '':
+ raise BotoClientError('Cannot determine the destination '
+ 'object name for the given stream')
+
+ if headers is None:
+ headers = {}
+ if policy:
+ headers[provider.acl_header] = policy
+
+ if reduced_redundancy:
+ self.storage_class = 'REDUCED_REDUNDANCY'
+ if provider.storage_class_header:
+ headers[provider.storage_class_header] = self.storage_class
+
+ if self.bucket is not None:
+ if not replace:
+ if self.bucket.lookup(self.name):
+ return
+ self.send_file(fp, headers, cb, num_cb, query_args,
+ chunked_transfer=True, size=size)
+
+ def set_contents_from_file(self, fp, headers=None, replace=True,
+ cb=None, num_cb=10, policy=None, md5=None,
+ reduced_redundancy=False, query_args=None,
+ encrypt_key=False, size=None, rewind=False):
+ """
+ Store an object in S3 using the name of the Key object as the
+ key in S3 and the contents of the file pointed to by 'fp' as the
+ contents. The data is read from 'fp' from its current position until
+ 'size' bytes have been read or EOF.
+
+ :type fp: file
+ :param fp: the file whose contents to upload
+
+ :type headers: dict
+ :param headers: Additional HTTP headers that will be sent with
+ the PUT request.
+
+ :type replace: bool
+ :param replace: If this parameter is False, the method will
+ first check to see if an object exists in the bucket with
+ the same key. If it does, it won't overwrite it. The
+ default value is True which will overwrite the object.
+
+ :type cb: function
+ :param cb: a callback function that will be called to report
+ progress on the upload. The callback should accept two
+ integer parameters, the first representing the number of
+ bytes that have been successfully transmitted to S3 and
+ the second representing the size of the object to be
+ transmitted.
+
+ :type num_cb: int
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter this parameter determines the granularity of
+ the callback by defining the maximum number of times the
+ callback will be called during the file transfer.
+
+ :type policy: :class:`boto.s3.acl.CannedACLStrings`
+ :param policy: A canned ACL policy that will be applied to the
+ new key in S3.
+
+ :type md5: A tuple containing the hexdigest version of the MD5
+ checksum of the file as the first element and the
+ Base64-encoded version of the plain checksum as the second
+ element. This is the same format returned by the
+ compute_md5 method.
+ :param md5: If you need to compute the MD5 for any reason
+ prior to upload, it's silly to have to do it twice so this
+ param, if present, will be used as the MD5 values of the
+ file. Otherwise, the checksum will be computed.
+
+ :type reduced_redundancy: bool
+ :param reduced_redundancy: If True, this will set the storage
+ class of the new Key to be REDUCED_REDUNDANCY. The Reduced
+ Redundancy Storage (RRS) feature of S3 provides lower
+ redundancy at lower storage cost.
+
+ :type encrypt_key: bool
+ :param encrypt_key: If True, the new copy of the object will
+ be encrypted on the server-side by S3 and will be stored
+ in an encrypted form while at rest in S3.
+
+ :type size: int
+ :param size: (optional) The maximum number of bytes to read
+ from the file pointer (fp). This is useful when uploading
+ a file in multiple parts where you are splitting the file
+ up into different ranges to be uploaded. If not specified,
+ the default behaviour is to read all bytes from the file
+ pointer. Fewer bytes may be available.
+
+ :type rewind: bool
+ :param rewind: (optional) If True, the file pointer (fp) will
+ be rewound to the start before any bytes are read from
+ it. The default behaviour is False which reads from the
+ current position of the file pointer (fp).
+
+ :rtype: int
+ :return: The number of bytes written to the key.
+ """
+ provider = self.bucket.connection.provider
+ headers = headers or {}
+ if policy:
+ headers[provider.acl_header] = policy
+ if encrypt_key:
+ headers[provider.server_side_encryption_header] = 'AES256'
+
+ if rewind:
+ # caller requests reading from beginning of fp.
+ fp.seek(0, os.SEEK_SET)
+ else:
+ # The following seek/tell/seek logic is intended
+ # to detect applications using the older interface to
+ # set_contents_from_file(), which automatically rewound the
+ # file each time the Key was reused. This changed with commit
+ # 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
+ # split into multiple parts and uploaded in parallel, and at
+ # the time of that commit this check was added because otherwise
+ # older programs would get a success status and upload an empty
+ # object. Unfortunately, it's very inefficient for fp's implemented
+ # by KeyFile (used, for example, by gsutil when copying between
+ # providers).
So, we skip the check for the KeyFile case.
+ # TODO: At some point consider removing this seek/tell/seek
+ # logic, after enough time has passed that it's unlikely any
+ # programs remain that assume the older auto-rewind interface.
+ if not isinstance(fp, KeyFile):
+ spos = fp.tell()
+ fp.seek(0, os.SEEK_END)
+ if fp.tell() == spos:
+ fp.seek(0, os.SEEK_SET)
+ if fp.tell() != spos:
+ # Raise an exception as this is likely a programming
+ # error whereby there is data before the fp but nothing
+ # after it.
+ fp.seek(spos)
+ raise AttributeError('fp is at EOF. Use rewind option '
+ 'or seek() to data start.')
+ # seek back to the correct position.
+ fp.seek(spos)
+
+ if reduced_redundancy:
+ self.storage_class = 'REDUCED_REDUNDANCY'
+ if provider.storage_class_header:
+ headers[provider.storage_class_header] = self.storage_class
+ # TODO - What if provider doesn't support reduced redundancy?
+ # What if different providers provide different classes?
+ if hasattr(fp, 'name'):
+ self.path = fp.name
+ if self.bucket is not None:
+ if not md5 and provider.supports_chunked_transfer():
+ # defer md5 calculation so it happens on the fly; we
+ # don't know anything about size yet.
+ chunked_transfer = True
+ self.size = None
+ else:
+ chunked_transfer = False
+ if isinstance(fp, KeyFile):
+ # Avoid EOF seek for KeyFile case as it's very inefficient.
+ key = fp.getkey()
+ size = key.size - fp.tell()
+ self.size = size
+ # At present both GCS and S3 use MD5 for the etag for
+ # non-multipart-uploaded objects. If the etag is 32 hex
+ # chars use it as an MD5, to avoid having to read the file
+ # twice while transferring.
+ if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
+ etag = key.etag.strip('"')
+ md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
+ if not md5:
+ # compute_md5() also sets self.size to the actual
+ # number of bytes read while computing the md5.
+ md5 = self.compute_md5(fp, size)
+ # adjust size if required
+ size = self.size
+ elif size:
+ self.size = size
+ else:
+ # If md5 is provided, we still need the size, so
+ # calculate it based on bytes to end of content
+ spos = fp.tell()
+ fp.seek(0, os.SEEK_END)
+ self.size = fp.tell() - spos
+ fp.seek(spos)
+ size = self.size
+ self.md5 = md5[0]
+ self.base64md5 = md5[1]
+
+ if self.name is None:
+ self.name = self.md5
+ if not replace:
+ if self.bucket.lookup(self.name):
+ return
+
+ self.send_file(fp, headers=headers, cb=cb, num_cb=num_cb,
+ query_args=query_args,
+ chunked_transfer=chunked_transfer, size=size)
+ # return number of bytes written.
+ return self.size
+
+ def set_contents_from_filename(self, filename, headers=None, replace=True,
+ cb=None, num_cb=10, policy=None, md5=None,
+ reduced_redundancy=False,
+ encrypt_key=False):
+ """
+ Store an object in S3 using the name of the Key object as the
+ key in S3 and the contents of the file named by 'filename'.
+ See set_contents_from_file method for details about the
+ parameters.
+
+ :type filename: string
+ :param filename: The name of the file that you want to put onto S3
+
+ :type headers: dict
+ :param headers: Additional headers to pass along with the
+ request to AWS.
+
+ :type replace: bool
+ :param replace: If True, replaces the contents of the file
+ if it already exists.
+
+ :type cb: function
+ :param cb: a callback function that will be called to report
+ progress on the upload.
The callback should accept two
+ integer parameters, the first representing the number of
+ bytes that have been successfully transmitted to S3 and
+ the second representing the size of the object to be
+ transmitted.
+
+ :type num_cb: int
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter this parameter determines the granularity of
+ the callback by defining the maximum number of times the
+ callback will be called during the file transfer.
+
+ :type policy: :class:`boto.s3.acl.CannedACLStrings`
+ :param policy: A canned ACL policy that will be applied to the
+ new key in S3.
+
+ :type md5: A tuple containing the hexdigest version of the MD5
+ checksum of the file as the first element and the
+ Base64-encoded version of the plain checksum as the second
+ element. This is the same format returned by the
+ compute_md5 method.
+ :param md5: If you need to compute the MD5 for any reason
+ prior to upload, it's silly to have to do it twice so this
+ param, if present, will be used as the MD5 values of the
+ file. Otherwise, the checksum will be computed.
+
+ :type reduced_redundancy: bool
+ :param reduced_redundancy: If True, this will set the storage
+ class of the new Key to be REDUCED_REDUNDANCY. The Reduced
+ Redundancy Storage (RRS) feature of S3 provides lower
+ redundancy at lower storage cost.
+
+ :type encrypt_key: bool
+ :param encrypt_key: If True, the new copy of the object
+ will be encrypted on the server-side by S3 and will be
+ stored in an encrypted form while at rest in S3.
+
+ :rtype: int
+ :return: The number of bytes written to the key.
+ """
+ with open(filename, 'rb') as fp:
+ return self.set_contents_from_file(fp, headers, replace, cb,
+ num_cb, policy, md5,
+ reduced_redundancy,
+ encrypt_key=encrypt_key)
+
+ def set_contents_from_string(self, string_data, headers=None, replace=True,
+ cb=None, num_cb=10, policy=None, md5=None,
+ reduced_redundancy=False,
+ encrypt_key=False):
+ """
+ Store an object in S3 using the name of the Key object as the
+ key in S3 and the string 'string_data' as the contents.
+ See set_contents_from_file method for details about the
+ parameters.
+
+ :type headers: dict
+ :param headers: Additional headers to pass along with the
+ request to AWS.
+
+ :type replace: bool
+ :param replace: If True, replaces the contents of the file if
+ it already exists.
+
+ :type cb: function
+ :param cb: a callback function that will be called to report
+ progress on the upload. The callback should accept two
+ integer parameters, the first representing the number of
+ bytes that have been successfully transmitted to S3 and
+ the second representing the size of the object to be
+ transmitted.
+
+ :type num_cb: int
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter this parameter determines the granularity of
+ the callback by defining the maximum number of times the
+ callback will be called during the file transfer.
+
+ :type policy: :class:`boto.s3.acl.CannedACLStrings`
+ :param policy: A canned ACL policy that will be applied to the
+ new key in S3.
+
+ :type md5: A tuple containing the hexdigest version of the MD5
+ checksum of the file as the first element and the
+ Base64-encoded version of the plain checksum as the second
+ element. This is the same format returned by the
+ compute_md5 method.
+ :param md5: If you need to compute the MD5 for any reason
+ prior to upload, it's silly to have to do it twice so this
+ param, if present, will be used as the MD5 values of the
+ file.
Otherwise, the checksum will be computed.
+
+ :type reduced_redundancy: bool
+ :param reduced_redundancy: If True, this will set the storage
+ class of the new Key to be REDUCED_REDUNDANCY. The Reduced
+ Redundancy Storage (RRS) feature of S3 provides lower
+ redundancy at lower storage cost.
+
+ :type encrypt_key: bool
+ :param encrypt_key: If True, the new copy of the object will
+ be encrypted on the server-side by S3 and will be stored
+ in an encrypted form while at rest in S3.
+ """
+ if not isinstance(string_data, bytes):
+ string_data = string_data.encode("utf-8")
+ fp = BytesIO(string_data)
+ r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
+ policy, md5, reduced_redundancy,
+ encrypt_key=encrypt_key)
+ fp.close()
+ return r
+
+ def get_file(self, fp, headers=None, cb=None, num_cb=10,
+ torrent=False, version_id=None, override_num_retries=None,
+ response_headers=None):
+ """
+ Retrieves a file from an S3 Key
+
+ :type fp: file
+ :param fp: File pointer to put the data into
+
+ :type headers: dict
+ :param headers: Headers to send when retrieving the file
+
+ :type cb: function
+ :param cb: a callback function that will be called to report
+ progress on the download. The callback should accept two
+ integer parameters, the first representing the number of
+ bytes that have been successfully transmitted from S3 and
+ the second representing the size of the object being
+ retrieved.
+
+ :type num_cb: int
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter this parameter determines the granularity of
+ the callback by defining the maximum number of times the
+ callback will be called during the file transfer.
+
+ :type torrent: bool
+ :param torrent: Flag for whether to get a torrent for the file
+
+ :type override_num_retries: int
+ :param override_num_retries: If not None, will override configured
+ num_retries parameter for underlying GET.
+
+ :type response_headers: dict
+ :param response_headers: A dictionary containing HTTP
+ headers/values that will override any headers associated
+ with the stored object in the response. See
+ http://goo.gl/EWOPb for details.
+
+ :type version_id: str
+ :param version_id: The ID of a particular version of the object.
+ If this parameter is not supplied but the Key object has
+ a ``version_id`` attribute, that value will be used when
+ retrieving the object. You can set the Key object's
+ ``version_id`` attribute to None to always grab the latest
+ version from a version-enabled bucket.
+ """
+ self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
+ torrent=torrent, version_id=version_id,
+ override_num_retries=override_num_retries,
+ response_headers=response_headers,
+ hash_algs=None,
+ query_args=None)
+
+ def _get_file_internal(self, fp, headers=None, cb=None, num_cb=10,
+ torrent=False, version_id=None, override_num_retries=None,
+ response_headers=None, hash_algs=None, query_args=None):
+ if headers is None:
+ headers = {}
+ save_debug = self.bucket.connection.debug
+ if self.bucket.connection.debug == 1:
+ self.bucket.connection.debug = 0
+
+ query_args = query_args or []
+ if torrent:
+ query_args.append('torrent')
+
+ if hash_algs is None and not torrent:
+ hash_algs = {'md5': md5}
+ digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
+
+ # If a version_id is passed in, use that. If not, check to see
+ # if the Key object has an explicit version_id and, if so, use that.
+ # Otherwise, don't pass a version_id query param.
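+ # Sketch of the resulting query string (id and header values are
+ # hypothetical):
+ #
+ # version_id='v2',
+ # response_headers={'response-content-disposition':
+ #                   'attachment; filename=x.txt'}
+ # -> 'versionId=v2&response-content-disposition=attachment%3B%20filename%3Dx.txt'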
+ if version_id is None:
+ version_id = self.version_id
+ if version_id:
+ query_args.append('versionId=%s' % version_id)
+ if response_headers:
+ for key in response_headers:
+ query_args.append('%s=%s' % (
+ key, urllib.parse.quote(response_headers[key])))
+ query_args = '&'.join(query_args)
+ self.open('r', headers, query_args=query_args,
+ override_num_retries=override_num_retries)
+
+ data_len = 0
+ if cb:
+ if self.size is None:
+ cb_size = 0
+ else:
+ cb_size = self.size
+ if self.size is None and num_cb != -1:
+ # If size is not available due to chunked transfer for example,
+ # we'll call the cb for every 1MB of data transferred.
+ cb_count = (1024 * 1024) / self.BufferSize
+ elif num_cb > 1:
+ cb_count = int(math.ceil(cb_size/self.BufferSize/(num_cb-1.0)))
+ elif num_cb < 0:
+ cb_count = -1
+ else:
+ cb_count = 0
+ i = 0
+ cb(data_len, cb_size)
+ try:
+ for bytes in self:
+ fp.write(bytes)
+ data_len += len(bytes)
+ for alg in digesters:
+ digesters[alg].update(bytes)
+ if cb:
+ if cb_size > 0 and data_len >= cb_size:
+ break
+ i += 1
+ if i == cb_count or cb_count == -1:
+ cb(data_len, cb_size)
+ i = 0
+ except IOError as e:
+ if e.errno == errno.ENOSPC:
+ raise StorageDataError('Out of space for destination file '
+ '%s' % fp.name)
+ raise
+ if cb and (cb_count <= 1 or i > 0) and data_len > 0:
+ cb(data_len, cb_size)
+ for alg in digesters:
+ self.local_hashes[alg] = digesters[alg].digest()
+ if self.size is None and not torrent and "Range" not in headers:
+ self.size = data_len
+ self.close()
+ self.bucket.connection.debug = save_debug
+
+ def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10):
+ """
+ Get a torrent file (see get_file)
+
+ :type fp: file
+ :param fp: The file pointer of where to put the torrent
+
+ :type headers: dict
+ :param headers: Headers to be passed
+
+ :type cb: function
+ :param cb: a callback function that will be called to report
+ progress on the download. The callback should accept two
+ integer parameters, the first representing the number of
+ bytes that have been successfully transmitted from S3 and
+ the second representing the size of the object being
+ retrieved.
+
+ :type num_cb: int
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter this parameter determines the granularity of
+ the callback by defining the maximum number of times the
+ callback will be called during the file transfer.
+
+ """
+ return self.get_file(fp, headers, cb, num_cb, torrent=True)
+
+ def get_contents_to_file(self, fp, headers=None,
+ cb=None, num_cb=10,
+ torrent=False,
+ version_id=None,
+ res_download_handler=None,
+ response_headers=None):
+ """
+ Retrieve an object from S3 using the name of the Key object as the
+ key in S3. Write the contents of the object to the file pointed
+ to by 'fp'.
+
+ :type fp: file-like object
+ :param fp: The file object to which the object's contents are
+ written.
+
+ :type headers: dict
+ :param headers: additional HTTP headers that will be sent with
+ the GET request.
+
+ :type cb: function
+ :param cb: a callback function that will be called to report
+ progress on the download. The callback should accept two
+ integer parameters, the first representing the number of
+ bytes that have been successfully transmitted from S3 and
+ the second representing the size of the object being
+ retrieved.
+
+ :type num_cb: int
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter this parameter determines the granularity of
+ the callback by defining the maximum number of times the
+ callback will be called during the file transfer.
+
+ :type torrent: bool
+ :param torrent: If True, returns the contents of a torrent
+ file as a string.
+
+ :type res_download_handler: ResumableDownloadHandler
+ :param res_download_handler: If provided, this handler will
+ perform the download.
+
+ :type response_headers: dict
+ :param response_headers: A dictionary containing HTTP
+ headers/values that will override any headers associated
+ with the stored object in the response. See
+ http://goo.gl/EWOPb for details.
+
+ :type version_id: str
+ :param version_id: The ID of a particular version of the object.
+ If this parameter is not supplied but the Key object has
+ a ``version_id`` attribute, that value will be used when
+ retrieving the object. You can set the Key object's
+ ``version_id`` attribute to None to always grab the latest
+ version from a version-enabled bucket.
+ """
+ if self.bucket is not None:
+ if res_download_handler:
+ res_download_handler.get_file(self, fp, headers, cb, num_cb,
+ torrent=torrent,
+ version_id=version_id)
+ else:
+ self.get_file(fp, headers, cb, num_cb, torrent=torrent,
+ version_id=version_id,
+ response_headers=response_headers)
+
+ def get_contents_to_filename(self, filename, headers=None,
+ cb=None, num_cb=10,
+ torrent=False,
+ version_id=None,
+ res_download_handler=None,
+ response_headers=None):
+ """
+ Retrieve an object from S3 using the name of the Key object as the
+ key in S3. Store contents of the object to a file named by 'filename'.
+ See get_contents_to_file method for details about the
+ parameters.
+
+ :type filename: string
+ :param filename: The filename of where to put the file contents
+
+ :type headers: dict
+ :param headers: Any additional headers to send in the request
+
+ :type cb: function
+ :param cb: a callback function that will be called to report
+ progress on the download. The callback should accept two
+ integer parameters, the first representing the number of
+ bytes that have been successfully transmitted from S3 and
+ the second representing the size of the object being
+ retrieved.
+
+ :type num_cb: int
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter this parameter determines the granularity of
+ the callback by defining the maximum number of times the
+ callback will be called during the file transfer.
+
+ :type torrent: bool
+ :param torrent: If True, returns the contents of a torrent file
+ as a string.
+
+ :type res_download_handler: ResumableDownloadHandler
+ :param res_download_handler: If provided, this handler will
+ perform the download.
+
+ :type response_headers: dict
+ :param response_headers: A dictionary containing HTTP
+ headers/values that will override any headers associated
+ with the stored object in the response. See
+ http://goo.gl/EWOPb for details.
+
+ :type version_id: str
+ :param version_id: The ID of a particular version of the object.
+ If this parameter is not supplied but the Key object has
+ a ``version_id`` attribute, that value will be used when
+ retrieving the object. You can set the Key object's
+ ``version_id`` attribute to None to always grab the latest
+ version from a version-enabled bucket.
+ """ + try: + with open(filename, 'wb') as fp: + self.get_contents_to_file(fp, headers, cb, num_cb, + torrent=torrent, + version_id=version_id, + res_download_handler=res_download_handler, + response_headers=response_headers) + except Exception: + os.remove(filename) + raise + # if last_modified date was sent from s3, try to set file's timestamp + if self.last_modified is not None: + try: + modified_tuple = email.utils.parsedate_tz(self.last_modified) + modified_stamp = int(email.utils.mktime_tz(modified_tuple)) + os.utime(fp.name, (modified_stamp, modified_stamp)) + except Exception: + pass + + def get_contents_as_string(self, headers=None, + cb=None, num_cb=10, + torrent=False, + version_id=None, + response_headers=None, encoding=None): + """ + Retrieve an object from S3 using the name of the Key object as the + key in S3. Return the contents of the object as a string. + See get_contents_to_file method for details about the + parameters. + + :type headers: dict + :param headers: Any additional headers to send in the request + + :type cb: function + :param cb: a callback function that will be called to report + progress on the upload. The callback should accept two + integer parameters, the first representing the number of + bytes that have been successfully transmitted to S3 and + the second representing the size of the to be transmitted + object. + + :type cb: int + :param num_cb: (optional) If a callback is specified with the + cb parameter this parameter determines the granularity of + the callback by defining the maximum number of times the + callback will be called during the file transfer. + + :type torrent: bool + :param torrent: If True, returns the contents of a torrent file + as a string. + + :type response_headers: dict + :param response_headers: A dictionary containing HTTP + headers/values that will override any headers associated + with the stored object in the response. See + http://goo.gl/EWOPb for details. + + :type version_id: str + :param version_id: The ID of a particular version of the object. + If this parameter is not supplied but the Key object has + a ``version_id`` attribute, that value will be used when + retrieving the object. You can set the Key object's + ``version_id`` attribute to None to always grab the latest + version from a version-enabled bucket. + + :type encoding: str + :param encoding: The text encoding to use, such as ``utf-8`` + or ``iso-8859-1``. If set, then a string will be returned. + Defaults to ``None`` and returns bytes. + + :rtype: bytes or str + :returns: The contents of the file as bytes or a string + """ + fp = BytesIO() + self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent, + version_id=version_id, + response_headers=response_headers) + value = fp.getvalue() + + if encoding is not None: + value = value.decode(encoding) + + return value + + def add_email_grant(self, permission, email_address, headers=None): + """ + Convenience method that provides a quick way to add an email grant + to a key. This method retrieves the current ACL, creates a new + grant based on the parameters passed in, adds that grant to the ACL + and then PUT's the new ACL back to S3. + + :type permission: string + :param permission: The permission being granted. Should be one of: + (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL). + + :type email_address: string + :param email_address: The email address associated with the AWS + account your are granting the permission to. 
+        """
+        policy = self.get_acl(headers=headers)
+        policy.acl.add_email_grant(permission, email_address)
+        self.set_acl(policy, headers=headers)
+
+    def add_user_grant(self, permission, user_id, headers=None,
+                       display_name=None):
+        """
+        Convenience method that provides a quick way to add a canonical
+        user grant to a key. This method retrieves the current ACL,
+        creates a new grant based on the parameters passed in, adds that
+        grant to the ACL and then PUT's the new ACL back to S3.
+
+        :type permission: string
+        :param permission: The permission being granted. Should be one of:
+            (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
+
+        :type user_id: string
+        :param user_id: The canonical user id associated with the AWS
+            account you are granting the permission to.
+
+        :type display_name: string
+        :param display_name: An optional string containing the user's
+            Display Name. Only required on Walrus.
+        """
+        policy = self.get_acl(headers=headers)
+        policy.acl.add_user_grant(permission, user_id,
+                                  display_name=display_name)
+        self.set_acl(policy, headers=headers)
+
+    def _normalize_metadata(self, metadata):
+        if isinstance(metadata, set):
+            norm_metadata = set()
+            for k in metadata:
+                norm_metadata.add(k.lower())
+        else:
+            norm_metadata = {}
+            for k in metadata:
+                norm_metadata[k.lower()] = metadata[k]
+        return norm_metadata
+
+    def _get_remote_metadata(self, headers=None):
+        """
+        Extracts metadata from existing URI into a dict, so we can
+        overwrite/delete from it to form the new set of metadata to apply to a
+        key.
+        """
+        metadata = {}
+        for underscore_name in self._underscore_base_user_settable_fields:
+            if hasattr(self, underscore_name):
+                value = getattr(self, underscore_name)
+                if value:
+                    # Generate HTTP field name corresponding to "_" named field.
+                    field_name = underscore_name.replace('_', '-')
+                    metadata[field_name.lower()] = value
+        # self.metadata contains custom metadata, which are all user-settable.
+        prefix = self.provider.metadata_prefix
+        for underscore_name in self.metadata:
+            field_name = underscore_name.replace('_', '-')
+            metadata['%s%s' % (prefix, field_name.lower())] = (
+                self.metadata[underscore_name])
+        return metadata
+
+    def set_remote_metadata(self, metadata_plus, metadata_minus, preserve_acl,
+                            headers=None):
+        metadata_plus = self._normalize_metadata(metadata_plus)
+        metadata_minus = self._normalize_metadata(metadata_minus)
+        metadata = self._get_remote_metadata()
+        metadata.update(metadata_plus)
+        for h in metadata_minus:
+            if h in metadata:
+                del metadata[h]
+        src_bucket = self.bucket
+        # Boto prepends the meta prefix when adding headers, so strip prefix in
+        # metadata before sending back in to copy_key() call.
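+        # (Editor's note: e.g. 'x-amz-meta-color' becomes 'color' below;
+        # copy_key() re-adds the provider-specific metadata prefix when it
+        # builds the request headers.)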
+        rewritten_metadata = {}
+        for h in metadata:
+            if (h.startswith('x-goog-meta-') or h.startswith('x-amz-meta-')):
+                rewritten_h = (h.replace('x-goog-meta-', '')
+                               .replace('x-amz-meta-', ''))
+            else:
+                rewritten_h = h
+            rewritten_metadata[rewritten_h] = metadata[h]
+        metadata = rewritten_metadata
+        src_bucket.copy_key(self.name, self.bucket.name, self.name,
+                            metadata=metadata, preserve_acl=preserve_acl,
+                            headers=headers)
+
+    def restore(self, days, headers=None):
+        """Restore an object from an archive.
+
+        :type days: int
+        :param days: The lifetime of the restored object (must
+            be at least 1 day). If the object is already restored
+            then this parameter can be used to readjust the lifetime
+            of the restored object. In this case, the days
+            param is with respect to the initial time of the request.
+            If the object has not been restored, this param is with
+            respect to the completion time of the request.
+
+        """
+        response = self.bucket.connection.make_request(
+            'POST', self.bucket.name, self.name,
+            data=self.RestoreBody % days,
+            headers=headers, query_args='restore')
+        if response.status not in (200, 202):
+            provider = self.bucket.connection.provider
+            raise provider.storage_response_error(response.status,
+                                                  response.reason,
+                                                  response.read())
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/s3/keyfile.py b/desktop/core/ext-py/boto-2.38.0/boto/s3/keyfile.py
new file mode 100644
index 0000000000000000000000000000000000000000..4245413d740787ac723a839a26be0506eb682f40
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/s3/keyfile.py
@@ -0,0 +1,134 @@
+# Copyright 2013 Google Inc.
+# Copyright 2011, Nexenta Systems Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Wrapper class to expose a Key being read via a partial implementation of the
+Python file interface. The only functions supported are those needed for
+seeking in a Key open for reading.
+"""
+
+import os
+from boto.exception import StorageResponseError
+
+class KeyFile():
+
+    def __init__(self, key):
+        self.key = key
+        self.key.open_read()
+        self.location = 0
+        self.closed = False
+        self.softspace = -1  # Not implemented.
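+        # (Editor's illustrative sketch, not part of boto: KeyFile lets
+        # file-oriented code, such as the resumable download handler, treat
+        # an S3/GS object as a seekable read-only file, e.g.
+        #     kf = KeyFile(bucket.get_key('data.bin'))
+        #     kf.seek(0, os.SEEK_END)
+        #     size = kf.tell()
+        # where 'data.bin' is a placeholder key name.)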
+ self.mode = 'r' + self.encoding = 'Undefined in KeyFile' + self.errors = 'Undefined in KeyFile' + self.newlines = 'Undefined in KeyFile' + self.name = key.name + + def tell(self): + if self.location is None: + raise ValueError("I/O operation on closed file") + return self.location + + def seek(self, pos, whence=os.SEEK_SET): + self.key.close(fast=True) + if whence == os.SEEK_END: + # We need special handling for this case because sending an HTTP range GET + # with EOF for the range start would cause an invalid range error. Instead + # we position to one before EOF (plus pos) and then read one byte to + # position at EOF. + if self.key.size == 0: + # Don't try to seek with an empty key. + return + pos = self.key.size + pos - 1 + if pos < 0: + raise IOError("Invalid argument") + self.key.open_read(headers={"Range": "bytes=%d-" % pos}) + self.key.read(1) + self.location = pos + 1 + return + + if whence == os.SEEK_SET: + if pos < 0: + raise IOError("Invalid argument") + elif whence == os.SEEK_CUR: + pos += self.location + else: + raise IOError('Invalid whence param (%d) passed to seek' % whence) + try: + self.key.open_read(headers={"Range": "bytes=%d-" % pos}) + except StorageResponseError as e: + # 416 Invalid Range means that the given starting byte was past the end + # of file. We catch this because the Python file interface allows silently + # seeking past the end of the file. + if e.status != 416: + raise + + self.location = pos + + def read(self, size): + self.location += size + return self.key.read(size) + + def close(self): + self.key.close() + self.location = None + self.closed = True + + def isatty(self): + return False + + # Non-file interface, useful for code that wants to dig into underlying Key + # state. + def getkey(self): + return self.key + + # Unimplemented interfaces below here. + + def write(self, buf): + raise NotImplementedError('write not implemented in KeyFile') + + def fileno(self): + raise NotImplementedError('fileno not implemented in KeyFile') + + def flush(self): + raise NotImplementedError('flush not implemented in KeyFile') + + def next(self): + raise NotImplementedError('next not implemented in KeyFile') + + def readinto(self): + raise NotImplementedError('readinto not implemented in KeyFile') + + def readline(self): + raise NotImplementedError('readline not implemented in KeyFile') + + def readlines(self): + raise NotImplementedError('readlines not implemented in KeyFile') + + def truncate(self): + raise NotImplementedError('truncate not implemented in KeyFile') + + def writelines(self): + raise NotImplementedError('writelines not implemented in KeyFile') + + def xreadlines(self): + raise NotImplementedError('xreadlines not implemented in KeyFile') diff --git a/desktop/core/ext-py/boto-2.38.0/boto/s3/lifecycle.py b/desktop/core/ext-py/boto-2.38.0/boto/s3/lifecycle.py new file mode 100644 index 0000000000000000000000000000000000000000..8ceb879570edaab72b78a70f3dbc050b7748f053 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/s3/lifecycle.py @@ -0,0 +1,236 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. 
All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+from boto.compat import six
+
+class Rule(object):
+    """
+    A Lifecycle rule for an S3 bucket.
+
+    :ivar id: Unique identifier for the rule. The value cannot be longer
+        than 255 characters. This value is optional. The server will
+        generate a unique value for the rule if no value is provided.
+
+    :ivar prefix: Prefix identifying one or more objects to which the
+        rule applies. If prefix is not provided, Boto generates a default
+        prefix which will match all objects.
+
+    :ivar status: If 'Enabled', the rule is currently being applied.
+        If 'Disabled', the rule is not currently being applied.
+
+    :ivar expiration: An instance of `Expiration`. This indicates
+        the lifetime of the objects that are subject to the rule.
+
+    :ivar transition: An instance of `Transition`. This indicates
+        when to transition to a different storage class.
+
+    """
+    def __init__(self, id=None, prefix=None, status=None, expiration=None,
+                 transition=None):
+        self.id = id
+        self.prefix = '' if prefix is None else prefix
+        self.status = status
+        if isinstance(expiration, six.integer_types):
+            # Retain backwards compatibility: a bare int means days.
+            self.expiration = Expiration(days=expiration)
+        else:
+            # None or object
+            self.expiration = expiration
+        self.transition = transition
+
+    def __repr__(self):
+        return '<Rule: %s>' % self.id
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Transition':
+            self.transition = Transition()
+            return self.transition
+        elif name == 'Expiration':
+            self.expiration = Expiration()
+            return self.expiration
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'ID':
+            self.id = value
+        elif name == 'Prefix':
+            self.prefix = value
+        elif name == 'Status':
+            self.status = value
+        else:
+            setattr(self, name, value)
+
+    def to_xml(self):
+        s = '<Rule>'
+        if self.id is not None:
+            s += '<ID>%s</ID>' % self.id
+        s += '<Prefix>%s</Prefix>' % self.prefix
+        s += '<Status>%s</Status>' % self.status
+        if self.expiration is not None:
+            s += self.expiration.to_xml()
+        if self.transition is not None:
+            s += self.transition.to_xml()
+        s += '</Rule>'
+        return s
+
+class Expiration(object):
+    """
+    When an object will expire.
+
+    :ivar days: The number of days until the object expires
+
+    :ivar date: The date when the object will expire. Must be
+        in ISO 8601 format.
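+
+    Example (editor's sketch; the values are placeholders)::
+
+        Expiration(days=30)                          # 30 days after creation
+        Expiration(date='2016-01-01T00:00:00.000Z')  # on a fixed date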
+ """ + def __init__(self, days=None, date=None): + self.days = days + self.date = date + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Days': + self.days = int(value) + elif name == 'Date': + self.date = value + + def __repr__(self): + if self.days is None: + how_long = "on: %s" % self.date + else: + how_long = "in: %s days" % self.days + return '' % how_long + + def to_xml(self): + s = '' + if self.days is not None: + s += '%s' % self.days + elif self.date is not None: + s += '%s' % self.date + s += '' + return s + +class Transition(object): + """ + A transition to a different storage class. + + :ivar days: The number of days until the object should be moved. + + :ivar date: The date when the object should be moved. Should be + in ISO 8601 format. + + :ivar storage_class: The storage class to transition to. Valid + values are GLACIER. + + """ + def __init__(self, days=None, date=None, storage_class=None): + self.days = days + self.date = date + self.storage_class = storage_class + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Days': + self.days = int(value) + elif name == 'Date': + self.date = value + elif name == 'StorageClass': + self.storage_class = value + + def __repr__(self): + if self.days is None: + how_long = "on: %s" % self.date + else: + how_long = "in: %s days" % self.days + return '' % (how_long, self.storage_class) + + def to_xml(self): + s = '' + s += '%s' % self.storage_class + if self.days is not None: + s += '%s' % self.days + elif self.date is not None: + s += '%s' % self.date + s += '' + return s + +class Lifecycle(list): + """ + A container for the rules associated with a Lifecycle configuration. + """ + + def startElement(self, name, attrs, connection): + if name == 'Rule': + rule = Rule() + self.append(rule) + return rule + return None + + def endElement(self, name, value, connection): + setattr(self, name, value) + + def to_xml(self): + """ + Returns a string containing the XML version of the Lifecycle + configuration as defined by S3. + """ + s = '' + s += '' + for rule in self: + s += rule.to_xml() + s += '' + return s + + def add_rule(self, id=None, prefix='', status='Enabled', + expiration=None, transition=None): + """ + Add a rule to this Lifecycle configuration. This only adds + the rule to the local copy. To install the new rule(s) on + the bucket, you need to pass this Lifecycle config object + to the configure_lifecycle method of the Bucket object. + + :type id: str + :param id: Unique identifier for the rule. The value cannot be longer + than 255 characters. This value is optional. The server will + generate a unique value for the rule if no value is provided. + + :type prefix: str + :iparam prefix: Prefix identifying one or more objects to which the + rule applies. + + :type status: str + :param status: If 'Enabled', the rule is currently being applied. + If 'Disabled', the rule is not currently being applied. + + :type expiration: int + :param expiration: Indicates the lifetime, in days, of the objects + that are subject to the rule. The value must be a non-zero + positive integer. A Expiration object instance is also perfect. + + :type transition: Transition + :param transition: Indicates when an object transitions to a + different storage class. 
+ """ + rule = Rule(id, prefix, status, expiration, transition) + self.append(rule) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/s3/multidelete.py b/desktop/core/ext-py/boto-2.38.0/boto/s3/multidelete.py new file mode 100644 index 0000000000000000000000000000000000000000..3e2d48e32d174422fef512c176b418c7915d46c1 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/s3/multidelete.py @@ -0,0 +1,138 @@ +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto import handler +import xml.sax + +class Deleted(object): + """ + A successfully deleted object in a multi-object delete request. + + :ivar key: Key name of the object that was deleted. + + :ivar version_id: Version id of the object that was deleted. + + :ivar delete_marker: If True, indicates the object deleted + was a DeleteMarker. + + :ivar delete_marker_version_id: Version ID of the delete marker + deleted. + """ + def __init__(self, key=None, version_id=None, + delete_marker=False, delete_marker_version_id=None): + self.key = key + self.version_id = version_id + self.delete_marker = delete_marker + self.delete_marker_version_id = delete_marker_version_id + + def __repr__(self): + if self.version_id: + return '' % (self.key, self.version_id) + else: + return '' % self.key + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Key': + self.key = value + elif name == 'VersionId': + self.version_id = value + elif name == 'DeleteMarker': + if value.lower() == 'true': + self.delete_marker = True + elif name == 'DeleteMarkerVersionId': + self.delete_marker_version_id = value + else: + setattr(self, name, value) + +class Error(object): + """ + An unsuccessful deleted object in a multi-object delete request. + + :ivar key: Key name of the object that was not deleted. + + :ivar version_id: Version id of the object that was not deleted. + + :ivar code: Status code of the failed delete operation. + + :ivar message: Status message of the failed delete operation. 
+ """ + def __init__(self, key=None, version_id=None, + code=None, message=None): + self.key = key + self.version_id = version_id + self.code = code + self.message = message + + def __repr__(self): + if self.version_id: + return '' % (self.key, self.version_id, + self.code) + else: + return '' % (self.key, self.code) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Key': + self.key = value + elif name == 'VersionId': + self.version_id = value + elif name == 'Code': + self.code = value + elif name == 'Message': + self.message = value + else: + setattr(self, name, value) + +class MultiDeleteResult(object): + """ + The status returned from a MultiObject Delete request. + + :ivar deleted: A list of successfully deleted objects. Note that if + the quiet flag was specified in the request, this list will + be empty because only error responses would be returned. + + :ivar errors: A list of unsuccessfully deleted objects. + """ + + def __init__(self, bucket=None): + self.bucket = None + self.deleted = [] + self.errors = [] + + def startElement(self, name, attrs, connection): + if name == 'Deleted': + d = Deleted() + self.deleted.append(d) + return d + elif name == 'Error': + e = Error() + self.errors.append(e) + return e + return None + + def endElement(self, name, value, connection): + setattr(self, name, value) + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/s3/multipart.py b/desktop/core/ext-py/boto-2.38.0/boto/s3/multipart.py new file mode 100644 index 0000000000000000000000000000000000000000..056f9ca52ab726db97b7d83b56862fd7395ebf74 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/s3/multipart.py @@ -0,0 +1,330 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.s3 import user +from boto.s3 import key +from boto import handler +import xml.sax + + +class CompleteMultiPartUpload(object): + """ + Represents a completed MultiPart Upload. 
+    following useful attributes:
+
+     * location - The URI of the completed upload
+     * bucket_name - The name of the bucket in which the upload
+       is contained
+     * key_name - The name of the new, completed key
+     * etag - The MD5 hash of the completed, combined upload
+     * version_id - The version_id of the completed upload
+     * encrypted - The value of the encryption header
+    """
+
+    def __init__(self, bucket=None):
+        self.bucket = bucket
+        self.location = None
+        self.bucket_name = None
+        self.key_name = None
+        self.etag = None
+        self.version_id = None
+        self.encrypted = None
+
+    def __repr__(self):
+        return '<CompleteMultiPartUpload: %s.%s>' % (self.bucket_name,
+                                                     self.key_name)
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Location':
+            self.location = value
+        elif name == 'Bucket':
+            self.bucket_name = value
+        elif name == 'Key':
+            self.key_name = value
+        elif name == 'ETag':
+            self.etag = value
+        else:
+            setattr(self, name, value)
+
+
+class Part(object):
+    """
+    Represents a single part in a MultiPart upload.
+    Attributes include:
+
+     * part_number - The integer part number
+     * last_modified - The last modified date of this part
+     * etag - The MD5 hash of this part
+     * size - The size, in bytes, of this part
+    """
+
+    def __init__(self, bucket=None):
+        self.bucket = bucket
+        self.part_number = None
+        self.last_modified = None
+        self.etag = None
+        self.size = None
+
+    def __repr__(self):
+        if isinstance(self.part_number, int):
+            return '<Part %d>' % self.part_number
+        else:
+            return '<Part %s>' % None
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'PartNumber':
+            self.part_number = int(value)
+        elif name == 'LastModified':
+            self.last_modified = value
+        elif name == 'ETag':
+            self.etag = value
+        elif name == 'Size':
+            self.size = int(value)
+        else:
+            setattr(self, name, value)
+
+
+def part_lister(mpupload, part_number_marker=None):
+    """
+    A generator function for listing parts of a multipart upload.
+    """
+    more_results = True
+    part = None
+    while more_results:
+        parts = mpupload.get_all_parts(None, part_number_marker)
+        for part in parts:
+            yield part
+        part_number_marker = mpupload.next_part_number_marker
+        more_results = mpupload.is_truncated
+
+
+class MultiPartUpload(object):
+    """
+    Represents a MultiPart Upload operation.
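+
+    Example (editor's sketch; the bucket and object names are
+    placeholders)::
+
+        import boto
+        bucket = boto.connect_s3().get_bucket('mybucket')
+        mp = bucket.initiate_multipart_upload('big-object')
+        with open('part-1.bin', 'rb') as fp:
+            mp.upload_part_from_file(fp, part_num=1)
+        mp.complete_upload()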
+ """ + + def __init__(self, bucket=None): + self.bucket = bucket + self.bucket_name = None + self.key_name = None + self.id = id + self.initiator = None + self.owner = None + self.storage_class = None + self.initiated = None + self.part_number_marker = None + self.next_part_number_marker = None + self.max_parts = None + self.is_truncated = False + self._parts = None + + def __repr__(self): + return '' % self.key_name + + def __iter__(self): + return part_lister(self) + + def to_xml(self): + s = '\n' + for part in self: + s += ' \n' + s += ' %d\n' % part.part_number + s += ' %s\n' % part.etag + s += ' \n' + s += '' + return s + + def startElement(self, name, attrs, connection): + if name == 'Initiator': + self.initiator = user.User(self) + return self.initiator + elif name == 'Owner': + self.owner = user.User(self) + return self.owner + elif name == 'Part': + part = Part(self.bucket) + self._parts.append(part) + return part + return None + + def endElement(self, name, value, connection): + if name == 'Bucket': + self.bucket_name = value + elif name == 'Key': + self.key_name = value + elif name == 'UploadId': + self.id = value + elif name == 'StorageClass': + self.storage_class = value + elif name == 'PartNumberMarker': + self.part_number_marker = value + elif name == 'NextPartNumberMarker': + self.next_part_number_marker = value + elif name == 'MaxParts': + self.max_parts = int(value) + elif name == 'IsTruncated': + if value == 'true': + self.is_truncated = True + else: + self.is_truncated = False + elif name == 'Initiated': + self.initiated = value + else: + setattr(self, name, value) + + def get_all_parts(self, max_parts=None, part_number_marker=None, + encoding_type=None): + """ + Return the uploaded parts of this MultiPart Upload. This is + a lower-level method that requires you to manually page through + results. To simplify this process, you can just use the + object itself as an iterator and it will automatically handle + all of the paging with S3. + """ + self._parts = [] + query_args = 'uploadId=%s' % self.id + if max_parts: + query_args += '&max-parts=%d' % max_parts + if part_number_marker: + query_args += '&part-number-marker=%s' % part_number_marker + if encoding_type: + query_args += '&encoding-type=%s' % encoding_type + response = self.bucket.connection.make_request('GET', self.bucket.name, + self.key_name, + query_args=query_args) + body = response.read() + if response.status == 200: + h = handler.XmlHandler(self, self) + xml.sax.parseString(body, h) + return self._parts + + def upload_part_from_file(self, fp, part_num, headers=None, replace=True, + cb=None, num_cb=10, md5=None, size=None): + """ + Upload another part of this MultiPart Upload. + + .. note:: + + After you initiate multipart upload and upload one or more parts, + you must either complete or abort multipart upload in order to stop + getting charged for storage of the uploaded parts. Only after you + either complete or abort multipart upload, Amazon S3 frees up the + parts storage and stops charging you for the parts storage. + + :type fp: file + :param fp: The file object you want to upload. + + :type part_num: int + :param part_num: The number of this part. + + The other parameters are exactly as defined for the + :class:`boto.s3.key.Key` set_contents_from_file method. + + :rtype: :class:`boto.s3.key.Key` or subclass + :returns: The uploaded part containing the etag. 
+ """ + if part_num < 1: + raise ValueError('Part numbers must be greater than zero') + query_args = 'uploadId=%s&partNumber=%d' % (self.id, part_num) + key = self.bucket.new_key(self.key_name) + key.set_contents_from_file(fp, headers=headers, replace=replace, + cb=cb, num_cb=num_cb, md5=md5, + reduced_redundancy=False, + query_args=query_args, size=size) + return key + + def copy_part_from_key(self, src_bucket_name, src_key_name, part_num, + start=None, end=None, src_version_id=None, + headers=None): + """ + Copy another part of this MultiPart Upload. + + :type src_bucket_name: string + :param src_bucket_name: Name of the bucket containing the source key + + :type src_key_name: string + :param src_key_name: Name of the source key + + :type part_num: int + :param part_num: The number of this part. + + :type start: int + :param start: Zero-based byte offset to start copying from + + :type end: int + :param end: Zero-based byte offset to copy to + + :type src_version_id: string + :param src_version_id: version_id of source object to copy from + + :type headers: dict + :param headers: Any headers to pass along in the request + """ + if part_num < 1: + raise ValueError('Part numbers must be greater than zero') + query_args = 'uploadId=%s&partNumber=%d' % (self.id, part_num) + if start is not None and end is not None: + rng = 'bytes=%s-%s' % (start, end) + provider = self.bucket.connection.provider + if headers is None: + headers = {} + else: + headers = headers.copy() + headers[provider.copy_source_range_header] = rng + return self.bucket.copy_key(self.key_name, src_bucket_name, + src_key_name, + src_version_id=src_version_id, + storage_class=None, + headers=headers, + query_args=query_args) + + def complete_upload(self): + """ + Complete the MultiPart Upload operation. This method should + be called when all parts of the file have been successfully + uploaded to S3. + + :rtype: :class:`boto.s3.multipart.CompletedMultiPartUpload` + :returns: An object representing the completed upload. + """ + xml = self.to_xml() + return self.bucket.complete_multipart_upload(self.key_name, + self.id, xml) + + def cancel_upload(self): + """ + Cancels a MultiPart Upload operation. The storage consumed by + any previously uploaded parts will be freed. However, if any + part uploads are currently in progress, those part uploads + might or might not succeed. As a result, it might be necessary + to abort a given multipart upload multiple times in order to + completely free all storage consumed by all parts. 
+ """ + self.bucket.cancel_multipart_upload(self.key_name, self.id) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/s3/prefix.py b/desktop/core/ext-py/boto-2.38.0/boto/s3/prefix.py new file mode 100644 index 0000000000000000000000000000000000000000..adf28e935f0df19797a14cd257c6c62937bf2d57 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/s3/prefix.py @@ -0,0 +1,42 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class Prefix(object): + def __init__(self, bucket=None, name=None): + self.bucket = bucket + self.name = name + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Prefix': + self.name = value + else: + setattr(self, name, value) + + @property + def provider(self): + provider = None + if self.bucket and self.bucket.connection: + provider = self.bucket.connection.provider + return provider + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/s3/resumable_download_handler.py b/desktop/core/ext-py/boto-2.38.0/boto/s3/resumable_download_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..a2a88c74cd9a6ffb8b7d0046e1588aec673326d9 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/s3/resumable_download_handler.py @@ -0,0 +1,352 @@ +# Copyright 2010 Google Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+import errno
+import httplib
+import os
+import re
+import socket
+import time
+import boto
+from boto import config, storage_uri_for_key
+from boto.connection import AWSAuthConnection
+from boto.exception import ResumableDownloadException
+from boto.exception import ResumableTransferDisposition
+from boto.s3.keyfile import KeyFile
+from boto.gs.key import Key as GSKey
+
+"""
+Resumable download handler.
+
+Resumable downloads will retry failed downloads, resuming at the byte count
+completed by the last download attempt. If too many retries happen with no
+progress (per configurable num_retries param), the download will be aborted.
+
+The caller can optionally specify a tracker_file_name param in the
+ResumableDownloadHandler constructor. If you do this, that file will
+save the state needed to allow retrying later, in a separate process
+(e.g., in a later run of gsutil).
+
+Note that resumable downloads work across providers (they depend only
+on support for Range GETs), but this code is in the boto.s3 package
+because it is the wrong abstraction level to go in the top-level boto
+package.
+
+TODO: At some point we should refactor the code to have a storage_service
+package where all these provider-independent files go.
+"""
+
+
+class ByteTranslatingCallbackHandler(object):
+    """
+    Proxy class that translates progress callbacks made by
+    boto.s3.Key.get_file(), taking into account that we're resuming
+    a download.
+    """
+    def __init__(self, proxied_cb, download_start_point):
+        self.proxied_cb = proxied_cb
+        self.download_start_point = download_start_point
+
+    def call(self, total_bytes_uploaded, total_size):
+        self.proxied_cb(self.download_start_point + total_bytes_uploaded,
+                        total_size)
+
+
+def get_cur_file_size(fp, position_to_eof=False):
+    """
+    Returns size of file, optionally leaving fp positioned at EOF.
+    """
+    if isinstance(fp, KeyFile) and not position_to_eof:
+        # Avoid EOF seek for KeyFile case as it's very inefficient.
+        return fp.getkey().size
+    if not position_to_eof:
+        cur_pos = fp.tell()
+    fp.seek(0, os.SEEK_END)
+    cur_file_size = fp.tell()
+    if not position_to_eof:
+        fp.seek(cur_pos, os.SEEK_SET)
+    return cur_file_size
+
+
+class ResumableDownloadHandler(object):
+    """
+    Handler for resumable downloads.
+    """
+
+    MIN_ETAG_LEN = 5
+
+    RETRYABLE_EXCEPTIONS = (httplib.HTTPException, IOError, socket.error,
+                            socket.gaierror)
+
+    def __init__(self, tracker_file_name=None, num_retries=None):
+        """
+        Constructor. Instantiate once for each downloaded file.
+
+        :type tracker_file_name: string
+        :param tracker_file_name: optional file name to save tracking info
+            about this download. If supplied and the current process fails
+            the download, it can be retried in a new process. If called
+            with an existing file containing an unexpired timestamp,
+            we'll resume the transfer for this file; else we'll start a
+            new resumable download.
+
+        :type num_retries: int
+        :param num_retries: the number of times we'll re-try a resumable
+            download making no progress. (Count resets every time we get
+            progress, so download can span many more than this number of
+            retries.)
+        """
+        self.tracker_file_name = tracker_file_name
+        self.num_retries = num_retries
+        self.etag_value_for_current_download = None
+        if tracker_file_name:
+            self._load_tracker_file_etag()
+        # Save download_start_point in instance state so caller can
+        # find how much was transferred by this ResumableDownloadHandler
+        # (across retries).
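+        # (Editor's illustrative sketch, not part of boto: a handler is
+        # typically passed straight to a download call, e.g.
+        #     handler = ResumableDownloadHandler(
+        #         tracker_file_name='/tmp/obj.tracker', num_retries=6)
+        #     key.get_contents_to_filename('/tmp/obj',
+        #                                  res_download_handler=handler)
+        # so that a later run can resume from the tracker file; the paths
+        # are placeholders.)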
+        self.download_start_point = None
+
+    def _load_tracker_file_etag(self):
+        f = None
+        try:
+            f = open(self.tracker_file_name, 'r')
+            self.etag_value_for_current_download = f.readline().rstrip('\n')
+            # We used to match an MD5-based regex to ensure that the etag was
+            # read correctly. Since ETags need not be MD5s, we now do a simple
+            # length sanity check instead.
+            if len(self.etag_value_for_current_download) < self.MIN_ETAG_LEN:
+                print('Couldn\'t read etag in tracker file (%s). Restarting '
+                      'download from scratch.' % self.tracker_file_name)
+        except IOError as e:
+            # Ignore non-existent file (happens first time a download
+            # is attempted on an object), but warn user for other errors.
+            if e.errno != errno.ENOENT:
+                # Will restart because
+                # self.etag_value_for_current_download is None.
+                print('Couldn\'t read URI tracker file (%s): %s. Restarting '
+                      'download from scratch.' %
+                      (self.tracker_file_name, e.strerror))
+        finally:
+            if f:
+                f.close()
+
+    def _save_tracker_info(self, key):
+        self.etag_value_for_current_download = key.etag.strip('"\'')
+        if not self.tracker_file_name:
+            return
+        f = None
+        try:
+            f = open(self.tracker_file_name, 'w')
+            f.write('%s\n' % self.etag_value_for_current_download)
+        except IOError as e:
+            raise ResumableDownloadException(
+                'Couldn\'t write tracker file (%s): %s.\nThis can happen\n'
+                'if you\'re using an incorrectly configured download tool\n'
+                '(e.g., gsutil configured to save tracker files to an '
+                'unwritable directory)' %
+                (self.tracker_file_name, e.strerror),
+                ResumableTransferDisposition.ABORT)
+        finally:
+            if f:
+                f.close()
+
+    def _remove_tracker_file(self):
+        if (self.tracker_file_name and
+                os.path.exists(self.tracker_file_name)):
+            os.unlink(self.tracker_file_name)
+
+    def _attempt_resumable_download(self, key, fp, headers, cb, num_cb,
+                                    torrent, version_id, hash_algs):
+        """
+        Attempts a resumable download.
+
+        Raises ResumableDownloadException if any problems occur.
+        """
+        cur_file_size = get_cur_file_size(fp, position_to_eof=True)
+
+        if (cur_file_size and
+                self.etag_value_for_current_download and
+                self.etag_value_for_current_download == key.etag.strip('"\'')):
+            # Try to resume existing transfer.
+            if cur_file_size > key.size:
+                raise ResumableDownloadException(
+                    '%s is larger (%d) than %s (%d).\nDeleting tracker file, so '
+                    'if you re-try this download it will start from scratch' %
+                    (fp.name, cur_file_size, str(storage_uri_for_key(key)),
+                     key.size), ResumableTransferDisposition.ABORT)
+            elif cur_file_size == key.size:
+                if key.bucket.connection.debug >= 1:
+                    print('Download complete.')
+                return
+            if key.bucket.connection.debug >= 1:
+                print('Resuming download.')
+            headers = headers.copy()
+            headers['Range'] = 'bytes=%d-%d' % (cur_file_size, key.size - 1)
+            cb = ByteTranslatingCallbackHandler(cb, cur_file_size).call
+            self.download_start_point = cur_file_size
+        else:
+            if key.bucket.connection.debug >= 1:
+                print('Starting new resumable download.')
+            self._save_tracker_info(key)
+            self.download_start_point = 0
+            # Truncate the file, in case a new resumable download is being
+            # started atop an existing file.
+            fp.truncate(0)
+
+        # Disable AWSAuthConnection-level retry behavior, since that would
+        # cause downloads to restart from scratch.
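+        # (Editor's note: GSKey.get_file() accepts a hash_algs argument for
+        # alternative checksums; the plain S3 Key.get_file() does not, which
+        # is why the two cases are dispatched separately below.)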
+        if isinstance(key, GSKey):
+            key.get_file(fp, headers, cb, num_cb, torrent, version_id,
+                         override_num_retries=0, hash_algs=hash_algs)
+        else:
+            key.get_file(fp, headers, cb, num_cb, torrent, version_id,
+                         override_num_retries=0)
+        fp.flush()
+
+    def get_file(self, key, fp, headers, cb=None, num_cb=10, torrent=False,
+                 version_id=None, hash_algs=None):
+        """
+        Retrieves a file from a Key.
+
+        :type key: :class:`boto.s3.key.Key` or subclass
+        :param key: The Key object from which the contents are to be
+            downloaded
+
+        :type fp: file
+        :param fp: File pointer into which data should be downloaded
+
+        :type headers: dict
+        :param headers: headers to send when retrieving the file
+
+        :type cb: function
+        :param cb: (optional) a callback function that will be called to report
+            progress on the download. The callback should accept two integer
+            parameters, the first representing the number of bytes that have
+            been successfully transmitted from the storage service and
+            the second representing the total number of bytes that need
+            to be transmitted.
+
+        :type num_cb: int
+        :param num_cb: (optional) If a callback is specified with the cb
+            parameter this parameter determines the granularity of the callback
+            by defining the maximum number of times the callback will be
+            called during the file transfer.
+
+        :type torrent: bool
+        :param torrent: Flag for whether to get a torrent for the file
+
+        :type version_id: string
+        :param version_id: The version ID (optional)
+
+        :type hash_algs: dictionary
+        :param hash_algs: (optional) Dictionary of hash algorithms and
+            corresponding hashing class that implements update() and digest().
+            Defaults to {'md5': hashlib.md5}.
+
+        Raises ResumableDownloadException if a problem occurs during
+        the transfer.
+        """
+
+        debug = key.bucket.connection.debug
+        if not headers:
+            headers = {}
+
+        # Use num-retries from constructor if one was provided; else check
+        # for a value specified in the boto config file; else default to 6.
+        if self.num_retries is None:
+            self.num_retries = config.getint('Boto', 'num_retries', 6)
+        progress_less_iterations = 0
+
+        while True:  # Retry as long as we're making progress.
+            had_file_bytes_before_attempt = get_cur_file_size(fp)
+            try:
+                self._attempt_resumable_download(key, fp, headers, cb, num_cb,
+                                                 torrent, version_id,
+                                                 hash_algs)
+                # Download succeeded, so remove the tracker file (if we have
+                # one).
+                self._remove_tracker_file()
+                # Previously, check_final_md5() was called here to validate
+                # downloaded file's checksum, however, to be consistent with
+                # non-resumable downloads, this call was removed. Checksum
+                # validation of file contents should be done by the caller.
+                if debug >= 1:
+                    print('Resumable download complete.')
+                return
+            except self.RETRYABLE_EXCEPTIONS as e:
+                if debug >= 1:
+                    print('Caught exception (%s)' % e.__repr__())
+                if isinstance(e, IOError) and e.errno == errno.EPIPE:
+                    # Broken pipe error causes httplib to immediately
+                    # close the socket (http://bugs.python.org/issue5542),
+                    # so we need to close and reopen the key before resuming
+                    # the download.
+                    if isinstance(key, GSKey):
+                        key.get_file(fp, headers, cb, num_cb, torrent,
+                                     version_id, override_num_retries=0,
+                                     hash_algs=hash_algs)
+                    else:
+                        key.get_file(fp, headers, cb, num_cb, torrent,
+                                     version_id, override_num_retries=0)
+            except ResumableDownloadException as e:
+                if (e.disposition ==
+                        ResumableTransferDisposition.ABORT_CUR_PROCESS):
+                    if debug >= 1:
+                        print('Caught non-retryable ResumableDownloadException '
+                              '(%s)' % e.message)
+                    raise
+                elif (e.disposition ==
+                        ResumableTransferDisposition.ABORT):
+                    if debug >= 1:
+                        print('Caught non-retryable ResumableDownloadException '
+                              '(%s); aborting and removing tracker file' %
+                              e.message)
+                    self._remove_tracker_file()
+                    raise
+                else:
+                    if debug >= 1:
+                        print('Caught ResumableDownloadException (%s) - will '
+                              'retry' % e.message)
+
+            # At this point we had a re-tryable failure; see if made progress.
+            if get_cur_file_size(fp) > had_file_bytes_before_attempt:
+                progress_less_iterations = 0
+            else:
+                progress_less_iterations += 1
+
+            if progress_less_iterations > self.num_retries:
+                # Don't retry any longer in the current process.
+                raise ResumableDownloadException(
+                    'Too many resumable download attempts failed without '
+                    'progress. You might try this download again later',
+                    ResumableTransferDisposition.ABORT_CUR_PROCESS)
+
+            # Close the key, in case a previous download died partway
+            # through and left data in the underlying key HTTP buffer.
+            # Do this within a try/except block in case the connection is
+            # closed (since key.close() attempts to do a final read, in which
+            # case this read attempt would get an IncompleteRead exception,
+            # which we can safely ignore).
+            try:
+                key.close()
+            except httplib.IncompleteRead:
+                pass
+
+            sleep_time_secs = 2**progress_less_iterations
+            if debug >= 1:
+                print('Got retryable failure (%d progress-less in a row).\n'
+                      'Sleeping %d seconds before re-trying' %
+                      (progress_less_iterations, sleep_time_secs))
+            time.sleep(sleep_time_secs)
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/s3/tagging.py b/desktop/core/ext-py/boto-2.38.0/boto/s3/tagging.py
new file mode 100644
index 0000000000000000000000000000000000000000..0af6406fb12c6cd316cdd41e759fb0398abba632
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/s3/tagging.py
@@ -0,0 +1,71 @@
+from boto import handler
+import xml.sax
+
+
+class Tag(object):
+    def __init__(self, key=None, value=None):
+        self.key = key
+        self.value = value
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Key':
+            self.key = value
+        elif name == 'Value':
+            self.value = value
+
+    def to_xml(self):
+        return '<Tag><Key>%s</Key><Value>%s</Value></Tag>' % (
+            self.key, self.value)
+
+    def __eq__(self, other):
+        return (self.key == other.key and self.value == other.value)
+
+
+class TagSet(list):
+    def startElement(self, name, attrs, connection):
+        if name == 'Tag':
+            tag = Tag()
+            self.append(tag)
+            return tag
+        return None
+
+    def endElement(self, name, value, connection):
+        setattr(self, name, value)
+
+    def add_tag(self, key, value):
+        tag = Tag(key, value)
+        self.append(tag)
+
+    def to_xml(self):
+        xml = '<TagSet>'
+        for tag in self:
+            xml += tag.to_xml()
+        xml += '</TagSet>'
+        return xml
+
+
+class Tags(list):
+    """A container for the tags associated with a bucket."""
+
+    def startElement(self, name, attrs, connection):
+        if name == 'TagSet':
+            tag_set = TagSet()
+            self.append(tag_set)
+            return tag_set
+        return None
+
+    def endElement(self, name, value, connection):
+        setattr(self, name, value)
+
+    def to_xml(self):
+        xml = '<Tagging>'
+        for tag_set in self:
+            xml += tag_set.to_xml()
+        xml += '</Tagging>'
+        return xml
+
+    def add_tag_set(self, tag_set):
+        self.append(tag_set)
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/s3/user.py b/desktop/core/ext-py/boto-2.38.0/boto/s3/user.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2cbbb7f8f4727c0a8d4cb2976c5912bdac5912e
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/s3/user.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+class User(object):
+    def __init__(self, parent=None, id='', display_name=''):
+        if parent:
+            parent.owner = self
+        self.type = None
+        self.id = id
+        self.display_name = display_name
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'DisplayName':
+            self.display_name = value
+        elif name == 'ID':
+            self.id = value
+        else:
+            setattr(self, name, value)
+
+    def to_xml(self, element_name='Owner'):
+        if self.type:
+            s = '<%s xsi:type="%s">' % (element_name, self.type)
+        else:
+            s = '<%s>' % element_name
+        s += '<ID>%s</ID>' % self.id
+        s += '<DisplayName>%s</DisplayName>' % self.display_name
+        s += '</%s>' % element_name
+        return s
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/s3/website.py b/desktop/core/ext-py/boto-2.38.0/boto/s3/website.py
new file mode 100644
index 0000000000000000000000000000000000000000..c307f3e9907b37e47cd3f2c3a4aba2e701beda83
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/s3/website.py
@@ -0,0 +1,293 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+def tag(key, value):
+    start = '<%s>' % key
+    end = '</%s>' % key
+    return '%s%s%s' % (start, value, end)
+
+
+class WebsiteConfiguration(object):
+    """
+    Website configuration for a bucket.
+
+    :ivar suffix: Suffix that is appended to a request that is for a
+        "directory" on the website endpoint (e.g. if the suffix is
+        index.html and you make a request to samplebucket/images/
+        the data that is returned will be for the object with the
+        key name images/index.html). The suffix must not be empty
+        and must not include a slash character.
+
+    :ivar error_key: The object key name to use when a 4xx class error
+        occurs. This key identifies the page that is returned when
+        such an error occurs.
+
+    :ivar redirect_all_requests_to: Describes the redirect behavior for every
+        request to this bucket's website endpoint. If this value is not None,
+        no other values are considered when configuring the website
+        configuration for the bucket. This is an instance of
+        ``RedirectLocation``.
+
+    :ivar routing_rules: ``RoutingRules`` object which specifies conditions
+        and redirects that apply when the conditions are met.
+
+    """
+
+    def __init__(self, suffix=None, error_key=None,
+                 redirect_all_requests_to=None, routing_rules=None):
+        self.suffix = suffix
+        self.error_key = error_key
+        self.redirect_all_requests_to = redirect_all_requests_to
+        if routing_rules is not None:
+            self.routing_rules = routing_rules
+        else:
+            self.routing_rules = RoutingRules()
+
+    def startElement(self, name, attrs, connection):
+        if name == 'RoutingRules':
+            self.routing_rules = RoutingRules()
+            return self.routing_rules
+        elif name == 'IndexDocument':
+            return _XMLKeyValue([('Suffix', 'suffix')], container=self)
+        elif name == 'ErrorDocument':
+            return _XMLKeyValue([('Key', 'error_key')], container=self)
+
+    def endElement(self, name, value, connection):
+        pass
+
+    def to_xml(self):
+        parts = ['<?xml version="1.0" encoding="UTF-8"?>',
+                 '<WebsiteConfiguration xmlns='
+                 '"http://s3.amazonaws.com/doc/2006-03-01/">']
+        if self.suffix is not None:
+            parts.append(tag('IndexDocument', tag('Suffix', self.suffix)))
+        if self.error_key is not None:
+            parts.append(tag('ErrorDocument', tag('Key', self.error_key)))
+        if self.redirect_all_requests_to is not None:
+            parts.append(self.redirect_all_requests_to.to_xml())
+        if self.routing_rules:
+            parts.append(self.routing_rules.to_xml())
+        parts.append('</WebsiteConfiguration>')
+        return ''.join(parts)
+
+
+class _XMLKeyValue(object):
+    def __init__(self, translator, container=None):
+        self.translator = translator
+        if container:
+            self.container = container
+        else:
+            self.container = self
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        for xml_key, attr_name in self.translator:
+            if name == xml_key:
+                setattr(self.container, attr_name, value)
+
+    def to_xml(self):
+        parts = []
+        for xml_key, attr_name in self.translator:
+            content = getattr(self.container, attr_name)
+            if content is not None:
+                parts.append(tag(xml_key, content))
+        return ''.join(parts)
+
+
+class RedirectLocation(_XMLKeyValue):
+    """Specify redirect behavior for every request to a bucket's endpoint.
+
+    :ivar hostname: Name of the host where requests will be redirected.
+
+    :ivar protocol: Protocol to use (http, https) when redirecting requests.
+        The default is the protocol that is used in the original request.
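+
+    Example (editor's sketch; assumes an existing ``bucket`` object, and
+    the hostname is a placeholder)::
+
+        from boto.s3.website import WebsiteConfiguration, RedirectLocation
+        config = WebsiteConfiguration(
+            redirect_all_requests_to=RedirectLocation('example.com', 'https'))
+        bucket.set_website_configuration(config)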
+
+    """
+    TRANSLATOR = [('HostName', 'hostname'),
+                  ('Protocol', 'protocol'),
+                  ]
+
+    def __init__(self, hostname=None, protocol=None):
+        self.hostname = hostname
+        self.protocol = protocol
+        super(RedirectLocation, self).__init__(self.TRANSLATOR)
+
+    def to_xml(self):
+        return tag('RedirectAllRequestsTo',
+                   super(RedirectLocation, self).to_xml())
+
+
+class RoutingRules(list):
+
+    def add_rule(self, rule):
+        """
+
+        :type rule: :class:`boto.s3.website.RoutingRule`
+        :param rule: A routing rule.
+
+        :return: This ``RoutingRules`` object is returned,
+            so that it can chain subsequent calls.
+
+        """
+        self.append(rule)
+        return self
+
+    def startElement(self, name, attrs, connection):
+        if name == 'RoutingRule':
+            rule = RoutingRule(Condition(), Redirect())
+            self.add_rule(rule)
+            return rule
+
+    def endElement(self, name, value, connection):
+        pass
+
+    def __repr__(self):
+        return "RoutingRules(%s)" % super(RoutingRules, self).__repr__()
+
+    def to_xml(self):
+        inner_text = []
+        for rule in self:
+            inner_text.append(rule.to_xml())
+        return tag('RoutingRules', '\n'.join(inner_text))
+
+
+class RoutingRule(object):
+    """Represents a single routing rule.
+
+    There are convenience methods that make creating rules
+    more concise::
+
+        rule = RoutingRule.when(key_prefix='foo/').then_redirect('example.com')
+
+    :ivar condition: Describes condition that must be met for the
+        specified redirect to apply.
+
+    :ivar redirect: Specifies redirect behavior. You can redirect requests to
+        another host, to another page, or with another protocol. In the event
+        of an error, you can specify a different error code to return.
+
+    """
+    def __init__(self, condition=None, redirect=None):
+        self.condition = condition
+        self.redirect = redirect
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Condition':
+            return self.condition
+        elif name == 'Redirect':
+            return self.redirect
+
+    def endElement(self, name, value, connection):
+        pass
+
+    def to_xml(self):
+        parts = []
+        if self.condition:
+            parts.append(self.condition.to_xml())
+        if self.redirect:
+            parts.append(self.redirect.to_xml())
+        return tag('RoutingRule', '\n'.join(parts))
+
+    @classmethod
+    def when(cls, key_prefix=None, http_error_code=None):
+        return cls(Condition(key_prefix=key_prefix,
+                             http_error_code=http_error_code), None)
+
+    def then_redirect(self, hostname=None, protocol=None, replace_key=None,
+                      replace_key_prefix=None, http_redirect_code=None):
+        self.redirect = Redirect(
+            hostname=hostname, protocol=protocol,
+            replace_key=replace_key,
+            replace_key_prefix=replace_key_prefix,
+            http_redirect_code=http_redirect_code)
+        return self
+
+
+class Condition(_XMLKeyValue):
+    """
+    :ivar key_prefix: The object key name prefix when the redirect is applied.
+        For example, to redirect requests for ExamplePage.html, the key prefix
+        will be ExamplePage.html. To redirect requests for all pages with the
+        prefix docs/, the key prefix will be docs/, which identifies all
+        objects in the docs/ folder.
+
+    :ivar http_error_code: The HTTP error code when the redirect is applied. In
+        the event of an error, if the error code equals this value, then the
+        specified redirect is applied.
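+
+    Example (editor's sketch; the prefixes are placeholders)::
+
+        rules = RoutingRules().add_rule(
+            RoutingRule.when(key_prefix='docs/').then_redirect(
+                replace_key_prefix='documents/'))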
+ + """ + TRANSLATOR = [ + ('KeyPrefixEquals', 'key_prefix'), + ('HttpErrorCodeReturnedEquals', 'http_error_code'), + ] + + def __init__(self, key_prefix=None, http_error_code=None): + self.key_prefix = key_prefix + self.http_error_code = http_error_code + super(Condition, self).__init__(self.TRANSLATOR) + + def to_xml(self): + return tag('Condition', super(Condition, self).to_xml()) + + +class Redirect(_XMLKeyValue): + """ + :ivar hostname: The host name to use in the redirect request. + + :ivar protocol: The protocol to use in the redirect request. Can be either + 'http' or 'https'. + + :ivar replace_key: The specific object key to use in the redirect request. + For example, redirect request to error.html. + + :ivar replace_key_prefix: The object key prefix to use in the redirect + request. For example, to redirect requests for all pages with prefix + docs/ (objects in the docs/ folder) to documents/, you can set a + condition block with KeyPrefixEquals set to docs/ and in the Redirect + set ReplaceKeyPrefixWith to /documents. + + :ivar http_redirect_code: The HTTP redirect code to use on the response. + + """ + + TRANSLATOR = [ + ('Protocol', 'protocol'), + ('HostName', 'hostname'), + ('ReplaceKeyWith', 'replace_key'), + ('ReplaceKeyPrefixWith', 'replace_key_prefix'), + ('HttpRedirectCode', 'http_redirect_code'), + ] + + def __init__(self, hostname=None, protocol=None, replace_key=None, + replace_key_prefix=None, http_redirect_code=None): + self.hostname = hostname + self.protocol = protocol + self.replace_key = replace_key + self.replace_key_prefix = replace_key_prefix + self.http_redirect_code = http_redirect_code + super(Redirect, self).__init__(self.TRANSLATOR) + + def to_xml(self): + return tag('Redirect', super(Redirect, self).to_xml()) + + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sdb/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/sdb/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1235a88a2e1c3856eacdd3d8b9ce4dc48a77936d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sdb/__init__.py @@ -0,0 +1,55 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.sdb.regioninfo import SDBRegionInfo +from boto.regioninfo import get_regions + + +def regions(): + """ + Get all available regions for the SDB service. 
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sdb/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/sdb/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1235a88a2e1c3856eacdd3d8b9ce4dc48a77936d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sdb/__init__.py @@ -0,0 +1,55 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.sdb.regioninfo import SDBRegionInfo +from boto.regioninfo import get_regions + + +def regions(): + """ + Get all available regions for the SDB service. + + :rtype: list + :return: A list of :class:`boto.sdb.regioninfo.RegionInfo` instances + """ + return get_regions( + 'sdb', + region_cls=SDBRegionInfo + ) + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.sdb.connection.SDBConnection`. + + :type region_name: str + :param region_name: The name of the region to connect to. + + :rtype: :class:`boto.sdb.connection.SDBConnection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sdb/connection.py b/desktop/core/ext-py/boto-2.38.0/boto/sdb/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..fa7cb83e3ec09a3312c48be399e0e1f07f022f8f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sdb/connection.py @@ -0,0 +1,618 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import xml.sax +import threading +import boto +from boto import handler +from boto.connection import AWSQueryConnection +from boto.sdb.domain import Domain, DomainMetaData +from boto.sdb.item import Item +from boto.sdb.regioninfo import SDBRegionInfo +from boto.exception import SDBResponseError + +class ItemThread(threading.Thread): + """ + A threaded :class:`Item <boto.sdb.item.Item>` retriever utility class. + Retrieved :class:`Item <boto.sdb.item.Item>` objects are stored in the + ``items`` instance variable after :py:meth:`run() <run>` is called. + + .. tip:: The item retrieval will not start until + the :func:`run() <run>` method is called. + """ + def __init__(self, name, domain_name, item_names): + """ + :param str name: A thread name. Used for identification. + :param str domain_name: The name of a SimpleDB + :class:`Domain <boto.sdb.domain.Domain>` + :type item_names: string or list of strings + :param item_names: The name(s) of the items to retrieve from the specified + :class:`Domain <boto.sdb.domain.Domain>`. + :ivar list items: A list of items retrieved. Starts as empty list. + """ + super(ItemThread, self).__init__(name=name) + #print 'starting %s with %d items' % (name, len(item_names)) + self.domain_name = domain_name + self.conn = SDBConnection() + self.item_names = item_names + self.items = [] + + def run(self): + """ + Start the threaded retrieval of items. Populates the + ``items`` list with :class:`Item <boto.sdb.item.Item>` objects.
+ """ + for item_name in self.item_names: + item = self.conn.get_attributes(self.domain_name, item_name) + self.items.append(item) + +#boto.set_stream_logger('sdb') + +class SDBConnection(AWSQueryConnection): + """ + This class serves as a gateway to your SimpleDB region (defaults to + us-east-1). Methods within allow access to SimpleDB + :class:`Domain ` objects and their associated + :class:`Item ` objects. + + .. tip:: + While you may instantiate this class directly, it may be easier to + go through :py:func:`boto.connect_sdb`. + """ + DefaultRegionName = 'us-east-1' + DefaultRegionEndpoint = 'sdb.us-east-1.amazonaws.com' + APIVersion = '2009-04-15' + ResponseError = SDBResponseError + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + converter=None, security_token=None, validate_certs=True, + profile_name=None): + """ + For any keywords that aren't documented, refer to the parent class, + :py:class:`boto.connection.AWSAuthConnection`. You can avoid having + to worry about these keyword arguments by instantiating these objects + via :py:func:`boto.connect_sdb`. + + :type region: :class:`boto.sdb.regioninfo.SDBRegionInfo` + :keyword region: Explicitly specify a region. Defaults to ``us-east-1`` + if not specified. You may also specify the region in your ``boto.cfg``: + + .. code-block:: cfg + + [SDB] + region = eu-west-1 + + """ + if not region: + region_name = boto.config.get('SDB', 'region', self.DefaultRegionName) + for reg in boto.sdb.regions(): + if reg.name == region_name: + region = reg + break + + self.region = region + super(SDBConnection, self).__init__(aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, + proxy_port, proxy_user, proxy_pass, + self.region.endpoint, debug, + https_connection_factory, path, + security_token=security_token, + validate_certs=validate_certs, + profile_name=profile_name) + self.box_usage = 0.0 + self.converter = converter + self.item_cls = Item + + def _required_auth_capability(self): + return ['sdb'] + + def set_item_cls(self, cls): + """ + While the default item class is :py:class:`boto.sdb.item.Item`, this + default may be overridden. Use this method to change a connection's + item class. + + :param object cls: The new class to set as this connection's item + class. See the default item class for inspiration as to what your + replacement should/could look like. 
+ """ + self.item_cls = cls + + def _build_name_value_list(self, params, attributes, replace=False, + label='Attribute'): + keys = sorted(attributes.keys()) + i = 1 + for key in keys: + value = attributes[key] + if isinstance(value, list): + for v in value: + params['%s.%d.Name' % (label, i)] = key + if self.converter: + v = self.converter.encode(v) + params['%s.%d.Value' % (label, i)] = v + if replace: + params['%s.%d.Replace' % (label, i)] = 'true' + i += 1 + else: + params['%s.%d.Name' % (label, i)] = key + if self.converter: + value = self.converter.encode(value) + params['%s.%d.Value' % (label, i)] = value + if replace: + params['%s.%d.Replace' % (label, i)] = 'true' + i += 1 + + def _build_expected_value(self, params, expected_value): + params['Expected.1.Name'] = expected_value[0] + if expected_value[1] is True: + params['Expected.1.Exists'] = 'true' + elif expected_value[1] is False: + params['Expected.1.Exists'] = 'false' + else: + params['Expected.1.Value'] = expected_value[1] + + def _build_batch_list(self, params, items, replace=False): + item_names = items.keys() + i = 0 + for item_name in item_names: + params['Item.%d.ItemName' % i] = item_name + j = 0 + item = items[item_name] + if item is not None: + attr_names = item.keys() + for attr_name in attr_names: + value = item[attr_name] + if isinstance(value, list): + for v in value: + if self.converter: + v = self.converter.encode(v) + params['Item.%d.Attribute.%d.Name' % (i, j)] = attr_name + params['Item.%d.Attribute.%d.Value' % (i, j)] = v + if replace: + params['Item.%d.Attribute.%d.Replace' % (i, j)] = 'true' + j += 1 + else: + params['Item.%d.Attribute.%d.Name' % (i, j)] = attr_name + if self.converter: + value = self.converter.encode(value) + params['Item.%d.Attribute.%d.Value' % (i, j)] = value + if replace: + params['Item.%d.Attribute.%d.Replace' % (i, j)] = 'true' + j += 1 + i += 1 + + def _build_name_list(self, params, attribute_names): + i = 1 + attribute_names.sort() + for name in attribute_names: + params['Attribute.%d.Name' % i] = name + i += 1 + + def get_usage(self): + """ + Returns the BoxUsage (in USD) accumulated on this specific SDBConnection + instance. + + .. tip:: This can be out of date, and should only be treated as a + rough estimate. Also note that this estimate only applies to the + requests made on this specific connection instance. It is by + no means an account-wide estimate. + + :rtype: float + :return: The accumulated BoxUsage of all requests made on the connection. + """ + return self.box_usage + + def print_usage(self): + """ + Print the BoxUsage and approximate costs of all requests made on + this specific SDBConnection instance. + + .. tip:: This can be out of date, and should only be treated as a + rough estimate. Also note that this estimate only applies to the + requests made on this specific connection instance. It is by + no means an account-wide estimate. + """ + print('Total Usage: %f compute seconds' % self.box_usage) + cost = self.box_usage * 0.14 + print('Approximate Cost: $%f' % cost) + + def get_domain(self, domain_name, validate=True): + """ + Retrieves a :py:class:`boto.sdb.domain.Domain` object whose name + matches ``domain_name``. + + :param str domain_name: The name of the domain to retrieve + :keyword bool validate: When ``True``, check to see if the domain + actually exists. If ``False``, blindly return a + :py:class:`Domain ` object with the + specified name set. 
+ + :raises: + :py:class:`boto.exception.SDBResponseError` if ``validate`` is + ``True`` and no match could be found. + + :rtype: :py:class:`boto.sdb.domain.Domain` + :return: The requested domain + """ + domain = Domain(self, domain_name) + if validate: + self.select(domain, """select * from `%s` limit 1""" % domain_name) + return domain + + def lookup(self, domain_name, validate=True): + """ + Lookup an existing SimpleDB domain. This differs from + :py:meth:`get_domain` in that ``None`` is returned if ``validate`` is + ``True`` and no match was found (instead of raising an exception). + + :param str domain_name: The name of the domain to retrieve + + :param bool validate: If ``True``, a ``None`` value will be returned + if the specified domain can't be found. If ``False``, a + :py:class:`Domain <boto.sdb.domain.Domain>` object will be dumbly + returned, regardless of whether it actually exists. + + :rtype: :class:`boto.sdb.domain.Domain` object or ``None`` + :return: The Domain object or ``None`` if the domain does not exist. + """ + try: + domain = self.get_domain(domain_name, validate) + except: + domain = None + return domain + + def get_all_domains(self, max_domains=None, next_token=None): + """ + Returns a :py:class:`boto.resultset.ResultSet` containing + all :py:class:`boto.sdb.domain.Domain` objects associated with + this connection's Access Key ID. + + :keyword int max_domains: Limit the returned + :py:class:`ResultSet <boto.resultset.ResultSet>` to the specified + number of members. + :keyword str next_token: A token string that was returned in an + earlier call to this method as the ``next_token`` attribute + on the returned :py:class:`ResultSet <boto.resultset.ResultSet>` + object. This attribute is set if there are more domains than + the value specified in the ``max_domains`` keyword. Pass the + ``next_token`` value from your earlier query in this keyword to + get the next 'page' of domains. + """ + params = {} + if max_domains: + params['MaxNumberOfDomains'] = max_domains + if next_token: + params['NextToken'] = next_token + return self.get_list('ListDomains', params, [('DomainName', Domain)]) + + def create_domain(self, domain_name): + """ + Create a SimpleDB domain. + + :type domain_name: string + :param domain_name: The name of the new domain + + :rtype: :class:`boto.sdb.domain.Domain` object + :return: The newly created domain + """ + params = {'DomainName': domain_name} + d = self.get_object('CreateDomain', params, Domain) + d.name = domain_name + return d + + def get_domain_and_name(self, domain_or_name): + """ + Given a ``str`` or :class:`boto.sdb.domain.Domain`, return a + ``tuple`` with the following members (in order): + + * An instance of :class:`boto.sdb.domain.Domain` for the requested + domain + * The domain's name as a ``str`` + + :type domain_or_name: ``str`` or :class:`boto.sdb.domain.Domain` + :param domain_or_name: The domain or domain name to get the domain + and name for. + + :raises: :class:`boto.exception.SDBResponseError` when an invalid + domain name is specified. + + :rtype: tuple + :return: A ``tuple`` with contents outlined as per above. + """ + if (isinstance(domain_or_name, Domain)): + return (domain_or_name, domain_or_name.name) + else: + return (self.get_domain(domain_or_name), domain_or_name) + + def delete_domain(self, domain_or_name): + """ + Delete a SimpleDB domain. + + .. caution:: This will delete the domain and all items within the domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
+ :param domain_or_name: Either the name of a domain or a Domain object + + :rtype: bool + :return: True if successful + + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName': domain_name} + return self.get_status('DeleteDomain', params) + + def domain_metadata(self, domain_or_name): + """ + Get the Metadata for a SimpleDB domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. + :param domain_or_name: Either the name of a domain or a Domain object + + :rtype: :class:`boto.sdb.domain.DomainMetaData` object + :return: The newly created domain metadata object + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName': domain_name} + d = self.get_object('DomainMetadata', params, DomainMetaData) + d.domain = domain + return d + + def put_attributes(self, domain_or_name, item_name, attributes, + replace=True, expected_value=None): + """ + Store attributes for a given item in a domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. + :param domain_or_name: Either the name of a domain or a Domain object + + :type item_name: string + :param item_name: The name of the item whose attributes are being + stored. + + :type attributes: dict or dict-like object + :param attributes: The name/value pairs to store as attributes + + :type expected_value: list + :param expected_value: If supplied, this is a list or tuple consisting + of a single attribute name and expected value. The list can be + of the form: + + * ['name', 'value'] + + In which case the call will first verify that the attribute "name" + of this item has a value of "value". If it does, the put + will proceed, otherwise a ConditionalCheckFailed error will be + returned. The list can also be of the form: + + * ['name', True|False] + + which will simply check for the existence (True) or + non-existence (False) of the attribute. + + :type replace: bool + :param replace: Whether the attribute values passed in will replace + existing values or will be added as additional values. + Defaults to True. + + :rtype: bool + :return: True if successful + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName': domain_name, + 'ItemName': item_name} + self._build_name_value_list(params, attributes, replace) + if expected_value: + self._build_expected_value(params, expected_value) + return self.get_status('PutAttributes', params) + + def batch_put_attributes(self, domain_or_name, items, replace=True): + """ + Store attributes for multiple items in a domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. + :param domain_or_name: Either the name of a domain or a Domain object + + :type items: dict or dict-like object + :param items: A dictionary-like object. The keys of the dictionary are + the item names and the values are themselves dictionaries + of attribute names/values, exactly the same as the + ``attributes`` parameter of the scalar put_attributes + call. + + :type replace: bool + :param replace: Whether the attribute values passed in will replace + existing values or will be added as additional values. + Defaults to True.
+ + :rtype: bool + :return: True if successful + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName': domain_name} + self._build_batch_list(params, items, replace) + return self.get_status('BatchPutAttributes', params, verb='POST') + + def get_attributes(self, domain_or_name, item_name, attribute_names=None, + consistent_read=False, item=None): + """ + Retrieve attributes for a given item in a domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. + :param domain_or_name: Either the name of a domain or a Domain object + + :type item_name: string + :param item_name: The name of the item whose attributes are + being retrieved. + + :type attribute_names: string or list of strings + :param attribute_names: An attribute name or list of attribute names. + This parameter is optional. If not supplied, all attributes will + be retrieved for the item. + + :type consistent_read: bool + :param consistent_read: When set to true, ensures that the most recent + data is returned. + + :type item: :class:`boto.sdb.item.Item` + :keyword item: Instead of instantiating a new Item object, you may + specify one to update. + + :rtype: :class:`boto.sdb.item.Item` + :return: An Item with the requested attribute name/values set on it + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName': domain_name, + 'ItemName': item_name} + if consistent_read: + params['ConsistentRead'] = 'true' + if attribute_names: + if not isinstance(attribute_names, list): + attribute_names = [attribute_names] + self.build_list_params(params, attribute_names, 'AttributeName') + response = self.make_request('GetAttributes', params) + body = response.read() + if response.status == 200: + if item is None: + item = self.item_cls(domain, item_name) + h = handler.XmlHandler(item, self) + xml.sax.parseString(body, h) + return item + else: + raise SDBResponseError(response.status, response.reason, body) + + def delete_attributes(self, domain_or_name, item_name, attr_names=None, + expected_value=None): + """ + Delete attributes from a given item in a domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. + :param domain_or_name: Either the name of a domain or a Domain object + + :type item_name: string + :param item_name: The name of the item whose attributes are being + deleted. + + :type attr_names: dict, list or :class:`boto.sdb.item.Item` + :param attr_names: Either a list containing attribute names which + will cause all values associated with that attribute + name to be deleted or a dict or Item containing the + attribute names and keys and list of values to + delete as the value. If no value is supplied, + all attribute name/values for the item will be + deleted. + + :type expected_value: list + :param expected_value: If supplied, this is a list or tuple consisting + of a single attribute name and expected value. The list can be + of the form: + + * ['name', 'value'] + + In which case the call will first verify that the attribute "name" + of this item has a value of "value". If it does, the delete + will proceed, otherwise a ConditionalCheckFailed error will be + returned. The list can also be of the form: + + * ['name', True|False] + + which will simply check for the existence (True) or + non-existence (False) of the attribute.
+ + :rtype: bool + :return: True if successful + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName': domain_name, + 'ItemName': item_name} + if attr_names: + if isinstance(attr_names, list): + self._build_name_list(params, attr_names) + elif isinstance(attr_names, dict) or isinstance(attr_names, self.item_cls): + self._build_name_value_list(params, attr_names) + if expected_value: + self._build_expected_value(params, expected_value) + return self.get_status('DeleteAttributes', params) + + def batch_delete_attributes(self, domain_or_name, items): + """ + Delete multiple items in a domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. + :param domain_or_name: Either the name of a domain or a Domain object + + :type items: dict or dict-like object + :param items: A dictionary-like object. The keys of the dictionary are + the item names and the values are either: + + * dictionaries of attribute names/values, exactly the + same as the ``attributes`` parameter of the scalar + put_attributes call. The attribute name/value pairs + will only be deleted if they match the name/value + pairs passed in. + * None which means that all attributes associated + with the item should be deleted. + + :return: True if successful + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName': domain_name} + self._build_batch_list(params, items, False) + return self.get_status('BatchDeleteAttributes', params, verb='POST') + + def select(self, domain_or_name, query='', next_token=None, + consistent_read=False): + """ + Returns a set of Attributes for item names within domain_name that + match the query. The query must be expressed using the SELECT-style + syntax rather than the original SimpleDB query language. + Even though the select request does not require a domain object, + a domain object must be passed into this method so the Item objects + returned can point to the appropriate domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object + :param domain_or_name: Either the name of a domain or a Domain object + + :type query: string + :param query: The SimpleDB query to be performed. + + :type consistent_read: bool + :param consistent_read: When set to true, ensures that the most recent + data is returned. + + :rtype: ResultSet + :return: An iterator containing the results. + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'SelectExpression': query} + if consistent_read: + params['ConsistentRead'] = 'true' + if next_token: + params['NextToken'] = next_token + try: + return self.get_list('Select', params, [('Item', self.item_cls)], + parent=domain) + except SDBResponseError as e: + e.body = "Query: %s\n%s" % (query, e.body) + raise e
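With the connection API complete, a short end-to-end sketch may be useful. The ``users`` domain, item name, and attribute values below are purely illustrative, and credentials are assumed to come from the usual boto configuration::

    import boto.sdb

    # connect_to_region returns None for an unrecognized region name.
    conn = boto.sdb.connect_to_region('us-east-1')

    domain = conn.create_domain('users')
    conn.put_attributes(domain, 'jdoe', {'name': 'John', 'tags': ['a', 'b']})

    # consistent_read trades a little latency for read-after-write
    # consistency.
    item = conn.get_attributes(domain, 'jdoe', consistent_read=True)

    # Passing the Domain object lets the returned Items point back at it.
    for result in conn.select(domain, "select * from `users` where name = 'John'"):
        print(result)

    conn.delete_domain(domain)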
+ """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'SelectExpression': query} + if consistent_read: + params['ConsistentRead'] = 'true' + if next_token: + params['NextToken'] = next_token + try: + return self.get_list('Select', params, [('Item', self.item_cls)], + parent=domain) + except SDBResponseError as e: + e.body = "Query: %s\n%s" % (query, e.body) + raise e diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..71f6b7b738b2733e7f6fd51b4c924c7c134d370c --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/blob.py b/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/blob.py new file mode 100644 index 0000000000000000000000000000000000000000..6c286ec379f2e774fb1887e957d8a41919e19fcd --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/blob.py @@ -0,0 +1,76 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +from boto.compat import six + + +class Blob(object): + """Blob object""" + def __init__(self, value=None, file=None, id=None): + self._file = file + self.id = id + self.value = value + + @property + def file(self): + from StringIO import StringIO + if self._file: + f = self._file + else: + f = StringIO(self.value) + return f + + def __str__(self): + return six.text_type(self).encode('utf-8') + + def __unicode__(self): + if hasattr(self.file, "get_contents_as_string"): + value = self.file.get_contents_as_string() + else: + value = self.file.getvalue() + if isinstance(value, six.text_type): + return value + else: + return value.decode('utf-8') + + def read(self): + if hasattr(self.file, "get_contents_as_string"): + return self.file.get_contents_as_string() + else: + return self.file.read() + + def readline(self): + return self.file.readline() + + def next(self): + return next(self.file) + + def __iter__(self): + return iter(self.file) + + @property + def size(self): + if self._file: + return self._file.size + elif self.value: + return len(self.value) + else: + return 0 diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/key.py b/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/key.py new file mode 100644 index 0000000000000000000000000000000000000000..42f6bc9b3ac26cad3e45d0735123a9b8c0e525bf --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/key.py @@ -0,0 +1,59 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
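One note before ``Key``: the ``Blob`` class defined above can be exercised on its own, without S3. A small illustration under Python 2 semantics (matching the ``StringIO`` import in its ``file`` property)::

    from boto.sdb.db.blob import Blob

    # Wrap an in-memory value; .file exposes it as a file-like object.
    b = Blob(value='hello world')
    print(b.size)    # 11
    print(b.read())  # 'hello world'

    # When backed by an S3 key instead (via the file= argument), read()
    # delegates to the key's get_contents_as_string().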
+ +class Key(object): + + @classmethod + def from_path(cls, *args, **kwds): + raise NotImplementedError("Paths are not currently supported") + + def __init__(self, encoded=None, obj=None): + self.name = None + if obj: + self.id = obj.id + self.kind = obj.kind() + else: + self.id = None + self.kind = None + + def app(self): + raise NotImplementedError("Applications are not currently supported") + + def kind(self): + return self.kind + + def id(self): + return self.id + + def name(self): + raise NotImplementedError("Key Names are not currently supported") + + def id_or_name(self): + return self.id + + def has_id_or_name(self): + return self.id is not None + + def parent(self): + raise NotImplementedError("Key parents are not currently supported") + + def __str__(self): + return self.id_or_name() diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/manager/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/manager/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ded1716cbb6ece931c3276e3524d0a426c2668a9 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/manager/__init__.py @@ -0,0 +1,85 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import boto + + +def get_manager(cls): + """ + Returns the appropriate Manager class for a given Model class. It + does this by looking in the boto config for a section like this:: + + [DB] + db_type = SimpleDB + db_user = <aws access key id> + db_passwd = <aws secret access key> + db_name = my_domain + [DB_TestBasic] + db_type = SimpleDB + db_user = <another aws access key id> + db_passwd = <another aws secret access key> + db_name = basic_domain + db_port = 1111 + + The values in the DB section are "generic values" that will be used + if nothing more specific is found. You can also create a section for + a specific Model class that gives the db info for that class. + In the example above, TestBasic is a Model subclass.
+ """ + db_user = boto.config.get('DB', 'db_user', None) + db_passwd = boto.config.get('DB', 'db_passwd', None) + db_type = boto.config.get('DB', 'db_type', 'SimpleDB') + db_name = boto.config.get('DB', 'db_name', None) + db_table = boto.config.get('DB', 'db_table', None) + db_host = boto.config.get('DB', 'db_host', "sdb.amazonaws.com") + db_port = boto.config.getint('DB', 'db_port', 443) + enable_ssl = boto.config.getbool('DB', 'enable_ssl', True) + sql_dir = boto.config.get('DB', 'sql_dir', None) + debug = boto.config.getint('DB', 'debug', 0) + # first see if there is a fully qualified section name in the Boto config + module_name = cls.__module__.replace('.', '_') + db_section = 'DB_' + module_name + '_' + cls.__name__ + if not boto.config.has_section(db_section): + db_section = 'DB_' + cls.__name__ + if boto.config.has_section(db_section): + db_user = boto.config.get(db_section, 'db_user', db_user) + db_passwd = boto.config.get(db_section, 'db_passwd', db_passwd) + db_type = boto.config.get(db_section, 'db_type', db_type) + db_name = boto.config.get(db_section, 'db_name', db_name) + db_table = boto.config.get(db_section, 'db_table', db_table) + db_host = boto.config.get(db_section, 'db_host', db_host) + db_port = boto.config.getint(db_section, 'db_port', db_port) + enable_ssl = boto.config.getint(db_section, 'enable_ssl', enable_ssl) + debug = boto.config.getint(db_section, 'debug', debug) + elif hasattr(cls, "_db_name") and cls._db_name is not None: + # More specific then the generic DB config is any _db_name class property + db_name = cls._db_name + elif hasattr(cls.__bases__[0], "_manager"): + return cls.__bases__[0]._manager + if db_type == 'SimpleDB': + from boto.sdb.db.manager.sdbmanager import SDBManager + return SDBManager(cls, db_name, db_user, db_passwd, + db_host, db_port, db_table, sql_dir, enable_ssl) + elif db_type == 'XML': + from boto.sdb.db.manager.xmlmanager import XMLManager + return XMLManager(cls, db_name, db_user, db_passwd, + db_host, db_port, db_table, sql_dir, enable_ssl) + else: + raise ValueError('Unknown db_type: %s' % db_type) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/manager/sdbmanager.py b/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/manager/sdbmanager.py new file mode 100644 index 0000000000000000000000000000000000000000..d964d07a2d027956ed913d88f5dc92acae5f84f5 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/manager/sdbmanager.py @@ -0,0 +1,738 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import boto +import re +from boto.utils import find_class +import uuid +from boto.sdb.db.key import Key +from boto.sdb.db.blob import Blob +from boto.sdb.db.property import ListProperty, MapProperty +from datetime import datetime, date, time +from boto.exception import SDBPersistenceError, S3ResponseError +from boto.compat import map, six, long_type + +ISO8601 = '%Y-%m-%dT%H:%M:%SZ' + + +class TimeDecodeError(Exception): + pass + + +class SDBConverter(object): + """ + Responsible for converting base Python types to format compatible + with underlying database. For SimpleDB, that means everything + needs to be converted to a string when stored in SimpleDB and from + a string when retrieved. + + To convert a value, pass it to the encode or decode method. The + encode method will take a Python native value and convert to DB + format. The decode method will take a DB format value and convert + it to Python native format. To find the appropriate method to + call, the generic encode/decode methods will look for the + type-specific method by searching for a method + called "encode_<type name>" or "decode_<type name>". + """ + def __init__(self, manager): + # Do a delayed import to prevent possible circular import errors. + from boto.sdb.db.model import Model + self.model_class = Model + self.manager = manager + self.type_map = {bool: (self.encode_bool, self.decode_bool), + int: (self.encode_int, self.decode_int), + float: (self.encode_float, self.decode_float), + self.model_class: ( + self.encode_reference, self.decode_reference + ), + Key: (self.encode_reference, self.decode_reference), + datetime: (self.encode_datetime, self.decode_datetime), + date: (self.encode_date, self.decode_date), + time: (self.encode_time, self.decode_time), + Blob: (self.encode_blob, self.decode_blob), + str: (self.encode_string, self.decode_string), + } + if six.PY2: + self.type_map[long] = (self.encode_long, self.decode_long) + + def encode(self, item_type, value): + try: + if self.model_class in item_type.mro(): + item_type = self.model_class + except: + pass + if item_type in self.type_map: + encode = self.type_map[item_type][0] + return encode(value) + return value + + def decode(self, item_type, value): + if item_type in self.type_map: + decode = self.type_map[item_type][1] + return decode(value) + return value + + def encode_list(self, prop, value): + if value in (None, []): + return [] + if not isinstance(value, list): + # This is a little trick to avoid encoding when it's just a single value, + # since that most likely means it's from a query + item_type = getattr(prop, "item_type") + return self.encode(item_type, value) + # Just enumerate(value) won't work here because + # we need to add in some zero padding + # We support lists up to 1,000 attributes, since + # SDB technically only supports 1024 attributes anyway + values = {} + for k, v in enumerate(value): + values["%03d" % k] = v + return self.encode_map(prop, values) + + def encode_map(self, prop, value): + import urllib + if value is None: + return None + if not isinstance(value, dict): + raise ValueError('Expected a dict value, got %s' % type(value)) + new_value = [] + for key in value: + item_type = getattr(prop, "item_type") + if self.model_class in item_type.mro(): + item_type = self.model_class + encoded_value =
self.encode(item_type, value[key]) + if encoded_value is not None: + new_value.append('%s:%s' % (urllib.quote(key), encoded_value)) + return new_value + + def encode_prop(self, prop, value): + if isinstance(prop, ListProperty): + return self.encode_list(prop, value) + elif isinstance(prop, MapProperty): + return self.encode_map(prop, value) + else: + return self.encode(prop.data_type, value) + + def decode_list(self, prop, value): + if not isinstance(value, list): + value = [value] + if hasattr(prop, 'item_type'): + item_type = getattr(prop, "item_type") + dec_val = {} + for val in value: + if val is not None: + k, v = self.decode_map_element(item_type, val) + try: + k = int(k) + except: + k = v + dec_val[k] = v + value = dec_val.values() + return value + + def decode_map(self, prop, value): + if not isinstance(value, list): + value = [value] + ret_value = {} + item_type = getattr(prop, "item_type") + for val in value: + k, v = self.decode_map_element(item_type, val) + ret_value[k] = v + return ret_value + + def decode_map_element(self, item_type, value): + """Decode a single element for a map""" + import urllib + key = value + if ":" in value: + key, value = value.split(':', 1) + key = urllib.unquote(key) + if self.model_class in item_type.mro(): + value = item_type(id=value) + else: + value = self.decode(item_type, value) + return (key, value) + + def decode_prop(self, prop, value): + if isinstance(prop, ListProperty): + return self.decode_list(prop, value) + elif isinstance(prop, MapProperty): + return self.decode_map(prop, value) + else: + return self.decode(prop.data_type, value) + + def encode_int(self, value): + value = int(value) + value += 2147483648 + return '%010d' % value + + def decode_int(self, value): + try: + value = int(value) + except: + boto.log.error("Error, %s is not an integer" % value) + value = 0 + value = int(value) + value -= 2147483648 + return int(value) + + def encode_long(self, value): + value = long_type(value) + value += 9223372036854775808 + return '%020d' % value + + def decode_long(self, value): + value = long_type(value) + value -= 9223372036854775808 + return value + + def encode_bool(self, value): + if value == True or str(value).lower() in ("true", "yes"): + return 'true' + else: + return 'false' + + def decode_bool(self, value): + if value.lower() == 'true': + return True + else: + return False + + def encode_float(self, value): + """ + See http://tools.ietf.org/html/draft-wood-ldapext-float-00. 
+ """ + s = '%e' % value + l = s.split('e') + mantissa = l[0].ljust(18, '0') + exponent = l[1] + if value == 0.0: + case = '3' + exponent = '000' + elif mantissa[0] != '-' and exponent[0] == '+': + case = '5' + exponent = exponent[1:].rjust(3, '0') + elif mantissa[0] != '-' and exponent[0] == '-': + case = '4' + exponent = 999 + int(exponent) + exponent = '%03d' % exponent + elif mantissa[0] == '-' and exponent[0] == '-': + case = '2' + mantissa = '%f' % (10 + float(mantissa)) + mantissa = mantissa.ljust(18, '0') + exponent = exponent[1:].rjust(3, '0') + else: + case = '1' + mantissa = '%f' % (10 + float(mantissa)) + mantissa = mantissa.ljust(18, '0') + exponent = 999 - int(exponent) + exponent = '%03d' % exponent + return '%s %s %s' % (case, exponent, mantissa) + + def decode_float(self, value): + case = value[0] + exponent = value[2:5] + mantissa = value[6:] + if case == '3': + return 0.0 + elif case == '5': + pass + elif case == '4': + exponent = '%03d' % (int(exponent) - 999) + elif case == '2': + mantissa = '%f' % (float(mantissa) - 10) + exponent = '-' + exponent + else: + mantissa = '%f' % (float(mantissa) - 10) + exponent = '%03d' % abs((int(exponent) - 999)) + return float(mantissa + 'e' + exponent) + + def encode_datetime(self, value): + if isinstance(value, six.string_types): + return value + if isinstance(value, datetime): + return value.strftime(ISO8601) + else: + return value.isoformat() + + def decode_datetime(self, value): + """Handles both Dates and DateTime objects""" + if value is None: + return value + try: + if "T" in value: + if "." in value: + # Handle true "isoformat()" dates, which may have a microsecond on at the end of them + return datetime.strptime(value.split(".")[0], "%Y-%m-%dT%H:%M:%S") + else: + return datetime.strptime(value, ISO8601) + else: + value = value.split("-") + return date(int(value[0]), int(value[1]), int(value[2])) + except Exception: + return None + + def encode_date(self, value): + if isinstance(value, six.string_types): + return value + return value.isoformat() + + def decode_date(self, value): + try: + value = value.split("-") + return date(int(value[0]), int(value[1]), int(value[2])) + except: + return None + + encode_time = encode_date + + def decode_time(self, value): + """ converts strings in the form of HH:MM:SS.mmmmmm + (created by datetime.time.isoformat()) to + datetime.time objects. + + Timzone-aware strings ("HH:MM:SS.mmmmmm+HH:MM") won't + be handled right now and will raise TimeDecodeError. 
+ """ + if '-' in value or '+' in value: + # TODO: Handle tzinfo + raise TimeDecodeError("Can't handle timezone aware objects: %r" % value) + tmp = value.split('.') + arg = map(int, tmp[0].split(':')) + if len(tmp) == 2: + arg.append(int(tmp[1])) + return time(*arg) + + def encode_reference(self, value): + if value in (None, 'None', '', ' '): + return None + if isinstance(value, six.string_types): + return value + else: + return value.id + + def decode_reference(self, value): + if not value or value == "None": + return None + return value + + def encode_blob(self, value): + if not value: + return None + if isinstance(value, six.string_types): + return value + + if not value.id: + bucket = self.manager.get_blob_bucket() + key = bucket.new_key(str(uuid.uuid4())) + value.id = "s3://%s/%s" % (key.bucket.name, key.name) + else: + match = re.match("^s3:\/\/([^\/]*)\/(.*)$", value.id) + if match: + s3 = self.manager.get_s3_connection() + bucket = s3.get_bucket(match.group(1), validate=False) + key = bucket.get_key(match.group(2)) + else: + raise SDBPersistenceError("Invalid Blob ID: %s" % value.id) + + if value.value is not None: + key.set_contents_from_string(value.value) + return value.id + + def decode_blob(self, value): + if not value: + return None + match = re.match("^s3:\/\/([^\/]*)\/(.*)$", value) + if match: + s3 = self.manager.get_s3_connection() + bucket = s3.get_bucket(match.group(1), validate=False) + try: + key = bucket.get_key(match.group(2)) + except S3ResponseError as e: + if e.reason != "Forbidden": + raise + return None + else: + return None + if key: + return Blob(file=key, id="s3://%s/%s" % (key.bucket.name, key.name)) + else: + return None + + def encode_string(self, value): + """Convert ASCII, Latin-1 or UTF-8 to pure Unicode""" + if not isinstance(value, str): + return value + try: + return six.text_type(value, 'utf-8') + except: + # really, this should throw an exception. + # in the interest of not breaking current + # systems, however: + arr = [] + for ch in value: + arr.append(six.unichr(ord(ch))) + return u"".join(arr) + + def decode_string(self, value): + """Decoding a string is really nothing, just + return the value as-is""" + return value + + +class SDBManager(object): + + def __init__(self, cls, db_name, db_user, db_passwd, + db_host, db_port, db_table, ddl_dir, enable_ssl, + consistent=None): + self.cls = cls + self.db_name = db_name + self.db_user = db_user + self.db_passwd = db_passwd + self.db_host = db_host + self.db_port = db_port + self.db_table = db_table + self.ddl_dir = ddl_dir + self.enable_ssl = enable_ssl + self.s3 = None + self.bucket = None + self.converter = SDBConverter(self) + self._sdb = None + self._domain = None + if consistent is None and hasattr(cls, "__consistent__"): + consistent = cls.__consistent__ + self.consistent = consistent + + @property + def sdb(self): + if self._sdb is None: + self._connect() + return self._sdb + + @property + def domain(self): + if self._domain is None: + self._connect() + return self._domain + + def _connect(self): + args = dict(aws_access_key_id=self.db_user, + aws_secret_access_key=self.db_passwd, + is_secure=self.enable_ssl) + try: + region = [x for x in boto.sdb.regions() if x.endpoint == self.db_host][0] + args['region'] = region + except IndexError: + pass + self._sdb = boto.connect_sdb(**args) + # This assumes that the domain has already been created + # It's much more efficient to do it this way rather than + # having this make a roundtrip each time to validate. 
+ # The downside is that if the domain doesn't exist, it breaks + self._domain = self._sdb.lookup(self.db_name, validate=False) + if not self._domain: + self._domain = self._sdb.create_domain(self.db_name) + + def _object_lister(self, cls, query_lister): + for item in query_lister: + obj = self.get_object(cls, item.name, item) + if obj: + yield obj + + def encode_value(self, prop, value): + if value is None: + return None + if not prop: + return str(value) + return self.converter.encode_prop(prop, value) + + def decode_value(self, prop, value): + return self.converter.decode_prop(prop, value) + + def get_s3_connection(self): + if not self.s3: + self.s3 = boto.connect_s3(self.db_user, self.db_passwd) + return self.s3 + + def get_blob_bucket(self, bucket_name=None): + s3 = self.get_s3_connection() + bucket_name = "%s-%s" % (s3.aws_access_key_id, self.domain.name) + bucket_name = bucket_name.lower() + try: + self.bucket = s3.get_bucket(bucket_name) + except: + self.bucket = s3.create_bucket(bucket_name) + return self.bucket + + def load_object(self, obj): + if not obj._loaded: + a = self.domain.get_attributes(obj.id, consistent_read=self.consistent) + if '__type__' in a: + for prop in obj.properties(hidden=False): + if prop.name in a: + value = self.decode_value(prop, a[prop.name]) + value = prop.make_value_from_datastore(value) + try: + setattr(obj, prop.name, value) + except Exception as e: + boto.log.exception(e) + obj._loaded = True + + def get_object(self, cls, id, a=None): + obj = None + if not a: + a = self.domain.get_attributes(id, consistent_read=self.consistent) + if '__type__' in a: + if not cls or a['__type__'] != cls.__name__: + cls = find_class(a['__module__'], a['__type__']) + if cls: + params = {} + for prop in cls.properties(hidden=False): + if prop.name in a: + value = self.decode_value(prop, a[prop.name]) + value = prop.make_value_from_datastore(value) + params[prop.name] = value + obj = cls(id, **params) + obj._loaded = True + else: + s = '(%s) class %s.%s not found' % (id, a['__module__'], a['__type__']) + boto.log.info('sdbmanager: %s' % s) + return obj + + def get_object_from_id(self, id): + return self.get_object(None, id) + + def query(self, query): + query_str = "select * from `%s` %s" % (self.domain.name, self._build_filter_part(query.model_class, query.filters, query.sort_by, query.select)) + if query.limit: + query_str += " limit %s" % query.limit + rs = self.domain.select(query_str, max_items=query.limit, next_token=query.next_token) + query.rs = rs + return self._object_lister(query.model_class, rs) + + def count(self, cls, filters, quick=True, sort_by=None, select=None): + """ + Get the number of results that would + be returned in this query + """ + query = "select count(*) from `%s` %s" % (self.domain.name, self._build_filter_part(cls, filters, sort_by, select)) + count = 0 + for row in self.domain.select(query): + count += int(row['Count']) + if quick: + return count + return count + + def _build_filter(self, property, name, op, val): + if name == "__id__": + name = 'itemName()' + if name != "itemName()": + name = '`%s`' % name + if val is None: + if op in ('is', '='): + return "%(name)s is null" % {"name": name} + elif op in ('is not', '!='): + return "%s is not null" % name + else: + val = "" + if property.__class__ == ListProperty: + if op in ("is", "="): + op = "like" + elif op in ("!=", "not"): + op = "not like" + if not(op in ["like", "not like"] and val.startswith("%")): + val = "%%:%s" % val + return "%s %s '%s'" % (name, op, val.replace("'", "''")) 
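The WHERE fragments produced by ``_build_filter`` compare values as strings, which only works because ``SDBConverter`` stores numbers as offset, zero-padded strings (see ``encode_int``/``encode_long`` above). A quick illustration; instantiating the converter directly with ``manager=None`` is for demonstration only, since the integer codec never touches the manager::

    from boto.sdb.db.manager.sdbmanager import SDBConverter

    conv = SDBConverter(manager=None)
    assert conv.encode_int(-5) == '2147483643'
    assert conv.encode_int(0) == '2147483648'
    assert conv.encode_int(7) == '2147483655'
    # Lexicographic order now agrees with numeric order, so the quoted
    # values embedded in the query clauses compare correctly in SimpleDB.
    assert conv.encode_int(-5) < conv.encode_int(7)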
+ + def _build_filter_part(self, cls, filters, order_by=None, select=None): + """ + Build the filter part + """ + import types + query_parts = [] + + order_by_filtered = False + + if order_by: + if order_by[0] == "-": + order_by_method = "DESC" + order_by = order_by[1:] + else: + order_by_method = "ASC" + + if select: + if order_by and order_by in select: + order_by_filtered = True + query_parts.append("(%s)" % select) + + if isinstance(filters, six.string_types): + query = "WHERE %s AND `__type__` = '%s'" % (filters, cls.__name__) + if order_by in ["__id__", "itemName()"]: + query += " ORDER BY itemName() %s" % order_by_method + elif order_by is not None: + query += " ORDER BY `%s` %s" % (order_by, order_by_method) + return query + + for filter in filters: + filter_parts = [] + filter_props = filter[0] + if not isinstance(filter_props, list): + filter_props = [filter_props] + for filter_prop in filter_props: + (name, op) = filter_prop.strip().split(" ", 1) + value = filter[1] + property = cls.find_property(name) + if name == order_by: + order_by_filtered = True + if types.TypeType(value) == list: + filter_parts_sub = [] + for val in value: + val = self.encode_value(property, val) + if isinstance(val, list): + for v in val: + filter_parts_sub.append(self._build_filter(property, name, op, v)) + else: + filter_parts_sub.append(self._build_filter(property, name, op, val)) + filter_parts.append("(%s)" % (" OR ".join(filter_parts_sub))) + else: + val = self.encode_value(property, value) + if isinstance(val, list): + for v in val: + filter_parts.append(self._build_filter(property, name, op, v)) + else: + filter_parts.append(self._build_filter(property, name, op, val)) + query_parts.append("(%s)" % (" or ".join(filter_parts))) + + + type_query = "(`__type__` = '%s'" % cls.__name__ + for subclass in self._get_all_decendents(cls).keys(): + type_query += " or `__type__` = '%s'" % subclass + type_query += ")" + query_parts.append(type_query) + + order_by_query = "" + + if order_by: + if not order_by_filtered: + query_parts.append("`%s` LIKE '%%'" % order_by) + if order_by in ["__id__", "itemName()"]: + order_by_query = " ORDER BY itemName() %s" % order_by_method + else: + order_by_query = " ORDER BY `%s` %s" % (order_by, order_by_method) + + if len(query_parts) > 0: + return "WHERE %s %s" % (" AND ".join(query_parts), order_by_query) + else: + return "" + + + def _get_all_decendents(self, cls): + """Get all decendents for a given class""" + decendents = {} + for sc in cls.__sub_classes__: + decendents[sc.__name__] = sc + decendents.update(self._get_all_decendents(sc)) + return decendents + + def query_gql(self, query_string, *args, **kwds): + raise NotImplementedError("GQL queries not supported in SimpleDB") + + def save_object(self, obj, expected_value=None): + if not obj.id: + obj.id = str(uuid.uuid4()) + + attrs = {'__type__': obj.__class__.__name__, + '__module__': obj.__class__.__module__, + '__lineage__': obj.get_lineage()} + del_attrs = [] + for property in obj.properties(hidden=False): + value = property.get_value_for_datastore(obj) + if value is not None: + value = self.encode_value(property, value) + if value == []: + value = None + if value is None: + del_attrs.append(property.name) + continue + attrs[property.name] = value + if property.unique: + try: + args = {property.name: value} + obj2 = next(obj.find(**args)) + if obj2.id != obj.id: + raise SDBPersistenceError("Error: %s must be unique!" 
% property.name) + except(StopIteration): + pass + # Convert the Expected value to SDB format + if expected_value: + prop = obj.find_property(expected_value[0]) + v = expected_value[1] + if v is not None and not isinstance(v, bool): + v = self.encode_value(prop, v) + expected_value[1] = v + self.domain.put_attributes(obj.id, attrs, replace=True, expected_value=expected_value) + if len(del_attrs) > 0: + self.domain.delete_attributes(obj.id, del_attrs) + return obj + + def delete_object(self, obj): + self.domain.delete_attributes(obj.id) + + def set_property(self, prop, obj, name, value): + setattr(obj, name, value) + value = prop.get_value_for_datastore(obj) + value = self.encode_value(prop, value) + if prop.unique: + try: + args = {prop.name: value} + obj2 = next(obj.find(**args)) + if obj2.id != obj.id: + raise SDBPersistenceError("Error: %s must be unique!" % prop.name) + except(StopIteration): + pass + self.domain.put_attributes(obj.id, {name: value}, replace=True) + + def get_property(self, prop, obj, name): + a = self.domain.get_attributes(obj.id, consistent_read=self.consistent) + + # try to get the attribute value from SDB + if name in a: + value = self.decode_value(prop, a[name]) + value = prop.make_value_from_datastore(value) + setattr(obj, prop.name, value) + return value + raise AttributeError('%s not found' % name) + + def set_key_value(self, obj, name, value): + self.domain.put_attributes(obj.id, {name: value}, replace=True) + + def delete_key_value(self, obj, name): + self.domain.delete_attributes(obj.id, name) + + def get_key_value(self, obj, name): + a = self.domain.get_attributes(obj.id, name, consistent_read=self.consistent) + if name in a: + return a[name] + else: + return None + + def get_raw_item(self, obj): + return self.domain.get_item(obj.id) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/manager/xmlmanager.py b/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/manager/xmlmanager.py new file mode 100644 index 0000000000000000000000000000000000000000..f457347ad376d0cf5fef4b2e6926d1f115ae1e8d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/manager/xmlmanager.py @@ -0,0 +1,517 @@ +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+import boto +from boto.utils import find_class, Password +from boto.sdb.db.key import Key +from boto.sdb.db.model import Model +from boto.compat import six, encodebytes +from datetime import datetime +from xml.dom.minidom import getDOMImplementation, parse, parseString, Node + +ISO8601 = '%Y-%m-%dT%H:%M:%SZ' + +class XMLConverter(object): + """ + Responsible for converting base Python types to format compatible with underlying + database. For SimpleDB, that means everything needs to be converted to a string + when stored in SimpleDB and from a string when retrieved. + + To convert a value, pass it to the encode or decode method. The encode method + will take a Python native value and convert to DB format. The decode method will + take a DB format value and convert it to Python native format. To find the appropriate + method to call, the generic encode/decode methods will look for the type-specific + method by searching for a method called "encode_" or "decode_". + """ + def __init__(self, manager): + self.manager = manager + self.type_map = { bool : (self.encode_bool, self.decode_bool), + int : (self.encode_int, self.decode_int), + Model : (self.encode_reference, self.decode_reference), + Key : (self.encode_reference, self.decode_reference), + Password : (self.encode_password, self.decode_password), + datetime : (self.encode_datetime, self.decode_datetime)} + if six.PY2: + self.type_map[long] = (self.encode_long, self.decode_long) + + def get_text_value(self, parent_node): + value = '' + for node in parent_node.childNodes: + if node.nodeType == node.TEXT_NODE: + value += node.data + return value + + def encode(self, item_type, value): + if item_type in self.type_map: + encode = self.type_map[item_type][0] + return encode(value) + return value + + def decode(self, item_type, value): + if item_type in self.type_map: + decode = self.type_map[item_type][1] + return decode(value) + else: + value = self.get_text_value(value) + return value + + def encode_prop(self, prop, value): + if isinstance(value, list): + if hasattr(prop, 'item_type'): + new_value = [] + for v in value: + item_type = getattr(prop, "item_type") + if Model in item_type.mro(): + item_type = Model + new_value.append(self.encode(item_type, v)) + return new_value + else: + return value + else: + return self.encode(prop.data_type, value) + + def decode_prop(self, prop, value): + if prop.data_type == list: + if hasattr(prop, 'item_type'): + item_type = getattr(prop, "item_type") + if Model in item_type.mro(): + item_type = Model + values = [] + for item_node in value.getElementsByTagName('item'): + value = self.decode(item_type, item_node) + values.append(value) + return values + else: + return self.get_text_value(value) + else: + return self.decode(prop.data_type, value) + + def encode_int(self, value): + value = int(value) + return '%d' % value + + def decode_int(self, value): + value = self.get_text_value(value) + if value: + value = int(value) + else: + value = None + return value + + def encode_long(self, value): + value = long(value) + return '%d' % value + + def decode_long(self, value): + value = self.get_text_value(value) + return long(value) + + def encode_bool(self, value): + if value == True: + return 'true' + else: + return 'false' + + def decode_bool(self, value): + value = self.get_text_value(value) + if value.lower() == 'true': + return True + else: + return False + + def encode_datetime(self, value): + return value.strftime(ISO8601) + + def decode_datetime(self, value): + value = self.get_text_value(value) + try: 
+ return datetime.strptime(value, ISO8601) + except: + return None + + def encode_reference(self, value): + if isinstance(value, six.string_types): + return value + if value is None: + return '' + else: + val_node = self.manager.doc.createElement("object") + val_node.setAttribute('id', value.id) + val_node.setAttribute('class', '%s.%s' % (value.__class__.__module__, value.__class__.__name__)) + return val_node + + def decode_reference(self, value): + if not value: + return None + try: + value = value.childNodes[0] + class_name = value.getAttribute("class") + id = value.getAttribute("id") + cls = find_class(class_name) + return cls.get_by_ids(id) + except: + return None + + def encode_password(self, value): + if value and len(value) > 0: + return str(value) + else: + return None + + def decode_password(self, value): + value = self.get_text_value(value) + return Password(value) + + +class XMLManager(object): + + def __init__(self, cls, db_name, db_user, db_passwd, + db_host, db_port, db_table, ddl_dir, enable_ssl): + self.cls = cls + if not db_name: + db_name = cls.__name__.lower() + self.db_name = db_name + self.db_user = db_user + self.db_passwd = db_passwd + self.db_host = db_host + self.db_port = db_port + self.db_table = db_table + self.ddl_dir = ddl_dir + self.s3 = None + self.converter = XMLConverter(self) + self.impl = getDOMImplementation() + self.doc = self.impl.createDocument(None, 'objects', None) + + self.connection = None + self.enable_ssl = enable_ssl + self.auth_header = None + if self.db_user: + base64string = encodebytes('%s:%s' % (self.db_user, self.db_passwd))[:-1] + authheader = "Basic %s" % base64string + self.auth_header = authheader + + def _connect(self): + if self.db_host: + if self.enable_ssl: + from httplib import HTTPSConnection as Connection + else: + from httplib import HTTPConnection as Connection + + self.connection = Connection(self.db_host, self.db_port) + + def _make_request(self, method, url, post_data=None, body=None): + """ + Make a request on this connection + """ + if not self.connection: + self._connect() + try: + self.connection.close() + except: + pass + self.connection.connect() + headers = {} + if self.auth_header: + headers["Authorization"] = self.auth_header + self.connection.request(method, url, body, headers) + resp = self.connection.getresponse() + return resp + + def new_doc(self): + return self.impl.createDocument(None, 'objects', None) + + def _object_lister(self, cls, doc): + for obj_node in doc.getElementsByTagName('object'): + if not cls: + class_name = obj_node.getAttribute('class') + cls = find_class(class_name) + id = obj_node.getAttribute('id') + obj = cls(id) + for prop_node in obj_node.getElementsByTagName('property'): + prop_name = prop_node.getAttribute('name') + prop = obj.find_property(prop_name) + if prop: + if hasattr(prop, 'item_type'): + value = self.get_list(prop_node, prop.item_type) + else: + value = self.decode_value(prop, prop_node) + value = prop.make_value_from_datastore(value) + setattr(obj, prop.name, value) + yield obj + + def reset(self): + self._connect() + + def get_doc(self): + return self.doc + + def encode_value(self, prop, value): + return self.converter.encode_prop(prop, value) + + def decode_value(self, prop, value): + return self.converter.decode_prop(prop, value) + + def get_s3_connection(self): + if not self.s3: + self.s3 = boto.connect_s3(self.aws_access_key_id, self.aws_secret_access_key) + return self.s3 + + def get_list(self, prop_node, item_type): + values = [] + try: + items_node = 
prop_node.getElementsByTagName('items')[0]
+        except IndexError:
+            return []
+        for item_node in items_node.getElementsByTagName('item'):
+            value = self.converter.decode(item_type, item_node)
+            values.append(value)
+        return values
+
+    def get_object_from_doc(self, cls, id, doc):
+        obj_node = doc.getElementsByTagName('object')[0]
+        if not cls:
+            class_name = obj_node.getAttribute('class')
+            cls = find_class(class_name)
+        if not id:
+            id = obj_node.getAttribute('id')
+        obj = cls(id)
+        for prop_node in obj_node.getElementsByTagName('property'):
+            prop_name = prop_node.getAttribute('name')
+            prop = obj.find_property(prop_name)
+            value = self.decode_value(prop, prop_node)
+            value = prop.make_value_from_datastore(value)
+            if value is not None:
+                try:
+                    setattr(obj, prop.name, value)
+                except Exception:
+                    pass
+        return obj
+
+    def get_props_from_doc(self, cls, id, doc):
+        """
+        Pull out the properties from this document.
+
+        :return: A tuple of (cls, props, id), where props is a dict of
+            property name/value pairs.
+        """
+        obj_node = doc.getElementsByTagName('object')[0]
+        if not cls:
+            class_name = obj_node.getAttribute('class')
+            cls = find_class(class_name)
+        if not id:
+            id = obj_node.getAttribute('id')
+        props = {}
+        for prop_node in obj_node.getElementsByTagName('property'):
+            prop_name = prop_node.getAttribute('name')
+            prop = cls.find_property(prop_name)
+            value = self.decode_value(prop, prop_node)
+            value = prop.make_value_from_datastore(value)
+            if value is not None:
+                props[prop.name] = value
+        return (cls, props, id)
+
+    def get_object(self, cls, id):
+        if not self.connection:
+            self._connect()
+
+        if not self.connection:
+            raise NotImplementedError("Can't query without a database connection")
+        url = "/%s/%s" % (self.db_name, id)
+        resp = self._make_request('GET', url)
+        if resp.status == 200:
+            doc = parse(resp)
+        else:
+            raise Exception("Error: %s" % resp.status)
+        return self.get_object_from_doc(cls, id, doc)
+
+    def query(self, cls, filters, limit=None, order_by=None):
+        if not self.connection:
+            self._connect()
+
+        if not self.connection:
+            raise NotImplementedError("Can't query without a database connection")
+
+        # six.moves keeps this working on both Python 2 and Python 3
+        urlencode = six.moves.urllib.parse.urlencode
+
+        query = str(self._build_query(cls, filters, limit, order_by))
+        if query:
+            url = "/%s?%s" % (self.db_name, urlencode({"query": query}))
+        else:
+            url = "/%s" % self.db_name
+        resp = self._make_request('GET', url)
+        if resp.status == 200:
+            doc = parse(resp)
+        else:
+            raise Exception("Error: %s" % resp.status)
+        return self._object_lister(cls, doc)
+
+    def _build_query(self, cls, filters, limit, order_by):
+        if len(filters) > 4:
+            raise Exception('Too many filters, max is 4')
+        parts = []
+        properties = cls.properties(hidden=False)
+        for filter, value in filters:
+            name, op = filter.strip().split()
+            found = False
+            for property in properties:
+                if property.name == name:
+                    found = True
+                    if isinstance(value, list):
+                        filter_parts = []
+                        for val in value:
+                            val = self.encode_value(property, val)
+                            filter_parts.append("'%s' %s '%s'" % (name, op, val))
+                        parts.append("[%s]" % " OR ".join(filter_parts))
+                    else:
+                        value = self.encode_value(property, value)
+                        parts.append("['%s' %s '%s']" % (name, op, value))
+            if not found:
+                raise Exception('%s is not a valid field' % name)
+        if order_by:
+            if order_by.startswith("-"):
+                key = order_by[1:]
+                type = "desc"
+            else:
+                key = order_by
+                type = "asc"
+            parts.append("['%s' starts-with ''] sort '%s' %s" % (key, key, type))
+        return ' intersection '.join(parts)
+
+    def query_gql(self,
query_string, *args, **kwds): + raise NotImplementedError("GQL queries not supported in XML") + + def save_list(self, doc, items, prop_node): + items_node = doc.createElement('items') + prop_node.appendChild(items_node) + for item in items: + item_node = doc.createElement('item') + items_node.appendChild(item_node) + if isinstance(item, Node): + item_node.appendChild(item) + else: + text_node = doc.createTextNode(item) + item_node.appendChild(text_node) + + def save_object(self, obj, expected_value=None): + """ + Marshal the object and do a PUT + """ + doc = self.marshal_object(obj) + if obj.id: + url = "/%s/%s" % (self.db_name, obj.id) + else: + url = "/%s" % (self.db_name) + resp = self._make_request("PUT", url, body=doc.toxml()) + new_obj = self.get_object_from_doc(obj.__class__, None, parse(resp)) + obj.id = new_obj.id + for prop in obj.properties(): + try: + propname = prop.name + except AttributeError: + propname = None + if propname: + value = getattr(new_obj, prop.name) + if value: + setattr(obj, prop.name, value) + return obj + + + def marshal_object(self, obj, doc=None): + if not doc: + doc = self.new_doc() + if not doc: + doc = self.doc + obj_node = doc.createElement('object') + + if obj.id: + obj_node.setAttribute('id', obj.id) + + obj_node.setAttribute('class', '%s.%s' % (obj.__class__.__module__, + obj.__class__.__name__)) + root = doc.documentElement + root.appendChild(obj_node) + for property in obj.properties(hidden=False): + prop_node = doc.createElement('property') + prop_node.setAttribute('name', property.name) + prop_node.setAttribute('type', property.type_name) + value = property.get_value_for_datastore(obj) + if value is not None: + value = self.encode_value(property, value) + if isinstance(value, list): + self.save_list(doc, value, prop_node) + elif isinstance(value, Node): + prop_node.appendChild(value) + else: + text_node = doc.createTextNode(six.text_type(value).encode("ascii", "ignore")) + prop_node.appendChild(text_node) + obj_node.appendChild(prop_node) + + return doc + + def unmarshal_object(self, fp, cls=None, id=None): + if isinstance(fp, six.string_types): + doc = parseString(fp) + else: + doc = parse(fp) + return self.get_object_from_doc(cls, id, doc) + + def unmarshal_props(self, fp, cls=None, id=None): + """ + Same as unmarshalling an object, except it returns + from "get_props_from_doc" + """ + if isinstance(fp, six.string_types): + doc = parseString(fp) + else: + doc = parse(fp) + return self.get_props_from_doc(cls, id, doc) + + def delete_object(self, obj): + url = "/%s/%s" % (self.db_name, obj.id) + return self._make_request("DELETE", url) + + def set_key_value(self, obj, name, value): + self.domain.put_attributes(obj.id, {name: value}, replace=True) + + def delete_key_value(self, obj, name): + self.domain.delete_attributes(obj.id, name) + + def get_key_value(self, obj, name): + a = self.domain.get_attributes(obj.id, name) + if name in a: + return a[name] + else: + return None + + def get_raw_item(self, obj): + return self.domain.get_item(obj.id) + + def set_property(self, prop, obj, name, value): + pass + + def get_property(self, prop, obj, name): + pass + + def load_object(self, obj): + if not obj._loaded: + obj = obj.get_by_id(obj.id) + obj._loaded = True + return obj diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/model.py b/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/model.py new file mode 100644 index 0000000000000000000000000000000000000000..741ad4387143c97291271a7b2e4d02b3d675f32c --- /dev/null +++ 
b/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/model.py @@ -0,0 +1,296 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.sdb.db.property import Property +from boto.sdb.db.key import Key +from boto.sdb.db.query import Query +import boto +from boto.compat import filter + +class ModelMeta(type): + "Metaclass for all Models" + + def __init__(cls, name, bases, dict): + super(ModelMeta, cls).__init__(name, bases, dict) + # Make sure this is a subclass of Model - mainly copied from django ModelBase (thanks!) + cls.__sub_classes__ = [] + + # Do a delayed import to prevent possible circular import errors. + from boto.sdb.db.manager import get_manager + + try: + if filter(lambda b: issubclass(b, Model), bases): + for base in bases: + base.__sub_classes__.append(cls) + cls._manager = get_manager(cls) + # look for all of the Properties and set their names + for key in dict.keys(): + if isinstance(dict[key], Property): + property = dict[key] + property.__property_config__(cls, key) + prop_names = [] + props = cls.properties() + for prop in props: + if not prop.__class__.__name__.startswith('_'): + prop_names.append(prop.name) + setattr(cls, '_prop_names', prop_names) + except NameError: + # 'Model' isn't defined yet, meaning we're looking at our own + # Model class, defined below. 
+                pass
+
+class Model(object):
+    __metaclass__ = ModelMeta
+    __consistent__ = False  # Consistent reads are off by default
+    id = None
+
+    @classmethod
+    def get_lineage(cls):
+        l = [c.__name__ for c in cls.mro()]
+        l.reverse()
+        return '.'.join(l)
+
+    @classmethod
+    def kind(cls):
+        return cls.__name__
+
+    @classmethod
+    def _get_by_id(cls, id, manager=None):
+        if not manager:
+            manager = cls._manager
+        return manager.get_object(cls, id)
+
+    @classmethod
+    def get_by_id(cls, ids=None, parent=None):
+        if isinstance(ids, list):
+            objs = [cls._get_by_id(id) for id in ids]
+            return objs
+        else:
+            return cls._get_by_id(ids)
+
+    get_by_ids = get_by_id
+
+    @classmethod
+    def get_by_key_name(cls, key_names, parent=None):
+        raise NotImplementedError("Key Names are not currently supported")
+
+    @classmethod
+    def find(cls, limit=None, next_token=None, **params):
+        q = Query(cls, limit=limit, next_token=next_token)
+        for key, value in params.items():
+            q.filter('%s =' % key, value)
+        return q
+
+    @classmethod
+    def all(cls, limit=None, next_token=None):
+        return cls.find(limit=limit, next_token=next_token)
+
+    @classmethod
+    def get_or_insert(cls, key_name, **kw):
+        raise NotImplementedError("get_or_insert not currently supported")
+
+    @classmethod
+    def properties(cls, hidden=True):
+        properties = []
+        while cls:
+            for key in cls.__dict__.keys():
+                prop = cls.__dict__[key]
+                if isinstance(prop, Property):
+                    if hidden or not prop.__class__.__name__.startswith('_'):
+                        properties.append(prop)
+            if len(cls.__bases__) > 0:
+                cls = cls.__bases__[0]
+            else:
+                cls = None
+        return properties
+
+    @classmethod
+    def find_property(cls, prop_name):
+        property = None
+        while cls:
+            for key in cls.__dict__.keys():
+                prop = cls.__dict__[key]
+                if isinstance(prop, Property):
+                    if not prop.__class__.__name__.startswith('_') and prop_name == prop.name:
+                        property = prop
+            if len(cls.__bases__) > 0:
+                cls = cls.__bases__[0]
+            else:
+                cls = None
+        return property
+
+    @classmethod
+    def get_xmlmanager(cls):
+        if not hasattr(cls, '_xmlmanager'):
+            from boto.sdb.db.manager.xmlmanager import XMLManager
+            cls._xmlmanager = XMLManager(cls, None, None, None,
+                                         None, None, None, None, False)
+        return cls._xmlmanager
+
+    @classmethod
+    def from_xml(cls, fp):
+        xmlmanager = cls.get_xmlmanager()
+        return xmlmanager.unmarshal_object(fp)
+
+    def __init__(self, id=None, **kw):
+        self._loaded = False
+        # first try to initialize all properties to their default values
+        for prop in self.properties(hidden=False):
+            try:
+                setattr(self, prop.name, prop.default_value())
+            except ValueError:
+                pass
+        if 'manager' in kw:
+            self._manager = kw['manager']
+        self.id = id
+        for key in kw:
+            if key != 'manager':
+                # We don't want any errors propagating up when loading an object,
+                # so if setting a value fails we just leave it at its default
+                try:
+                    setattr(self, key, kw[key])
+                except Exception as e:
+                    boto.log.exception(e)
+
+    def __repr__(self):
+        return '%s<%s>' % (self.__class__.__name__, self.id)
+
+    def __str__(self):
+        return str(self.id)
+
+    def __eq__(self, other):
+        return isinstance(other, Model) and self.id == other.id
+
+    def _get_raw_item(self):
+        return self._manager.get_raw_item(self)
+
+    def load(self):
+        if self.id and not self._loaded:
+            self._manager.load_object(self)
+
+    def reload(self):
+        if self.id:
+            self._loaded = False
+            self._manager.load_object(self)
+
+    def put(self, expected_value=None):
+        """
+        Save this object as it is, with an optional expected value.
+
+        :param expected_value: Optional tuple of Attribute and Value
that + must be the same in order to save this object. If this + condition is not met, an SDBResponseError will be raised with a + Confict status code. + :type expected_value: tuple or list + :return: This object + :rtype: :class:`boto.sdb.db.model.Model` + """ + self._manager.save_object(self, expected_value) + return self + + save = put + + def put_attributes(self, attrs): + """ + Save just these few attributes, not the whole object + + :param attrs: Attributes to save, key->value dict + :type attrs: dict + :return: self + :rtype: :class:`boto.sdb.db.model.Model` + """ + assert(isinstance(attrs, dict)), "Argument must be a dict of key->values to save" + for prop_name in attrs: + value = attrs[prop_name] + prop = self.find_property(prop_name) + assert(prop), "Property not found: %s" % prop_name + self._manager.set_property(prop, self, prop_name, value) + self.reload() + return self + + def delete_attributes(self, attrs): + """ + Delete just these attributes, not the whole object. + + :param attrs: Attributes to save, as a list of string names + :type attrs: list + :return: self + :rtype: :class:`boto.sdb.db.model.Model` + """ + assert(isinstance(attrs, list)), "Argument must be a list of names of keys to delete." + self._manager.domain.delete_attributes(self.id, attrs) + self.reload() + return self + + save_attributes = put_attributes + + def delete(self): + self._manager.delete_object(self) + + def key(self): + return Key(obj=self) + + def set_manager(self, manager): + self._manager = manager + + def to_dict(self): + props = {} + for prop in self.properties(hidden=False): + props[prop.name] = getattr(self, prop.name) + obj = {'properties': props, + 'id': self.id} + return {self.__class__.__name__: obj} + + def to_xml(self, doc=None): + xmlmanager = self.get_xmlmanager() + doc = xmlmanager.marshal_object(self, doc) + return doc + + @classmethod + def find_subclass(cls, name): + """Find a subclass with a given name""" + if name == cls.__name__: + return cls + for sc in cls.__sub_classes__: + r = sc.find_subclass(name) + if r is not None: + return r + +class Expando(Model): + + def __setattr__(self, name, value): + if name in self._prop_names: + object.__setattr__(self, name, value) + elif name.startswith('_'): + object.__setattr__(self, name, value) + elif name == 'id': + object.__setattr__(self, name, value) + else: + self._manager.set_key_value(self, name, value) + object.__setattr__(self, name, value) + + def __getattr__(self, name): + if not name.startswith('_'): + value = self._manager.get_key_value(self, name) + if value: + object.__setattr__(self, name, value) + return value + raise AttributeError diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/property.py b/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/property.py new file mode 100644 index 0000000000000000000000000000000000000000..575aa8924dd7b0b57f18acf0f75448a23a3d299c --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/property.py @@ -0,0 +1,704 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this 
permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import datetime +from boto.sdb.db.key import Key +from boto.utils import Password +from boto.sdb.db.query import Query +import re +import boto +import boto.s3.key +from boto.sdb.db.blob import Blob +from boto.compat import six, long_type + + +class Property(object): + + data_type = str + type_name = '' + name = '' + verbose_name = '' + + def __init__(self, verbose_name=None, name=None, default=None, + required=False, validator=None, choices=None, unique=False): + self.verbose_name = verbose_name + self.name = name + self.default = default + self.required = required + self.validator = validator + self.choices = choices + if self.name: + self.slot_name = '_' + self.name + else: + self.slot_name = '_' + self.unique = unique + + def __get__(self, obj, objtype): + if obj: + obj.load() + return getattr(obj, self.slot_name) + else: + return None + + def __set__(self, obj, value): + self.validate(value) + + # Fire off any on_set functions + try: + if obj._loaded and hasattr(obj, "on_set_%s" % self.name): + fnc = getattr(obj, "on_set_%s" % self.name) + value = fnc(value) + except Exception: + boto.log.exception("Exception running on_set_%s" % self.name) + + setattr(obj, self.slot_name, value) + + def __property_config__(self, model_class, property_name): + self.model_class = model_class + self.name = property_name + self.slot_name = '_' + self.name + + def default_validator(self, value): + if isinstance(value, six.string_types) or value == self.default_value(): + return + if not isinstance(value, self.data_type): + raise TypeError('Validation Error, %s.%s expecting %s, got %s' % (self.model_class.__name__, self.name, self.data_type, type(value))) + + def default_value(self): + return self.default + + def validate(self, value): + if self.required and value is None: + raise ValueError('%s is a required property' % self.name) + if self.choices and value and value not in self.choices: + raise ValueError('%s not a valid choice for %s.%s' % (value, self.model_class.__name__, self.name)) + if self.validator: + self.validator(value) + else: + self.default_validator(value) + return value + + def empty(self, value): + return not value + + def get_value_for_datastore(self, model_instance): + return getattr(model_instance, self.name) + + def make_value_from_datastore(self, value): + return value + + def get_choices(self): + if callable(self.choices): + return self.choices() + return self.choices + + +def validate_string(value): + if value is None: + return + elif isinstance(value, six.string_types): + if len(value) > 1024: + raise ValueError('Length of value greater than maxlength') + else: + raise TypeError('Expecting String, got %s' % type(value)) + + +class StringProperty(Property): + + type_name = 'String' + + def __init__(self, verbose_name=None, name=None, default='', + required=False, validator=validate_string, + choices=None, unique=False): + super(StringProperty, self).__init__(verbose_name, name, default, required, + validator, choices, unique) + + 
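+
+# A minimal usage sketch (the User model below is hypothetical, not part of
+# boto): properties are declared as class attributes on a Model subclass and
+# are validated on assignment, so the validate_string validator above rejects
+# oversized values as soon as they are set:
+#
+#     from boto.sdb.db.model import Model
+#
+#     class User(Model):
+#         name = StringProperty(required=True)
+#
+#     u = User()
+#     u.name = 'Ada'         # OK
+#     u.name = 'x' * 2048    # ValueError: longer than the 1024-char limit
+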
+class TextProperty(Property):
+
+    type_name = 'Text'
+
+    def __init__(self, verbose_name=None, name=None, default='',
+                 required=False, validator=None, choices=None,
+                 unique=False, max_length=None):
+        super(TextProperty, self).__init__(verbose_name, name, default, required,
+                                           validator, choices, unique)
+        self.max_length = max_length
+
+    def validate(self, value):
+        value = super(TextProperty, self).validate(value)
+        if not isinstance(value, six.string_types):
+            raise TypeError('Expecting Text, got %s' % type(value))
+        if self.max_length and len(value) > self.max_length:
+            raise ValueError('Length of value greater than max_length %s' % self.max_length)
+
+
+class PasswordProperty(StringProperty):
+    """
+    Hashed property whose original value cannot be retrieved, but
+    can still be compared.
+
+    Works by storing a hash of the original value instead of the
+    original value itself. Once stored, all that can be retrieved
+    is the hash.
+
+    The comparison
+
+        obj.password == 'foo'
+
+    generates a hash of 'foo' and compares it to the stored hash.
+
+    The underlying data type for hashing, storing, and comparing is
+    boto.utils.Password. The default hash function is defined there
+    (currently sha512 in most cases, md5 where sha512 is not available).
+
+    It's unlikely you'll ever need to use a different hash function,
+    but if you do, you can control the behavior in one of two ways:
+
+    1) Specifying hashfunc in the PasswordProperty constructor
+
+        import hashlib
+
+        class MyModel(Model):
+            password = PasswordProperty(hashfunc=hashlib.sha224)
+
+    2) Subclassing Password and PasswordProperty
+
+        class SHA224Password(Password):
+            hashfunc = hashlib.sha224
+
+        class SHA224PasswordProperty(PasswordProperty):
+            data_type = SHA224Password
+            type_name = "SHA224Password"
+
+        class MyModel(Model):
+            password = SHA224PasswordProperty()
+    """
+    data_type = Password
+    type_name = 'Password'
+
+    def __init__(self, verbose_name=None, name=None, default='', required=False,
+                 validator=None, choices=None, unique=False, hashfunc=None):
+        """
+        The hashfunc parameter overrides the default hashfunc in boto.utils.Password.
+ + The remaining parameters are passed through to StringProperty.__init__""" + + super(PasswordProperty, self).__init__(verbose_name, name, default, required, + validator, choices, unique) + self.hashfunc = hashfunc + + def make_value_from_datastore(self, value): + p = self.data_type(value, hashfunc=self.hashfunc) + return p + + def get_value_for_datastore(self, model_instance): + value = super(PasswordProperty, self).get_value_for_datastore(model_instance) + if value and len(value): + return str(value) + else: + return None + + def __set__(self, obj, value): + if not isinstance(value, self.data_type): + p = self.data_type(hashfunc=self.hashfunc) + p.set(value) + value = p + super(PasswordProperty, self).__set__(obj, value) + + def __get__(self, obj, objtype): + return self.data_type(super(PasswordProperty, self).__get__(obj, objtype), hashfunc=self.hashfunc) + + def validate(self, value): + value = super(PasswordProperty, self).validate(value) + if isinstance(value, self.data_type): + if len(value) > 1024: + raise ValueError('Length of value greater than maxlength') + else: + raise TypeError('Expecting %s, got %s' % (type(self.data_type), type(value))) + + +class BlobProperty(Property): + data_type = Blob + type_name = "blob" + + def __set__(self, obj, value): + if value != self.default_value(): + if not isinstance(value, Blob): + oldb = self.__get__(obj, type(obj)) + id = None + if oldb: + id = oldb.id + b = Blob(value=value, id=id) + value = b + super(BlobProperty, self).__set__(obj, value) + + +class S3KeyProperty(Property): + + data_type = boto.s3.key.Key + type_name = 'S3Key' + validate_regex = "^s3:\/\/([^\/]*)\/(.*)$" + + def __init__(self, verbose_name=None, name=None, default=None, + required=False, validator=None, choices=None, unique=False): + super(S3KeyProperty, self).__init__(verbose_name, name, default, required, + validator, choices, unique) + + def validate(self, value): + value = super(S3KeyProperty, self).validate(value) + if value == self.default_value() or value == str(self.default_value()): + return self.default_value() + if isinstance(value, self.data_type): + return + match = re.match(self.validate_regex, value) + if match: + return + raise TypeError('Validation Error, expecting %s, got %s' % (self.data_type, type(value))) + + def __get__(self, obj, objtype): + value = super(S3KeyProperty, self).__get__(obj, objtype) + if value: + if isinstance(value, self.data_type): + return value + match = re.match(self.validate_regex, value) + if match: + s3 = obj._manager.get_s3_connection() + bucket = s3.get_bucket(match.group(1), validate=False) + k = bucket.get_key(match.group(2)) + if not k: + k = bucket.new_key(match.group(2)) + k.set_contents_from_string("") + return k + else: + return value + + def get_value_for_datastore(self, model_instance): + value = super(S3KeyProperty, self).get_value_for_datastore(model_instance) + if value: + return "s3://%s/%s" % (value.bucket.name, value.name) + else: + return None + + +class IntegerProperty(Property): + + data_type = int + type_name = 'Integer' + + def __init__(self, verbose_name=None, name=None, default=0, required=False, + validator=None, choices=None, unique=False, max=2147483647, min=-2147483648): + super(IntegerProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) + self.max = max + self.min = min + + def validate(self, value): + value = int(value) + value = super(IntegerProperty, self).validate(value) + if value > self.max: + raise ValueError('Maximum value is %d' % self.max) + if 
value < self.min: + raise ValueError('Minimum value is %d' % self.min) + return value + + def empty(self, value): + return value is None + + def __set__(self, obj, value): + if value == "" or value is None: + value = 0 + return super(IntegerProperty, self).__set__(obj, value) + + +class LongProperty(Property): + + data_type = long_type + type_name = 'Long' + + def __init__(self, verbose_name=None, name=None, default=0, required=False, + validator=None, choices=None, unique=False): + super(LongProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) + + def validate(self, value): + value = long_type(value) + value = super(LongProperty, self).validate(value) + min = -9223372036854775808 + max = 9223372036854775807 + if value > max: + raise ValueError('Maximum value is %d' % max) + if value < min: + raise ValueError('Minimum value is %d' % min) + return value + + def empty(self, value): + return value is None + + +class BooleanProperty(Property): + + data_type = bool + type_name = 'Boolean' + + def __init__(self, verbose_name=None, name=None, default=False, required=False, + validator=None, choices=None, unique=False): + super(BooleanProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) + + def empty(self, value): + return value is None + + +class FloatProperty(Property): + + data_type = float + type_name = 'Float' + + def __init__(self, verbose_name=None, name=None, default=0.0, required=False, + validator=None, choices=None, unique=False): + super(FloatProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) + + def validate(self, value): + value = float(value) + value = super(FloatProperty, self).validate(value) + return value + + def empty(self, value): + return value is None + + +class DateTimeProperty(Property): + """This class handles both the datetime.datetime object + And the datetime.date objects. 
It can return either one, + depending on the value stored in the database""" + + data_type = datetime.datetime + type_name = 'DateTime' + + def __init__(self, verbose_name=None, auto_now=False, auto_now_add=False, name=None, + default=None, required=False, validator=None, choices=None, unique=False): + super(DateTimeProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) + self.auto_now = auto_now + self.auto_now_add = auto_now_add + + def default_value(self): + if self.auto_now or self.auto_now_add: + return self.now() + return super(DateTimeProperty, self).default_value() + + def validate(self, value): + if value is None: + return + if isinstance(value, datetime.date): + return value + return super(DateTimeProperty, self).validate(value) + + def get_value_for_datastore(self, model_instance): + if self.auto_now: + setattr(model_instance, self.name, self.now()) + return super(DateTimeProperty, self).get_value_for_datastore(model_instance) + + def now(self): + return datetime.datetime.utcnow() + + +class DateProperty(Property): + + data_type = datetime.date + type_name = 'Date' + + def __init__(self, verbose_name=None, auto_now=False, auto_now_add=False, name=None, + default=None, required=False, validator=None, choices=None, unique=False): + super(DateProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) + self.auto_now = auto_now + self.auto_now_add = auto_now_add + + def default_value(self): + if self.auto_now or self.auto_now_add: + return self.now() + return super(DateProperty, self).default_value() + + def validate(self, value): + value = super(DateProperty, self).validate(value) + if value is None: + return + if not isinstance(value, self.data_type): + raise TypeError('Validation Error, expecting %s, got %s' % (self.data_type, type(value))) + + def get_value_for_datastore(self, model_instance): + if self.auto_now: + setattr(model_instance, self.name, self.now()) + val = super(DateProperty, self).get_value_for_datastore(model_instance) + if isinstance(val, datetime.datetime): + val = val.date() + return val + + def now(self): + return datetime.date.today() + + +class TimeProperty(Property): + data_type = datetime.time + type_name = 'Time' + + def __init__(self, verbose_name=None, name=None, + default=None, required=False, validator=None, choices=None, unique=False): + super(TimeProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) + + def validate(self, value): + value = super(TimeProperty, self).validate(value) + if value is None: + return + if not isinstance(value, self.data_type): + raise TypeError('Validation Error, expecting %s, got %s' % (self.data_type, type(value))) + + +class ReferenceProperty(Property): + + data_type = Key + type_name = 'Reference' + + def __init__(self, reference_class=None, collection_name=None, + verbose_name=None, name=None, default=None, required=False, validator=None, choices=None, unique=False): + super(ReferenceProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) + self.reference_class = reference_class + self.collection_name = collection_name + + def __get__(self, obj, objtype): + if obj: + value = getattr(obj, self.slot_name) + if value == self.default_value(): + return value + # If the value is still the UUID for the referenced object, we need to create + # the object now that is the attribute has actually been accessed. 
This lazy + # instantiation saves unnecessary roundtrips to SimpleDB + if isinstance(value, six.string_types): + value = self.reference_class(value) + setattr(obj, self.name, value) + return value + + def __set__(self, obj, value): + """Don't allow this object to be associated to itself + This causes bad things to happen""" + if value is not None and (obj.id == value or (hasattr(value, "id") and obj.id == value.id)): + raise ValueError("Can not associate an object with itself!") + return super(ReferenceProperty, self).__set__(obj, value) + + def __property_config__(self, model_class, property_name): + super(ReferenceProperty, self).__property_config__(model_class, property_name) + if self.collection_name is None: + self.collection_name = '%s_%s_set' % (model_class.__name__.lower(), self.name) + if hasattr(self.reference_class, self.collection_name): + raise ValueError('duplicate property: %s' % self.collection_name) + setattr(self.reference_class, self.collection_name, + _ReverseReferenceProperty(model_class, property_name, self.collection_name)) + + def check_uuid(self, value): + # This does a bit of hand waving to "type check" the string + t = value.split('-') + if len(t) != 5: + raise ValueError + + def check_instance(self, value): + try: + obj_lineage = value.get_lineage() + cls_lineage = self.reference_class.get_lineage() + if obj_lineage.startswith(cls_lineage): + return + raise TypeError('%s not instance of %s' % (obj_lineage, cls_lineage)) + except: + raise ValueError('%s is not a Model' % value) + + def validate(self, value): + if self.validator: + self.validator(value) + if self.required and value is None: + raise ValueError('%s is a required property' % self.name) + if value == self.default_value(): + return + if not isinstance(value, six.string_types): + self.check_instance(value) + + +class _ReverseReferenceProperty(Property): + data_type = Query + type_name = 'query' + + def __init__(self, model, prop, name): + self.__model = model + self.__property = prop + self.collection_name = prop + self.name = name + self.item_type = model + + def __get__(self, model_instance, model_class): + """Fetches collection of model instances of this collection property.""" + if model_instance is not None: + query = Query(self.__model) + if isinstance(self.__property, list): + props = [] + for prop in self.__property: + props.append("%s =" % prop) + return query.filter(props, model_instance) + else: + return query.filter(self.__property + ' =', model_instance) + else: + return self + + def __set__(self, model_instance, value): + """Not possible to set a new collection.""" + raise ValueError('Virtual property is read-only') + + +class CalculatedProperty(Property): + + def __init__(self, verbose_name=None, name=None, default=None, + required=False, validator=None, choices=None, + calculated_type=int, unique=False, use_method=False): + super(CalculatedProperty, self).__init__(verbose_name, name, default, required, + validator, choices, unique) + self.calculated_type = calculated_type + self.use_method = use_method + + def __get__(self, obj, objtype): + value = self.default_value() + if obj: + try: + value = getattr(obj, self.slot_name) + if self.use_method: + value = value() + except AttributeError: + pass + return value + + def __set__(self, obj, value): + """Not possible to set a new AutoID.""" + pass + + def _set_direct(self, obj, value): + if not self.use_method: + setattr(obj, self.slot_name, value) + + def get_value_for_datastore(self, model_instance): + if self.calculated_type in [str, int, 
bool]: + value = self.__get__(model_instance, model_instance.__class__) + return value + else: + return None + + +class ListProperty(Property): + + data_type = list + type_name = 'List' + + def __init__(self, item_type, verbose_name=None, name=None, default=None, **kwds): + if default is None: + default = [] + self.item_type = item_type + super(ListProperty, self).__init__(verbose_name, name, default=default, required=True, **kwds) + + def validate(self, value): + if self.validator: + self.validator(value) + if value is not None: + if not isinstance(value, list): + value = [value] + + if self.item_type in six.integer_types: + item_type = six.integer_types + elif self.item_type in six.string_types: + item_type = six.string_types + else: + item_type = self.item_type + + for item in value: + if not isinstance(item, item_type): + if item_type == six.integer_types: + raise ValueError('Items in the %s list must all be integers.' % self.name) + else: + raise ValueError('Items in the %s list must all be %s instances' % + (self.name, self.item_type.__name__)) + return value + + def empty(self, value): + return value is None + + def default_value(self): + return list(super(ListProperty, self).default_value()) + + def __set__(self, obj, value): + """Override the set method to allow them to set the property to an instance of the item_type instead of requiring a list to be passed in""" + if self.item_type in six.integer_types: + item_type = six.integer_types + elif self.item_type in six.string_types: + item_type = six.string_types + else: + item_type = self.item_type + if isinstance(value, item_type): + value = [value] + elif value is None: # Override to allow them to set this to "None" to remove everything + value = [] + return super(ListProperty, self).__set__(obj, value) + + +class MapProperty(Property): + + data_type = dict + type_name = 'Map' + + def __init__(self, item_type=str, verbose_name=None, name=None, default=None, **kwds): + if default is None: + default = {} + self.item_type = item_type + super(MapProperty, self).__init__(verbose_name, name, default=default, required=True, **kwds) + + def validate(self, value): + value = super(MapProperty, self).validate(value) + if value is not None: + if not isinstance(value, dict): + raise ValueError('Value must of type dict') + + if self.item_type in six.integer_types: + item_type = six.integer_types + elif self.item_type in six.string_types: + item_type = six.string_types + else: + item_type = self.item_type + + for key in value: + if not isinstance(value[key], item_type): + if item_type == six.integer_types: + raise ValueError('Values in the %s Map must all be integers.' 
% self.name) + else: + raise ValueError('Values in the %s Map must all be %s instances' % + (self.name, self.item_type.__name__)) + return value + + def empty(self, value): + return value is None + + def default_value(self): + return {} diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/query.py b/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/query.py new file mode 100644 index 0000000000000000000000000000000000000000..8945d4c0aa9c063c4c3e411200f7a4b3313c85a0 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/query.py @@ -0,0 +1,86 @@ +from boto.compat import six +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class Query(object): + __local_iter__ = None + def __init__(self, model_class, limit=None, next_token=None, manager=None): + self.model_class = model_class + self.limit = limit + self.offset = 0 + if manager: + self.manager = manager + else: + self.manager = self.model_class._manager + self.filters = [] + self.select = None + self.sort_by = None + self.rs = None + self.next_token = next_token + + def __iter__(self): + return iter(self.manager.query(self)) + + def next(self): + if self.__local_iter__ is None: + self.__local_iter__ = self.__iter__() + return next(self.__local_iter__) + + def filter(self, property_operator, value): + self.filters.append((property_operator, value)) + return self + + def fetch(self, limit, offset=0): + """Not currently fully supported, but we can use this + to allow them to set a limit in a chainable method""" + self.limit = limit + self.offset = offset + return self + + def count(self, quick=True): + return self.manager.count(self.model_class, self.filters, quick, self.sort_by, self.select) + + def get_query(self): + return self.manager._build_filter_part(self.model_class, self.filters, self.sort_by, self.select) + + def order(self, key): + self.sort_by = key + return self + + def to_xml(self, doc=None): + if not doc: + xmlmanager = self.model_class.get_xmlmanager() + doc = xmlmanager.new_doc() + for obj in self: + obj.to_xml(doc) + return doc + + def get_next_token(self): + if self.rs: + return self.rs.next_token + if self._next_token: + return self._next_token + return None + + def set_next_token(self, token): + self._next_token = token + + next_token = property(get_next_token, set_next_token) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/sequence.py b/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/sequence.py new file mode 100644 index 
0000000000000000000000000000000000000000..a28798930c01e5b93d6bb0200a6c48af5b59e5b1 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/sequence.py @@ -0,0 +1,224 @@ +# Copyright (c) 2010 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.exception import SDBResponseError +from boto.compat import six + +class SequenceGenerator(object): + """Generic Sequence Generator object, this takes a single + string as the "sequence" and uses that to figure out + what the next value in a string is. For example + if you give "ABC" and pass in "A" it will give you "B", + and if you give it "C" it will give you "AA". + + If you set "rollover" to True in the above example, passing + in "C" would give you "A" again. + + The Sequence string can be a string or any iterable + that has the "index" function and is indexable. + """ + __name__ = "SequenceGenerator" + + def __init__(self, sequence_string, rollover=False): + """Create a new SequenceGenerator using the sequence_string + as how to generate the next item. 
+
+        :param sequence_string: The string or list that explains
+            how to generate the next item in the sequence
+        :type sequence_string: str, iterable
+
+        :param rollover: Rollover instead of incrementing when
+            we hit the end of the sequence
+        :type rollover: bool
+        """
+        self.sequence_string = sequence_string
+        self.sequence_length = len(sequence_string[0])
+        self.rollover = rollover
+        self.last_item = sequence_string[-1]
+        self.__name__ = "%s('%s')" % (self.__class__.__name__, sequence_string)
+
+    def __call__(self, val, last=None):
+        """Get the next value in the sequence"""
+        # If they pass us in a string that's not at least
+        # the length of our sequence, then return the
+        # first element in our sequence
+        if val is None or len(val) < self.sequence_length:
+            return self.sequence_string[0]
+        last_value = val[-self.sequence_length:]
+        if (not self.rollover) and (last_value == self.last_item):
+            val = "%s%s" % (self(val[:-self.sequence_length]), self._inc(last_value))
+        else:
+            val = "%s%s" % (val[:-self.sequence_length], self._inc(last_value))
+        return val
+
+    def _inc(self, val):
+        """Increment a single value"""
+        assert(len(val) == self.sequence_length)
+        return self.sequence_string[(self.sequence_string.index(val) + 1) % len(self.sequence_string)]
+
+
+#
+# Simple Sequence Functions
+#
+def increment_by_one(cv=None, lv=None):
+    if cv is None:
+        return 0
+    return cv + 1
+
+def double(cv=None, lv=None):
+    if cv is None:
+        return 1
+    return cv * 2
+
+def fib(cv=1, lv=0):
+    """The Fibonacci sequence; this incrementer uses the
+    last value"""
+    if cv is None:
+        cv = 1
+    if lv is None:
+        lv = 0
+    return cv + lv
+
+increment_string = SequenceGenerator("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
+
+
+class Sequence(object):
+    """A simple Sequence using the new SDB "Consistent" features.
+    Based largely off of the "Counter" example from Mitch Garnaat:
+    http://bitbucket.org/mitch/stupidbototricks/src/tip/counter.py"""
+
+    def __init__(self, id=None, domain_name=None, fnc=increment_by_one, init_val=None):
+        """Create a new Sequence, using an optional function to
+        increment to the next number, by default we just increment by one.
+        Every parameter here is optional; if you don't specify any options
+        then you'll get a new SequenceGenerator with a random ID stored in the
+        default domain that increments by one and uses the default botoweb
+        environment.
+
+        :param id: Optional ID (name) for this counter
+        :type id: str
+
+        :param domain_name: Optional domain name to use; by default we get this out of the
+            environment configuration
+        :type domain_name: str
+
+        :param fnc: Optional function to use for the incrementation; by default we just
+            increment by one. There are several functions defined in this module.
+ Your function must accept "None" to get the initial value + :type fnc: function, str + + :param init_val: Initial value, by default this is the first element in your sequence, + but you can pass in any value, even a string if you pass in a function that uses + strings instead of ints to increment + """ + self._db = None + self._value = None + self.last_value = None + self.domain_name = domain_name + self.id = id + if init_val is None: + init_val = fnc(init_val) + + if self.id is None: + import uuid + self.id = str(uuid.uuid4()) + + self.item_type = type(fnc(None)) + self.timestamp = None + # Allow us to pass in a full name to a function + if isinstance(fnc, six.string_types): + from boto.utils import find_class + fnc = find_class(fnc) + self.fnc = fnc + + # Bootstrap the value last + if not self.val: + self.val = init_val + + def set(self, val): + """Set the value""" + import time + now = time.time() + expected_value = [] + new_val = {} + new_val['timestamp'] = now + if self._value is not None: + new_val['last_value'] = self._value + expected_value = ['current_value', str(self._value)] + new_val['current_value'] = val + try: + self.db.put_attributes(self.id, new_val, expected_value=expected_value) + self.timestamp = new_val['timestamp'] + except SDBResponseError as e: + if e.status == 409: + raise ValueError("Sequence out of sync") + else: + raise + + + def get(self): + """Get the value""" + val = self.db.get_attributes(self.id, consistent_read=True) + if val: + if 'timestamp' in val: + self.timestamp = val['timestamp'] + if 'current_value' in val: + self._value = self.item_type(val['current_value']) + if "last_value" in val and val['last_value'] is not None: + self.last_value = self.item_type(val['last_value']) + return self._value + + val = property(get, set) + + def __repr__(self): + return "%s('%s', '%s', '%s.%s', '%s')" % ( + self.__class__.__name__, + self.id, + self.domain_name, + self.fnc.__module__, self.fnc.__name__, + self.val) + + + def _connect(self): + """Connect to our domain""" + if not self._db: + import boto + sdb = boto.connect_sdb() + if not self.domain_name: + self.domain_name = boto.config.get("DB", "sequence_db", boto.config.get("DB", "db_name", "default")) + try: + self._db = sdb.get_domain(self.domain_name) + except SDBResponseError as e: + if e.status == 400: + self._db = sdb.create_domain(self.domain_name) + else: + raise + return self._db + + db = property(_connect) + + def next(self): + self.val = self.fnc(self.val, self.last_value) + return self.val + + def delete(self): + """Remove this sequence""" + self.db.delete_attributes(self.id) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/test_db.py b/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/test_db.py new file mode 100644 index 0000000000000000000000000000000000000000..ba2fb3cd5f28608e609c9bc46572b1afadadb8d2 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sdb/db/test_db.py @@ -0,0 +1,231 @@ +import logging +import time +from datetime import datetime + +from boto.sdb.db.model import Model +from boto.sdb.db.property import StringProperty, IntegerProperty, BooleanProperty +from boto.sdb.db.property import DateTimeProperty, FloatProperty, ReferenceProperty +from boto.sdb.db.property import PasswordProperty, ListProperty, MapProperty +from boto.exception import SDBPersistenceError + +logging.basicConfig() +log = logging.getLogger('test_db') +log.setLevel(logging.DEBUG) + +_objects = {} + +# +# This will eventually be moved to the boto.tests module and become a real unit test +# but for now it 
will live here. It shows examples of each of the Property types in +# use and tests the basic operations. +# +class TestBasic(Model): + + name = StringProperty() + size = IntegerProperty() + foo = BooleanProperty() + date = DateTimeProperty() + +class TestFloat(Model): + + name = StringProperty() + value = FloatProperty() + +class TestRequired(Model): + + req = StringProperty(required=True, default='foo') + +class TestReference(Model): + + ref = ReferenceProperty(reference_class=TestBasic, collection_name='refs') + +class TestSubClass(TestBasic): + + answer = IntegerProperty() + +class TestPassword(Model): + password = PasswordProperty() + +class TestList(Model): + + name = StringProperty() + nums = ListProperty(int) + +class TestMap(Model): + + name = StringProperty() + map = MapProperty() + +class TestListReference(Model): + + name = StringProperty() + basics = ListProperty(TestBasic) + +class TestAutoNow(Model): + + create_date = DateTimeProperty(auto_now_add=True) + modified_date = DateTimeProperty(auto_now=True) + +class TestUnique(Model): + name = StringProperty(unique=True) + +def test_basic(): + global _objects + t = TestBasic() + t.name = 'simple' + t.size = -42 + t.foo = True + t.date = datetime.now() + log.debug('saving object') + t.put() + _objects['test_basic_t'] = t + time.sleep(5) + log.debug('now try retrieving it') + tt = TestBasic.get_by_id(t.id) + _objects['test_basic_tt'] = tt + assert tt.id == t.id + l = TestBasic.get_by_id([t.id]) + assert len(l) == 1 + assert l[0].id == t.id + assert t.size == tt.size + assert t.foo == tt.foo + assert t.name == tt.name + #assert t.date == tt.date + return t + +def test_float(): + global _objects + t = TestFloat() + t.name = 'float object' + t.value = 98.6 + log.debug('saving object') + t.save() + _objects['test_float_t'] = t + time.sleep(5) + log.debug('now try retrieving it') + tt = TestFloat.get_by_id(t.id) + _objects['test_float_tt'] = tt + assert tt.id == t.id + assert tt.name == t.name + assert tt.value == t.value + return t + +def test_required(): + global _objects + t = TestRequired() + _objects['test_required_t'] = t + t.put() + return t + +def test_reference(t=None): + global _objects + if not t: + t = test_basic() + tt = TestReference() + tt.ref = t + tt.put() + time.sleep(10) + tt = TestReference.get_by_id(tt.id) + _objects['test_reference_tt'] = tt + assert tt.ref.id == t.id + for o in t.refs: + log.debug(o) + +def test_subclass(): + global _objects + t = TestSubClass() + _objects['test_subclass_t'] = t + t.name = 'a subclass' + t.size = -489 + t.save() + +def test_password(): + global _objects + t = TestPassword() + _objects['test_password_t'] = t + t.password = "foo" + t.save() + time.sleep(5) + # Make sure it stored ok + tt = TestPassword.get_by_id(t.id) + _objects['test_password_tt'] = tt + #Testing password equality + assert tt.password == "foo" + #Testing password not stored as string + assert str(tt.password) != "foo" + +def test_list(): + global _objects + t = TestList() + _objects['test_list_t'] = t + t.name = 'a list of ints' + t.nums = [1, 2, 3, 4, 5] + t.put() + tt = TestList.get_by_id(t.id) + _objects['test_list_tt'] = tt + assert tt.name == t.name + for n in tt.nums: + assert isinstance(n, int) + +def test_list_reference(): + global _objects + t = TestBasic() + t.put() + _objects['test_list_ref_t'] = t + tt = TestListReference() + tt.name = "foo" + tt.basics = [t] + tt.put() + time.sleep(5) + _objects['test_list_ref_tt'] = tt + ttt = TestListReference.get_by_id(tt.id) + assert ttt.basics[0].id == t.id + +def 
test_unique(): + global _objects + t = TestUnique() + name = 'foo' + str(int(time.time())) + t.name = name + t.put() + _objects['test_unique_t'] = t + time.sleep(10) + tt = TestUnique() + _objects['test_unique_tt'] = tt + tt.name = name + try: + tt.put() + assert False + except(SDBPersistenceError): + pass + +def test_datetime(): + global _objects + t = TestAutoNow() + t.put() + _objects['test_datetime_t'] = t + time.sleep(5) + tt = TestAutoNow.get_by_id(t.id) + assert tt.create_date.timetuple() == t.create_date.timetuple() + +def test(): + log.info('test_basic') + t1 = test_basic() + log.info('test_required') + test_required() + log.info('test_reference') + test_reference(t1) + log.info('test_subclass') + test_subclass() + log.info('test_password') + test_password() + log.info('test_list') + test_list() + log.info('test_list_reference') + test_list_reference() + log.info("test_datetime") + test_datetime() + log.info('test_unique') + test_unique() + +if __name__ == "__main__": + test() diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sdb/domain.py b/desktop/core/ext-py/boto-2.38.0/boto/sdb/domain.py new file mode 100644 index 0000000000000000000000000000000000000000..faed813326ac95b0ebad9ef760b0657d88f284c4 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sdb/domain.py @@ -0,0 +1,380 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +from __future__ import print_function + +""" +Represents an SDB Domain +""" + +from boto.sdb.queryresultset import SelectResultSet +from boto.compat import six + +class Domain(object): + + def __init__(self, connection=None, name=None): + self.connection = connection + self.name = name + self._metadata = None + + def __repr__(self): + return 'Domain:%s' % self.name + + def __iter__(self): + return iter(self.select("SELECT * FROM `%s`" % self.name)) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'DomainName': + self.name = value + else: + setattr(self, name, value) + + def get_metadata(self): + if not self._metadata: + self._metadata = self.connection.domain_metadata(self) + return self._metadata + + def put_attributes(self, item_name, attributes, + replace=True, expected_value=None): + """ + Store attributes for a given item. + + :type item_name: string + :param item_name: The name of the item whose attributes are being stored. 
+ + :type attributes: dict or dict-like object + :param attributes: The name/value pairs to store as attributes + + :type expected_value: list + :param expected_value: If supplied, this is a list or tuple consisting + of a single attribute name and expected value. The list can be + of the form: + + * ['name', 'value'] + + In which case the call will first verify that the attribute + "name" of this item has a value of "value". If it does, the put + will proceed, otherwise a ConditionalCheckFailed error will be + returned. The list can also be of the form: + + * ['name', True|False] + + which will simply check for the existence (True) or non-existence + (False) of the attribute. + + :type replace: bool + :param replace: Whether the attribute values passed in will replace + existing values or will be added as additional values. + Defaults to True. + + :rtype: bool + :return: True if successful + """ + return self.connection.put_attributes(self, item_name, attributes, + replace, expected_value) + + def batch_put_attributes(self, items, replace=True): + """ + Store attributes for multiple items. + + :type items: dict or dict-like object + :param items: A dictionary-like object. The keys of the dictionary are + the item names and the values are themselves dictionaries + of attribute names/values, exactly the same as the + attributes parameter of the scalar put_attributes + call. + + :type replace: bool + :param replace: Whether the attribute values passed in will replace + existing values or will be added as additional values. + Defaults to True. + + :rtype: bool + :return: True if successful + """ + return self.connection.batch_put_attributes(self, items, replace) + + def get_attributes(self, item_name, attribute_name=None, + consistent_read=False, item=None): + """ + Retrieve attributes for a given item. + + :type item_name: string + :param item_name: The name of the item whose attributes are being retrieved. + + :type attribute_name: string or list of strings + :param attribute_name: An attribute name or list of attribute names. This + parameter is optional. If not supplied, all attributes + will be retrieved for the item. + + :rtype: :class:`boto.sdb.item.Item` + :return: An Item mapping type containing the requested attribute name/values + """ + return self.connection.get_attributes(self, item_name, attribute_name, + consistent_read, item) + + def delete_attributes(self, item_name, attributes=None, + expected_values=None): + """ + Delete attributes from a given item. + + :type item_name: string + :param item_name: The name of the item whose attributes are being deleted. + + :type attributes: dict, list or :class:`boto.sdb.item.Item` + :param attributes: Either a list containing attribute names which will cause + all values associated with that attribute name to be deleted or + a dict or Item containing the attribute names and keys and list + of values to delete as the value. If no value is supplied, + all attribute name/values for the item will be deleted. + + :type expected_values: list + :param expected_values: If supplied, this is a list or tuple consisting + of a single attribute name and expected value. The list can be of + the form: + + * ['name', 'value'] + + In which case the call will first verify that the attribute "name" + of this item has a value of "value". If it does, the delete + will proceed, otherwise a ConditionalCheckFailed error will be + returned.
The list can also be of the form: + + * ['name', True|False] + + which will simply check for the existence (True) or + non-existence (False) of the attribute. + + :rtype: bool + :return: True if successful + """ + return self.connection.delete_attributes(self, item_name, attributes, + expected_values) + + def batch_delete_attributes(self, items): + """ + Delete multiple items in this domain. + + :type items: dict or dict-like object + :param items: A dictionary-like object. The keys of the dictionary are + the item names and the values are either: + + * dictionaries of attribute names/values, exactly the + same as the attributes parameter of the scalar + put_attributes call. The attribute name/value pairs + will only be deleted if they match the name/value + pairs passed in. + * None which means that all attributes associated + with the item should be deleted. + + :rtype: bool + :return: True if successful + """ + return self.connection.batch_delete_attributes(self, items) + + def select(self, query='', next_token=None, consistent_read=False, max_items=None): + """ + Returns a set of Attributes for item names within domain_name that match the query. + The query must be expressed using the SELECT style syntax rather than the + original SimpleDB query language. + + :type query: string + :param query: The SimpleDB query to be performed. + + :rtype: iter + :return: An iterator containing the results. This is actually a generator + function that will iterate across all search results, not just the + first page. + """ + return SelectResultSet(self, query, max_items=max_items, next_token=next_token, + consistent_read=consistent_read) + + def get_item(self, item_name, consistent_read=False): + """ + Retrieves an item from the domain, along with all of its attributes. + + :param string item_name: The name of the item to retrieve. + :rtype: :class:`boto.sdb.item.Item` or ``None`` + :keyword bool consistent_read: When set to true, ensures that the most + recent data is returned.
+ :return: The requested item, or ``None`` if there was no match found + """ + item = self.get_attributes(item_name, consistent_read=consistent_read) + if item: + item.domain = self + return item + else: + return None + + def new_item(self, item_name): + return self.connection.item_cls(self, item_name) + + def delete_item(self, item): + self.delete_attributes(item.name) + + def to_xml(self, f=None): + """Get this domain as an XML DOM Document + :param f: Optional File to dump directly to + :type f: File or Stream + + :return: File object where the XML has been dumped to + :rtype: file + """ + if not f: + from tempfile import TemporaryFile + f = TemporaryFile() + print('<?xml version="1.0" encoding="UTF-8"?>', file=f) + print('<Domain id="%s">' % self.name, file=f) + for item in self: + print('\t<Item id="%s">' % item.name, file=f) + for k in item: + print('\t\t<attribute id="%s">' % k, file=f) + values = item[k] + if not isinstance(values, list): + values = [values] + for value in values: + print('\t\t\t<value><![CDATA[', end=' ', file=f) + if isinstance(value, six.text_type): + f.write(value.encode('utf-8')) + else: + f.write(value) + print(']]></value>', file=f) + print('\t\t</attribute>', file=f) + print('\t</Item>', file=f) + print('</Domain>', file=f) + f.flush() + f.seek(0) + return f + + + def from_xml(self, doc): + """Load this domain based on an XML document""" + import xml.sax + handler = DomainDumpParser(self) + xml.sax.parse(doc, handler) + return handler + + def delete(self): + """ + Delete this domain, and all items under it + """ + return self.connection.delete_domain(self) + + +class DomainMetaData(object): + + def __init__(self, domain=None): + self.domain = domain + self.item_count = None + self.item_names_size = None + self.attr_name_count = None + self.attr_names_size = None + self.attr_value_count = None + self.attr_values_size = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'ItemCount': + self.item_count = int(value) + elif name == 'ItemNamesSizeBytes': + self.item_names_size = int(value) + elif name == 'AttributeNameCount': + self.attr_name_count = int(value) + elif name == 'AttributeNamesSizeBytes': + self.attr_names_size = int(value) + elif name == 'AttributeValueCount': + self.attr_value_count = int(value) + elif name == 'AttributeValuesSizeBytes': + self.attr_values_size = int(value) + elif name == 'Timestamp': + self.timestamp = value + else: + setattr(self, name, value) + +import sys +from xml.sax.handler import ContentHandler +class DomainDumpParser(ContentHandler): + """ + SAX parser for a domain that has been dumped + """ + + def __init__(self, domain): + self.uploader = UploaderThread(domain) + self.item_id = None + self.attrs = {} + self.attribute = None + self.value = "" + self.domain = domain + + def startElement(self, name, attrs): + if name == "Item": + self.item_id = attrs['id'] + self.attrs = {} + elif name == "attribute": + self.attribute = attrs['id'] + elif name == "value": + self.value = "" + + def characters(self, ch): + self.value += ch + + def endElement(self, name): + if name == "value": + if self.value and self.attribute: + value = self.value.strip() + attr_name = self.attribute.strip() + if attr_name in self.attrs: + self.attrs[attr_name].append(value) + else: + self.attrs[attr_name] = [value] + elif name == "Item": + self.uploader.items[self.item_id] = self.attrs + # Every 20 items we spawn off the uploader + if len(self.uploader.items) >= 20: + self.uploader.start() + self.uploader = UploaderThread(self.domain) + elif name == "Domain": + # If we're done, spawn off our last Uploader Thread + self.uploader.start() + +from threading import Thread +class UploaderThread(Thread): + """Uploader Thread""" + + def
__init__(self, domain): + self.db = domain + self.items = {} + super(UploaderThread, self).__init__() + + def run(self): + try: + self.db.batch_put_attributes(self.items) + except: + print("Exception using batch put, trying regular put instead") + for item_name in self.items: + self.db.put_attributes(item_name, self.items[item_name]) + print(".", end=' ') + sys.stdout.flush() diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sdb/item.py b/desktop/core/ext-py/boto-2.38.0/boto/sdb/item.py new file mode 100644 index 0000000000000000000000000000000000000000..e09a9d9a2c1f17e0d14d92da7c696f33f56d3e69 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sdb/item.py @@ -0,0 +1,177 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import base64 + +class Item(dict): + """ + A ``dict`` sub-class that serves as an object representation of a + SimpleDB item. An item in SDB is similar to a row in a relational + database. Items belong to a :py:class:`Domain <boto.sdb.domain.Domain>`, + which is similar to a table in a relational database. + + The keys on instances of this object correspond to attributes that are + stored on the SDB item. + + .. tip:: While it is possible to instantiate this class directly, you may + want to use the convenience methods on :py:class:`boto.sdb.domain.Domain` + for that purpose. For example, :py:meth:`boto.sdb.domain.Domain.get_item`. + """ + def __init__(self, domain, name='', active=False): + """ + :type domain: :py:class:`boto.sdb.domain.Domain` + :param domain: The domain that this item belongs to. + + :param str name: The name of this item.
This name will be used when + querying for items using methods like + :py:meth:`boto.sdb.domain.Domain.get_item` + """ + dict.__init__(self) + self.domain = domain + self.name = name + self.active = active + self.request_id = None + self.encoding = None + self.in_attribute = False + self.converter = self.domain.connection.converter + + def startElement(self, name, attrs, connection): + if name == 'Attribute': + self.in_attribute = True + self.encoding = attrs.get('encoding', None) + return None + + def decode_value(self, value): + if self.encoding == 'base64': + self.encoding = None + return base64.decodestring(value) + else: + return value + + def endElement(self, name, value, connection): + if name == 'ItemName': + self.name = self.decode_value(value) + elif name == 'Name': + if self.in_attribute: + self.last_key = self.decode_value(value) + else: + self.name = self.decode_value(value) + elif name == 'Value': + if self.last_key in self: + if not isinstance(self[self.last_key], list): + self[self.last_key] = [self[self.last_key]] + value = self.decode_value(value) + if self.converter: + value = self.converter.decode(value) + self[self.last_key].append(value) + else: + value = self.decode_value(value) + if self.converter: + value = self.converter.decode(value) + self[self.last_key] = value + elif name == 'BoxUsage': + try: + connection.box_usage += float(value) + except: + pass + elif name == 'RequestId': + self.request_id = value + elif name == 'Attribute': + self.in_attribute = False + else: + setattr(self, name, value) + + def load(self): + """ + Loads or re-loads this item's attributes from SDB. + + .. warning:: + If you have changed attribute values on an Item instance, + this method will over-write the values if they are different in + SDB. For any local attributes that don't yet exist in SDB, + they will be safe. + """ + self.domain.get_attributes(self.name, item=self) + + def save(self, replace=True): + """ + Saves this item to SDB. + + :param bool replace: If ``True``, delete any attributes on the remote + SDB item that have a ``None`` value on this object. + """ + self.domain.put_attributes(self.name, self, replace) + # Delete any attributes set to "None" + if replace: + del_attrs = [] + for name in self: + if self[name] is None: + del_attrs.append(name) + if len(del_attrs) > 0: + self.domain.delete_attributes(self.name, del_attrs) + + def add_value(self, key, value): + """ + Helps set or add to attributes on this item. If you are adding a new + attribute that has yet to be set, it will simply create an attribute + named ``key`` with your given ``value`` as its value. If you are + adding a value to an existing attribute, this method will convert the + attribute to a list (if it isn't already) and append your new value + to said list. + + For clarification, consider the following interactive session: + + .. code-block:: python + + >>> item = some_domain.get_item('some_item') + >>> item.has_key('some_attr') + False + >>> item.add_value('some_attr', 1) + >>> item['some_attr'] + 1 + >>> item.add_value('some_attr', 2) + >>> item['some_attr'] + [1, 2] + + :param str key: The attribute to add a value to. + :param object value: The value to set or append to the attribute. + """ + if key in self: + # We already have this key on the item. + if not isinstance(self[key], list): + # The key isn't already a list, take its current value and + # convert it to a list with the only member being the + # current value. + self[key] = [self[key]] + # Add the new value to the list. 
+ self[key].append(value) + else: + # This is a new attribute, just set it. + self[key] = value + + def delete(self): + """ + Deletes this item in SDB. + + .. note:: This local Python object remains in its current state + after deletion, this only deletes the remote item in SDB. + """ + self.domain.delete_item(self) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sdb/queryresultset.py b/desktop/core/ext-py/boto-2.38.0/boto/sdb/queryresultset.py new file mode 100644 index 0000000000000000000000000000000000000000..54f35238b65af0f8fb343bbd62a4a71e802fd161 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sdb/queryresultset.py @@ -0,0 +1,93 @@ +from boto.compat import six +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
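As a usage illustration of the Item class above (not part of the patch): a minimal sketch assuming SDB credentials are configured, with a hypothetical domain name and attribute names.

import boto

sdb = boto.connect_sdb()
domain = sdb.create_domain('demo-domain')   # hypothetical domain name

item = domain.new_item('song-001')          # hypothetical item name
item['title'] = 'Example Title'
item.add_value('genre', 'rock')             # first value is stored as a scalar
item.add_value('genre', 'live')             # second value coerces it to ['rock', 'live']
item.save()                                 # put_attributes with replace=True

item['title'] = None
item.save()                                 # attributes left as None are deleted remotely

Note that it is save() with replace=True that triggers the deletion pass over None-valued attributes described in the docstring.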
+ +def query_lister(domain, query='', max_items=None, attr_names=None): + more_results = True + num_results = 0 + next_token = None + while more_results: + rs = domain.connection.query_with_attributes(domain, query, attr_names, + next_token=next_token) + for item in rs: + if max_items: + if num_results == max_items: + raise StopIteration + yield item + num_results += 1 + next_token = rs.next_token + more_results = next_token is not None + +class QueryResultSet(object): + + def __init__(self, domain=None, query='', max_items=None, attr_names=None): + self.max_items = max_items + self.domain = domain + self.query = query + self.attr_names = attr_names + + def __iter__(self): + return query_lister(self.domain, self.query, self.max_items, self.attr_names) + +def select_lister(domain, query='', max_items=None): + more_results = True + num_results = 0 + next_token = None + while more_results: + rs = domain.connection.select(domain, query, next_token=next_token) + for item in rs: + if max_items: + if num_results == max_items: + raise StopIteration + yield item + num_results += 1 + next_token = rs.next_token + more_results = next_token is not None + +class SelectResultSet(object): + + def __init__(self, domain=None, query='', max_items=None, + next_token=None, consistent_read=False): + self.domain = domain + self.query = query + self.consistent_read = consistent_read + self.max_items = max_items + self.next_token = next_token + + def __iter__(self): + more_results = True + num_results = 0 + while more_results: + rs = self.domain.connection.select(self.domain, self.query, + next_token=self.next_token, + consistent_read=self.consistent_read) + for item in rs: + if self.max_items and num_results >= self.max_items: + raise StopIteration + yield item + num_results += 1 + self.next_token = rs.next_token + if self.max_items and num_results >= self.max_items: + raise StopIteration + more_results = self.next_token is not None + + def next(self): + return next(self.__iter__()) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sdb/regioninfo.py b/desktop/core/ext-py/boto-2.38.0/boto/sdb/regioninfo.py new file mode 100644 index 0000000000000000000000000000000000000000..cb0211e1612ff24665ca25a32b7104a604a39e0f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sdb/regioninfo.py @@ -0,0 +1,33 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
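To illustrate how the SelectResultSet above pages through results (iteration follows next_token internally, so callers never touch pagination), a short sketch with a hypothetical domain name:

import boto

sdb = boto.connect_sdb()
domain = sdb.get_domain('demo-domain')      # hypothetical domain name

query = "select * from `demo-domain` where genre = 'rock'"
for item in domain.select(query, max_items=100):
    # Each page is fetched lazily as the loop advances.
    print(item.name, dict(item))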
+# + +from boto.regioninfo import RegionInfo + +class SDBRegionInfo(RegionInfo): + + def __init__(self, connection=None, name=None, endpoint=None, + connection_cls=None): + from boto.sdb.connection import SDBConnection + super(SDBRegionInfo, self).__init__(connection, name, endpoint, + SDBConnection) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/services/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/services/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..449bd162a8ea33724103f1cba717f3255d1edea1 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/services/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/services/bs.py b/desktop/core/ext-py/boto-2.38.0/boto/services/bs.py new file mode 100755 index 0000000000000000000000000000000000000000..396c483975804b07ced09bb3c932f05a494fd8fa --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/services/bs.py @@ -0,0 +1,180 @@ +#!/usr/bin/env python +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
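SDBRegionInfo above binds region records to SDBConnection; the usual entry points are boto.sdb.regions() and boto.sdb.connect_to_region(). A quick sketch (region name illustrative):

import boto.sdb

for region in boto.sdb.regions():
    print(region.name, region.endpoint)

# Returns an SDBConnection for the named region, or None if the
# name is not recognized.
conn = boto.sdb.connect_to_region('us-west-2')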
+from optparse import OptionParser +from boto.services.servicedef import ServiceDef +from boto.services.submit import Submitter +from boto.services.result import ResultProcessor +import boto +import sys, os +from boto.compat import StringIO + +class BS(object): + + Usage = "usage: %prog [options] config_file command" + + Commands = {'reset' : 'Clear input queue and output bucket', + 'submit' : 'Submit local files to the service', + 'start' : 'Start the service', + 'status' : 'Report on the status of the service buckets and queues', + 'retrieve' : 'Retrieve output generated by a batch', + 'batches' : 'List all batches stored in current output_domain'} + + def __init__(self): + self.service_name = None + self.parser = OptionParser(usage=self.Usage) + self.parser.add_option("--help-commands", action="store_true", dest="help_commands", + help="provides help on the available commands") + self.parser.add_option("-a", "--access-key", action="store", type="string", + help="your AWS Access Key") + self.parser.add_option("-s", "--secret-key", action="store", type="string", + help="your AWS Secret Access Key") + self.parser.add_option("-p", "--path", action="store", type="string", dest="path", + help="the path to local directory for submit and retrieve") + self.parser.add_option("-k", "--keypair", action="store", type="string", dest="keypair", + help="the SSH keypair used with launched instance(s)") + self.parser.add_option("-l", "--leave", action="store_true", dest="leave", + help="leave the files (don't retrieve them) during the retrieve command") + self.parser.set_defaults(leave=False) + self.parser.add_option("-n", "--num-instances", action="store", type="string", dest="num_instances", + help="the number of launched instance(s)") + self.parser.set_defaults(num_instances=1) + self.parser.add_option("-i", "--ignore-dirs", action="append", type="string", dest="ignore", + help="directories that should be ignored by submit command") + self.parser.add_option("-b", "--batch-id", action="store", type="string", dest="batch", + help="batch identifier required by the retrieve command") + + def print_command_help(self): + print('\nCommands:') + for key in self.Commands.keys(): + print(' %s\t\t%s' % (key, self.Commands[key])) + + def do_reset(self): + iq = self.sd.get_obj('input_queue') + if iq: + print('clearing out input queue') + i = 0 + m = iq.read() + while m: + i += 1 + iq.delete_message(m) + m = iq.read() + print('deleted %d messages' % i) + ob = self.sd.get_obj('output_bucket') + ib = self.sd.get_obj('input_bucket') + if ob: + if ib and ob.name == ib.name: + return + print('deleting generated files in output bucket') + i = 0 + for k in ob: + i += 1 + k.delete() + print('deleted %d keys' % i) + + def do_submit(self): + if not self.options.path: + self.parser.error('No path provided') + if not os.path.exists(self.options.path): + self.parser.error('Invalid path (%s)' % self.options.path) + s = Submitter(self.sd) + t = s.submit_path(self.options.path, None, self.options.ignore, None, + None, True, self.options.path) + print('A total of %d files were submitted' % t[1]) + print('Batch Identifier: %s' % t[0]) + + def do_start(self): + ami_id = self.sd.get('ami_id') + instance_type = self.sd.get('instance_type', 'm1.small') + security_group = self.sd.get('security_group', 'default') + if not ami_id: + self.parser.error('ami_id option is required when starting the service') + ec2 = boto.connect_ec2() + if not self.sd.has_section('Credentials'): + self.sd.add_section('Credentials') + self.sd.set('Credentials',
'aws_access_key_id', ec2.aws_access_key_id) + self.sd.set('Credentials', 'aws_secret_access_key', ec2.aws_secret_access_key) + s = StringIO() + self.sd.write(s) + rs = ec2.get_all_images([ami_id]) + img = rs[0] + r = img.run(user_data=s.getvalue(), key_name=self.options.keypair, + max_count=self.options.num_instances, + instance_type=instance_type, + security_groups=[security_group]) + print('Starting AMI: %s' % ami_id) + print('Reservation %s contains the following instances:' % r.id) + for i in r.instances: + print('\t%s' % i.id) + + def do_status(self): + iq = self.sd.get_obj('input_queue') + if iq: + print('The input_queue (%s) contains approximately %s messages' % (iq.id, iq.count())) + ob = self.sd.get_obj('output_bucket') + ib = self.sd.get_obj('input_bucket') + if ob: + if ib and ob.name == ib.name: + return + total = 0 + for k in ob: + total += 1 + print('The output_bucket (%s) contains %d keys' % (ob.name, total)) + + def do_retrieve(self): + if not self.options.path: + self.parser.error('No path provided') + if not os.path.exists(self.options.path): + self.parser.error('Invalid path (%s)' % self.options.path) + if not self.options.batch: + self.parser.error('batch identifier is required for retrieve command') + s = ResultProcessor(self.options.batch, self.sd) + s.get_results(self.options.path, get_file=(not self.options.leave)) + + def do_batches(self): + d = self.sd.get_obj('output_domain') + if d: + print('Available Batches:') + rs = d.query("['type'='Batch']") + for item in rs: + print(' %s' % item.name) + else: + self.parser.error('No output_domain specified for service') + + def main(self): + self.options, self.args = self.parser.parse_args() + if self.options.help_commands: + self.print_command_help() + sys.exit(0) + if len(self.args) != 2: + self.parser.error("config_file and command are required") + self.config_file = self.args[0] + self.sd = ServiceDef(self.config_file) + self.command = self.args[1] + if hasattr(self, 'do_%s' % self.command): + method = getattr(self, 'do_%s' % self.command) + method() + else: + self.parser.error('command (%s) not recognized' % self.command) + +if __name__ == "__main__": + bs = BS() + bs.main() diff --git a/desktop/core/ext-py/boto-2.38.0/boto/services/message.py b/desktop/core/ext-py/boto-2.38.0/boto/services/message.py new file mode 100644 index 0000000000000000000000000000000000000000..31f37019fc90475ef68e3b2f957c8095d3c5fbfb --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/services/message.py @@ -0,0 +1,58 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.sqs.message import MHMessage +from boto.utils import get_ts +from socket import gethostname +import os, mimetypes, time + +class ServiceMessage(MHMessage): + + def for_key(self, key, params=None, bucket_name=None): + if params: + self.update(params) + if key.path: + t = os.path.split(key.path) + self['OriginalLocation'] = t[0] + self['OriginalFileName'] = t[1] + mime_type = mimetypes.guess_type(t[1])[0] + if mime_type is None: + mime_type = 'application/octet-stream' + self['Content-Type'] = mime_type + s = os.stat(key.path) + t = time.gmtime(s[7]) + self['FileAccessedDate'] = get_ts(t) + t = time.gmtime(s[8]) + self['FileModifiedDate'] = get_ts(t) + t = time.gmtime(s[9]) + self['FileCreateDate'] = get_ts(t) + else: + self['OriginalFileName'] = key.name + self['OriginalLocation'] = key.bucket.name + self['ContentType'] = key.content_type + self['Host'] = gethostname() + if bucket_name: + self['Bucket'] = bucket_name + else: + self['Bucket'] = key.bucket.name + self['InputKey'] = key.name + self['Size'] = key.size + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/services/result.py b/desktop/core/ext-py/boto-2.38.0/boto/services/result.py new file mode 100644 index 0000000000000000000000000000000000000000..879934323ba2bf8167eebd07c54bb42b3e5950ab --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/services/result.py @@ -0,0 +1,135 @@ +#!/usr/bin/env python +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
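A sketch of building a ServiceMessage for an S3 key with for_key() above, roughly as the services framework does before queueing work. Bucket and key names are hypothetical, and key.path is cleared explicitly so that the metadata comes from the S3 side rather than a local file:

import boto
from boto.services.message import ServiceMessage

s3 = boto.connect_s3()
key = s3.get_bucket('my-input-bucket').get_key('clips/input.avi')
key.path = None   # no local file; for_key() then uses the key's S3 metadata

msg = ServiceMessage()
msg.for_key(key, params={'Priority': 'high'}, bucket_name='my-output-bucket')
# msg now carries Bucket, InputKey, Size, ContentType, Host, ... entries
print(msg.get_body())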
+import os +from datetime import datetime, timedelta +from boto.utils import parse_ts +import boto + +class ResultProcessor(object): + + LogFileName = 'log.csv' + + def __init__(self, batch_name, sd, mimetype_files=None): + self.sd = sd + self.batch = batch_name + self.log_fp = None + self.num_files = 0 + self.total_time = 0 + self.min_time = timedelta.max + self.max_time = timedelta.min + self.earliest_time = datetime.max + self.latest_time = datetime.min + self.queue = self.sd.get_obj('output_queue') + self.domain = self.sd.get_obj('output_domain') + + def calculate_stats(self, msg): + start_time = parse_ts(msg['Service-Read']) + end_time = parse_ts(msg['Service-Write']) + elapsed_time = end_time - start_time + if elapsed_time > self.max_time: + self.max_time = elapsed_time + if elapsed_time < self.min_time: + self.min_time = elapsed_time + self.total_time += elapsed_time.seconds + if start_time < self.earliest_time: + self.earliest_time = start_time + if end_time > self.latest_time: + self.latest_time = end_time + + def log_message(self, msg, path): + keys = sorted(msg.keys()) + if not self.log_fp: + self.log_fp = open(os.path.join(path, self.LogFileName), 'a') + line = ','.join(keys) + self.log_fp.write(line+'\n') + values = [] + for key in keys: + value = msg[key] + if value.find(',') > 0: + value = '"%s"' % value + values.append(value) + line = ','.join(values) + self.log_fp.write(line+'\n') + + def process_record(self, record, path, get_file=True): + self.log_message(record, path) + self.calculate_stats(record) + outputs = record['OutputKey'].split(',') + if 'OutputBucket' in record: + bucket = boto.lookup('s3', record['OutputBucket']) + else: + bucket = boto.lookup('s3', record['Bucket']) + for output in outputs: + if get_file: + key_name = output.split(';')[0] + key = bucket.lookup(key_name) + file_name = os.path.join(path, key_name) + print('retrieving file: %s to %s' % (key_name, file_name)) + key.get_contents_to_filename(file_name) + self.num_files += 1 + + def get_results_from_queue(self, path, get_file=True, delete_msg=True): + m = self.queue.read() + while m: + if 'Batch' in m and m['Batch'] == self.batch: + self.process_record(m, path, get_file) + if delete_msg: + self.queue.delete_message(m) + m = self.queue.read() + + def get_results_from_domain(self, path, get_file=True): + rs = self.domain.query("['Batch'='%s']" % self.batch) + for item in rs: + self.process_record(item, path, get_file) + + def get_results_from_bucket(self, path): + bucket = self.sd.get_obj('output_bucket') + if bucket: + print('No output queue or domain, just retrieving files from output_bucket') + for key in bucket: + file_name = os.path.join(path, key) + print('retrieving file: %s to %s' % (key, file_name)) + key.get_contents_to_filename(file_name) + self.num_files += 1 + + def get_results(self, path, get_file=True, delete_msg=True): + if not os.path.isdir(path): + os.mkdir(path) + if self.queue: + self.get_results_from_queue(path, get_file) + elif self.domain: + self.get_results_from_domain(path, get_file) + else: + self.get_results_from_bucket(path) + if self.log_fp: + self.log_fp.close() + print('%d results successfully retrieved.'
% self.num_files) + if self.num_files > 0: + self.avg_time = float(self.total_time)/self.num_files + print('Minimum Processing Time: %d' % self.min_time.seconds) + print('Maximum Processing Time: %d' % self.max_time.seconds) + print('Average Processing Time: %f' % self.avg_time) + self.elapsed_time = self.latest_time-self.earliest_time + print('Elapsed Time: %d' % self.elapsed_time.seconds) + tput = 1.0 / ((self.elapsed_time.seconds/60.0) / self.num_files) + print('Throughput: %f transactions / minute' % tput) + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/services/service.py b/desktop/core/ext-py/boto-2.38.0/boto/services/service.py new file mode 100644 index 0000000000000000000000000000000000000000..e1a04c8ec21c9e155da46ef78da1d28cf86c930a --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/services/service.py @@ -0,0 +1,161 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
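Retrieving a batch's outputs with the ResultProcessor above, given a service config and the batch identifier printed at submit time (the config file name and batch id below are hypothetical):

from boto.services.servicedef import ServiceDef
from boto.services.result import ResultProcessor

sd = ServiceDef('myservice.cfg')                        # hypothetical config file
proc = ResultProcessor('2015_8_28_22_30_0_4_240_0', sd) # hypothetical batch id

# Reads matching records from the output queue (or domain), downloads
# the produced files into /tmp/results, and prints timing statistics.
proc.get_results('/tmp/results', get_file=True)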
+ +import boto +from boto.services.message import ServiceMessage +from boto.services.servicedef import ServiceDef +from boto.pyami.scriptbase import ScriptBase +from boto.utils import get_ts +import time +import os +import mimetypes + + +class Service(ScriptBase): + + # Time required to process a transaction + ProcessingTime = 60 + + def __init__(self, config_file=None, mimetype_files=None): + super(Service, self).__init__(config_file) + self.name = self.__class__.__name__ + self.working_dir = boto.config.get('Pyami', 'working_dir') + self.sd = ServiceDef(config_file) + self.retry_count = self.sd.getint('retry_count', 5) + self.loop_delay = self.sd.getint('loop_delay', 30) + self.processing_time = self.sd.getint('processing_time', 60) + self.input_queue = self.sd.get_obj('input_queue') + self.output_queue = self.sd.get_obj('output_queue') + self.output_domain = self.sd.get_obj('output_domain') + if mimetype_files: + mimetypes.init(mimetype_files) + + def split_key(self, key): + if key.find(';') < 0: + t = (key, '') + else: + key, type = key.split(';') + label, mtype = type.split('=') + t = (key, mtype) + return t + + def read_message(self): + boto.log.info('read_message') + message = self.input_queue.read(self.processing_time) + if message: + boto.log.info(message.get_body()) + key = 'Service-Read' + message[key] = get_ts() + return message + + # retrieve the source file from S3 + def get_file(self, message): + bucket_name = message['Bucket'] + key_name = message['InputKey'] + file_name = os.path.join(self.working_dir, message.get('OriginalFileName', 'in_file')) + boto.log.info('get_file: %s/%s to %s' % (bucket_name, key_name, file_name)) + bucket = boto.lookup('s3', bucket_name) + key = bucket.new_key(key_name) + key.get_contents_to_filename(os.path.join(self.working_dir, file_name)) + return file_name + + # process source file, return list of output files + def process_file(self, in_file_name, msg): + return [] + + # store result file in S3 + def put_file(self, bucket_name, file_path, key_name=None): + boto.log.info('putting file %s as %s.%s' % (file_path, bucket_name, key_name)) + bucket = boto.lookup('s3', bucket_name) + key = bucket.new_key(key_name) + key.set_contents_from_filename(file_path) + return key + + def save_results(self, results, input_message, output_message): + output_keys = [] + for file, type in results: + if 'OutputBucket' in input_message: + output_bucket = input_message['OutputBucket'] + else: + output_bucket = input_message['Bucket'] + key_name = os.path.split(file)[1] + key = self.put_file(output_bucket, file, key_name) + output_keys.append('%s;type=%s' % (key.name, type)) + output_message['OutputKey'] = ','.join(output_keys) + + # write message to each output queue + def write_message(self, message): + message['Service-Write'] = get_ts() + message['Server'] = self.name + if 'HOSTNAME' in os.environ: + message['Host'] = os.environ['HOSTNAME'] + else: + message['Host'] = 'unknown' + message['Instance-ID'] = self.instance_id + if self.output_queue: + boto.log.info('Writing message to SQS queue: %s' % self.output_queue.id) + self.output_queue.write(message) + if self.output_domain: + boto.log.info('Writing message to SDB domain: %s' % self.output_domain.name) + item_name = '/'.join([message['Service-Write'], message['Bucket'], message['InputKey']]) + self.output_domain.put_attributes(item_name, message) + + # delete message from input queue + def delete_message(self, message): + boto.log.info('deleting message from %s' % self.input_queue.id) +
self.input_queue.delete_message(message) + + # to clean up any files, etc. after each iteration + def cleanup(self): + pass + + def shutdown(self): + on_completion = self.sd.get('on_completion', 'shutdown') + if on_completion == 'shutdown': + if self.instance_id: + time.sleep(60) + c = boto.connect_ec2() + c.terminate_instances([self.instance_id]) + + def main(self, notify=False): + self.notify('Service: %s Starting' % self.name) + empty_reads = 0 + while self.retry_count < 0 or empty_reads < self.retry_count: + try: + input_message = self.read_message() + if input_message: + empty_reads = 0 + output_message = ServiceMessage(None, input_message.get_body()) + input_file = self.get_file(input_message) + results = self.process_file(input_file, output_message) + self.save_results(results, input_message, output_message) + self.write_message(output_message) + self.delete_message(input_message) + self.cleanup() + else: + empty_reads += 1 + time.sleep(self.loop_delay) + except Exception: + boto.log.exception('Service Failed') + empty_reads += 1 + self.notify('Service: %s Shutting Down' % self.name) + self.shutdown() + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/services/servicedef.py b/desktop/core/ext-py/boto-2.38.0/boto/services/servicedef.py new file mode 100644 index 0000000000000000000000000000000000000000..a43b3f342a29d24b1261348588a5ad883cfcaae8 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/services/servicedef.py @@ -0,0 +1,91 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
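The Service class above supplies the whole read/process/upload/report loop; a concrete service normally overrides only process_file() and returns (path, mimetype) pairs for the base class to store. A toy subclass (not part of boto) to make that contract concrete:

from boto.services.service import Service

class UpperCaseService(Service):

    def process_file(self, in_file_name, msg):
        # Transform the downloaded input and hand back the result;
        # Service.main() uploads it and writes the output message.
        out_file_name = in_file_name + '.txt'
        with open(in_file_name) as fin, open(out_file_name, 'w') as fout:
            fout.write(fin.read().upper())
        return [(out_file_name, 'text/plain')]

Running it still requires the usual service config (input_queue, buckets, Pyami working_dir) described by the ServiceDef class that follows.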
+ +from boto.pyami.config import Config +from boto.services.message import ServiceMessage +import boto + +class ServiceDef(Config): + + def __init__(self, config_file, aws_access_key_id=None, aws_secret_access_key=None): + super(ServiceDef, self).__init__(config_file) + self.aws_access_key_id = aws_access_key_id + self.aws_secret_access_key = aws_secret_access_key + script = Config.get(self, 'Pyami', 'scripts') + if script: + self.name = script.split('.')[-1] + else: + self.name = None + + + def get(self, name, default=None): + return super(ServiceDef, self).get(self.name, name, default) + + def has_option(self, option): + return super(ServiceDef, self).has_option(self.name, option) + + def getint(self, option, default=0): + try: + val = super(ServiceDef, self).get(self.name, option) + val = int(val) + except: + val = int(default) + return val + + def getbool(self, option, default=False): + try: + val = super(ServiceDef, self).get(self.name, option) + if val.lower() == 'true': + val = True + else: + val = False + except: + val = default + return val + + def get_obj(self, name): + """ + Returns the AWS object associated with a given option. + + The heuristics used are a bit lame. If the option name contains + the word 'bucket' it is assumed to be an S3 bucket, if the name + contains the word 'queue' it is assumed to be an SQS queue and + if it contains the word 'domain' it is assumed to be a SimpleDB + domain. If the option name specified does not exist in the + config file or if the AWS object cannot be retrieved this + returns None. + """ + val = self.get(name) + if not val: + return None + if name.find('queue') >= 0: + obj = boto.lookup('sqs', val) + if obj: + obj.set_message_class(ServiceMessage) + elif name.find('bucket') >= 0: + obj = boto.lookup('s3', val) + elif name.find('domain') >= 0: + obj = boto.lookup('sdb', val) + else: + obj = None + return obj + + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/services/sonofmmm.cfg b/desktop/core/ext-py/boto-2.38.0/boto/services/sonofmmm.cfg new file mode 100644 index 0000000000000000000000000000000000000000..d70d3794d5d4645f87b126a1e49c8809bfc14cd6 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/services/sonofmmm.cfg @@ -0,0 +1,43 @@ +# +# Your AWS Credentials +# You only need to supply these in this file if you are not using +# the boto tools to start your service +# +#[Credentials] +#aws_access_key_id = +#aws_secret_access_key = + +# +# Fill out this section if you want emails from the service +# when it starts and stops +# +#[Notification] +#smtp_host = +#smtp_user = +#smtp_pass = +#smtp_from = +#smtp_to = + +[Pyami] +scripts = boto.services.sonofmmm.SonOfMMM + +[SonOfMMM] +# id of the AMI to be launched +ami_id = ami-dc799cb5 +# number of times service will read an empty queue before exiting +# a negative value will cause the service to run forever +retry_count = 5 +# seconds to wait after empty queue read before reading again +loop_delay = 10 +# average time it takes to process a transaction +# controls invisibility timeout of messages +processing_time = 60 +ffmpeg_args = -y -i %%s -f mov -r 29.97 -b 1200kb -mbd 2 -flags +4mv+trell -aic 2 -cmp 2 -subcmp 2 -ar 48000 -ab 19200 -s 320x240 -vcodec mpeg4 -acodec libfaac %%s +output_mimetype = video/quicktime +output_ext = .mov +input_bucket = +output_bucket = +output_domain = +output_queue = +input_queue = + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/services/sonofmmm.py b/desktop/core/ext-py/boto-2.38.0/boto/services/sonofmmm.py new file mode 100644 index 
0000000000000000000000000000000000000000..3ef60838b8017946bbd52b2c4ffecb6c8cf40e1f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/services/sonofmmm.py @@ -0,0 +1,81 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import boto +from boto.services.service import Service +from boto.services.message import ServiceMessage +import os +import mimetypes + +class SonOfMMM(Service): + + def __init__(self, config_file=None): + super(SonOfMMM, self).__init__(config_file) + self.log_file = '%s.log' % self.instance_id + self.log_path = os.path.join(self.working_dir, self.log_file) + boto.set_file_logger(self.name, self.log_path) + if self.sd.has_option('ffmpeg_args'): + self.command = '/usr/local/bin/ffmpeg ' + self.sd.get('ffmpeg_args') + else: + self.command = '/usr/local/bin/ffmpeg -y -i %s %s' + self.output_mimetype = self.sd.get('output_mimetype') + if self.sd.has_option('output_ext'): + self.output_ext = self.sd.get('output_ext') + else: + self.output_ext = mimetypes.guess_extension(self.output_mimetype) + self.output_bucket = self.sd.get_obj('output_bucket') + self.input_bucket = self.sd.get_obj('input_bucket') + # check to see if there are any messages in the queue; + # if not, create messages for all files in input_bucket + m = self.input_queue.read(1) + if not m: + self.queue_files() + + def queue_files(self): + boto.log.info('Queueing files from %s' % self.input_bucket.name) + for key in self.input_bucket: + boto.log.info('Queueing %s' % key.name) + m = ServiceMessage() + if self.output_bucket: + d = {'OutputBucket' : self.output_bucket.name} + else: + d = None + m.for_key(key, d) + self.input_queue.write(m) + + def process_file(self, in_file_name, msg): + base, ext = os.path.splitext(in_file_name) + out_file_name = os.path.join(self.working_dir, + base+self.output_ext) + command = self.command % (in_file_name, out_file_name) + boto.log.info('running:\n%s' % command) + status = self.run(command) + if status == 0: + return [(out_file_name, self.output_mimetype)] + else: + return [] + + def shutdown(self): + if os.path.isfile(self.log_path): + if self.output_bucket: + key = self.output_bucket.new_key(self.log_file) + key.set_contents_from_filename(self.log_path) + super(SonOfMMM, self).shutdown() diff --git a/desktop/core/ext-py/boto-2.38.0/boto/services/submit.py b/desktop/core/ext-py/boto-2.38.0/boto/services/submit.py new file mode 100644 index 0000000000000000000000000000000000000000..69be236adb3a52a9ee6cc2941e7f5be5db463afc ---
/dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/services/submit.py @@ -0,0 +1,87 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import time +import os + + +class Submitter(object): + + def __init__(self, sd): + self.sd = sd + self.input_bucket = self.sd.get_obj('input_bucket') + self.output_bucket = self.sd.get_obj('output_bucket') + self.output_domain = self.sd.get_obj('output_domain') + self.queue = self.sd.get_obj('input_queue') + + def get_key_name(self, fullpath, prefix): + key_name = fullpath[len(prefix):] + l = key_name.split(os.sep) + return '/'.join(l) + + def write_message(self, key, metadata): + if self.queue: + m = self.queue.new_message() + m.for_key(key, metadata) + if self.output_bucket: + m['OutputBucket'] = self.output_bucket.name + self.queue.write(m) + + def submit_file(self, path, metadata=None, cb=None, num_cb=0, prefix='/'): + if not metadata: + metadata = {} + key_name = self.get_key_name(path, prefix) + k = self.input_bucket.new_key(key_name) + k.update_metadata(metadata) + k.set_contents_from_filename(path, replace=False, cb=cb, num_cb=num_cb) + self.write_message(k, metadata) + + def submit_path(self, path, tags=None, ignore_dirs=None, cb=None, num_cb=0, status=False, prefix='/'): + path = os.path.expanduser(path) + path = os.path.expandvars(path) + path = os.path.abspath(path) + total = 0 + metadata = {} + if tags: + metadata['Tags'] = tags + l = [] + for t in time.gmtime(): + l.append(str(t)) + metadata['Batch'] = '_'.join(l) + if self.output_domain: + self.output_domain.put_attributes(metadata['Batch'], {'type' : 'Batch'}) + if os.path.isdir(path): + for root, dirs, files in os.walk(path): + if ignore_dirs: + for ignore in ignore_dirs: + if ignore in dirs: + dirs.remove(ignore) + for file in files: + fullpath = os.path.join(root, file) + if status: + print('Submitting %s' % fullpath) + self.submit_file(fullpath, metadata, cb, num_cb, prefix) + total += 1 + elif os.path.isfile(path): + self.submit_file(path, metadata, cb, num_cb) + total += 1 + else: + print('problem with %s' % path) + return (metadata['Batch'], total) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ses/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/ses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ee32451699ef3d10da24267890eaaca083372cf1 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ses/__init__.py @@ -0,0 +1,52 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# 
Copyright (c) 2011 Harry Marr http://hmarr.com/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.ses.connection import SESConnection +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the SES service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` instances + """ + return get_regions('ses', connection_cls=SESConnection) + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.ses.connection.SESConnection`. + + :type: str + :param region_name: The name of the region to connect to. + + :rtype: :class:`boto.ses.connection.SESConnection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ses/connection.py b/desktop/core/ext-py/boto-2.38.0/boto/ses/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..244029a07ad1fc13da65b4bbba9def6dd08f54da --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ses/connection.py @@ -0,0 +1,565 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011 Harry Marr http://hmarr.com/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
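+
+# Example usage (a minimal sketch; assumes credentials are available via
+# the usual boto mechanisms, e.g. environment variables or a boto config
+# file):
+#
+#     import boto.ses
+#     conn = boto.ses.connect_to_region('us-east-1')
+#     conn.verify_email_address('sender@example.com')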
+import re +import base64 + +from boto.compat import six, urllib +from boto.connection import AWSAuthConnection +from boto.exception import BotoServerError +from boto.regioninfo import RegionInfo +import boto +import boto.jsonresponse +from boto.ses import exceptions as ses_exceptions + + +class SESConnection(AWSAuthConnection): + + ResponseError = BotoServerError + DefaultRegionName = 'us-east-1' + DefaultRegionEndpoint = 'email.us-east-1.amazonaws.com' + APIVersion = '2010-12-01' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + security_token=None, validate_certs=True, profile_name=None): + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + self.region = region + super(SESConnection, self).__init__(self.region.endpoint, + aws_access_key_id, aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, debug, + https_connection_factory, path, + security_token=security_token, + validate_certs=validate_certs, + profile_name=profile_name) + + def _required_auth_capability(self): + return ['ses'] + + def _build_list_params(self, params, items, label): + """Add an AWS API-compatible parameter list to a dictionary. + + :type params: dict + :param params: The parameter dictionary + + :type items: list + :param items: Items to be included in the list + + :type label: string + :param label: The parameter list's name + """ + if isinstance(items, six.string_types): + items = [items] + for i in range(1, len(items) + 1): + params['%s.%d' % (label, i)] = items[i - 1] + + def _make_request(self, action, params=None): + """Make a call to the SES API. + + :type action: string + :param action: The API method to use (e.g. SendRawEmail) + + :type params: dict + :param params: Parameters that will be sent as POST data with the API + call. + """ + ct = 'application/x-www-form-urlencoded; charset=UTF-8' + headers = {'Content-Type': ct} + params = params or {} + params['Action'] = action + + for k, v in params.items(): + if isinstance(v, six.text_type): # UTF-8 encode only if it's Unicode + params[k] = v.encode('utf-8') + + response = super(SESConnection, self).make_request( + 'POST', + '/', + headers=headers, + data=urllib.parse.urlencode(params) + ) + body = response.read().decode('utf-8') + if response.status == 200: + list_markers = ('VerifiedEmailAddresses', 'Identities', + 'DkimTokens', 'DkimAttributes', + 'VerificationAttributes', 'SendDataPoints') + item_markers = ('member', 'item', 'entry') + + e = boto.jsonresponse.Element(list_marker=list_markers, + item_marker=item_markers) + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e + else: + # HTTP codes other than 200 are considered errors. Go through + # some error handling to determine which exception gets raised, + self._handle_error(response, body) + + def _handle_error(self, response, body): + """ + Handle raising the correct exception, depending on the error. Many + errors share the same HTTP response code, meaning we have to get really + kludgey and do string searches to figure out what went wrong. + """ + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + + if "Address blacklisted." in body: + # Delivery failures happened frequently enough with the recipient's + # email address for Amazon to blacklist it. 
After a day or three, + # they'll be automatically removed, and delivery can be attempted + # again (if you write the code to do so in your application). + ExceptionToRaise = ses_exceptions.SESAddressBlacklistedError + exc_reason = "Address blacklisted." + elif "Email address is not verified." in body: + # This error happens when the "Reply-To" value passed to + # send_email() hasn't been verified yet. + ExceptionToRaise = ses_exceptions.SESAddressNotVerifiedError + exc_reason = "Email address is not verified." + elif "Daily message quota exceeded." in body: + # Encountered when your account exceeds the maximum total number + # of emails per 24 hours. + ExceptionToRaise = ses_exceptions.SESDailyQuotaExceededError + exc_reason = "Daily message quota exceeded." + elif "Maximum sending rate exceeded." in body: + # Your account has sent above its allowed requests a second rate. + ExceptionToRaise = ses_exceptions.SESMaxSendingRateExceededError + exc_reason = "Maximum sending rate exceeded." + elif "Domain ends with dot." in body: + # Recipient address ends with a dot/period. This is invalid. + ExceptionToRaise = ses_exceptions.SESDomainEndsWithDotError + exc_reason = "Domain ends with dot." + elif "Local address contains control or whitespace" in body: + # I think this pertains to the recipient address. + ExceptionToRaise = ses_exceptions.SESLocalAddressCharacterError + exc_reason = "Local address contains control or whitespace." + elif "Illegal address" in body: + # A clearly mal-formed address. + ExceptionToRaise = ses_exceptions.SESIllegalAddressError + exc_reason = "Illegal address" + # The re.search is to distinguish from the + # SESAddressNotVerifiedError error above. + elif re.search('Identity.*is not verified', body): + ExceptionToRaise = ses_exceptions.SESIdentityNotVerifiedError + exc_reason = "Identity is not verified." + elif "ownership not confirmed" in body: + ExceptionToRaise = ses_exceptions.SESDomainNotConfirmedError + exc_reason = "Domain ownership is not confirmed." + else: + # This is either a common AWS error, or one that we don't devote + # its own exception to. + ExceptionToRaise = self.ResponseError + exc_reason = response.reason + + raise ExceptionToRaise(response.status, exc_reason, body) + + def send_email(self, source, subject, body, to_addresses, + cc_addresses=None, bcc_addresses=None, + format='text', reply_addresses=None, + return_path=None, text_body=None, html_body=None): + """Composes an email message based on input data, and then immediately + queues the message for sending. + + :type source: string + :param source: The sender's email address. + + :type subject: string + :param subject: The subject of the message: A short summary of the + content, which will appear in the recipient's inbox. + + :type body: string + :param body: The message body. + + :type to_addresses: list of strings or string + :param to_addresses: The To: field(s) of the message. + + :type cc_addresses: list of strings or string + :param cc_addresses: The CC: field(s) of the message. + + :type bcc_addresses: list of strings or string + :param bcc_addresses: The BCC: field(s) of the message. + + :type format: string + :param format: The format of the message's body, must be either "text" + or "html". + + :type reply_addresses: list of strings or string + :param reply_addresses: The reply-to email address(es) for the + message. If the recipient replies to the + message, each reply-to address will + receive the reply. 
+ + :type return_path: string + :param return_path: The email address to which bounce notifications are + to be forwarded. If the message cannot be delivered + to the recipient, then an error message will be + returned from the recipient's ISP; this message + will then be forwarded to the email address + specified by the ReturnPath parameter. + + :type text_body: string + :param text_body: The text body to send with this email. + + :type html_body: string + :param html_body: The html body to send with this email. + + """ + format = format.lower().strip() + if body is not None: + if format == "text": + if text_body is not None: + raise Warning("You've passed in both a body and a " + "text_body; please choose one or the other.") + text_body = body + else: + if html_body is not None: + raise Warning("You've passed in both a body and an " + "html_body; please choose one or the other.") + html_body = body + + params = { + 'Source': source, + 'Message.Subject.Data': subject, + } + + if return_path: + params['ReturnPath'] = return_path + + if html_body is not None: + params['Message.Body.Html.Data'] = html_body + if text_body is not None: + params['Message.Body.Text.Data'] = text_body + + if(format not in ("text", "html")): + raise ValueError("'format' argument must be 'text' or 'html'") + + if(not (html_body or text_body)): + raise ValueError("No text or html body found for mail") + + self._build_list_params(params, to_addresses, + 'Destination.ToAddresses.member') + if cc_addresses: + self._build_list_params(params, cc_addresses, + 'Destination.CcAddresses.member') + + if bcc_addresses: + self._build_list_params(params, bcc_addresses, + 'Destination.BccAddresses.member') + + if reply_addresses: + self._build_list_params(params, reply_addresses, + 'ReplyToAddresses.member') + + return self._make_request('SendEmail', params) + + def send_raw_email(self, raw_message, source=None, destinations=None): + """Sends an email message, with header and content specified by the + client. The SendRawEmail action is useful for sending multipart MIME + emails, with attachments or inline content. The raw text of the message + must comply with Internet email standards; otherwise, the message + cannot be sent. + + :type source: string + :param source: The sender's email address. Amazon's docs say: + + If you specify the Source parameter, then bounce notifications and + complaints will be sent to this email address. This takes precedence + over any Return-Path header that you might include in the raw text of + the message. + + :type raw_message: string + :param raw_message: The raw text of the message. The client is + responsible for ensuring the following: + + - Message must contain a header and a body, separated by a blank line. + - All required header fields must be present. + - Each part of a multipart MIME message must be formatted properly. + - MIME content types must be among those supported by Amazon SES. + Refer to the Amazon SES Developer Guide for more details. + - Content must be base64-encoded, if MIME requires it. + + :type destinations: list of strings or string + :param destinations: A list of destinations for the message. 
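+
+        A minimal sketch (addresses are placeholders; while an account is
+        in the SES sandbox, both sender and recipient must be verified)::
+
+            from email.mime.text import MIMEText
+
+            msg = MIMEText('Hello from SES')
+            msg['Subject'] = 'Test message'
+            msg['From'] = 'sender@example.com'
+            msg['To'] = 'recipient@example.com'
+            conn.send_raw_email(msg.as_string(),
+                                source='sender@example.com',
+                                destinations=['recipient@example.com'])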
+ + """ + + if isinstance(raw_message, six.text_type): + raw_message = raw_message.encode('utf-8') + + params = { + 'RawMessage.Data': base64.b64encode(raw_message), + } + + if source: + params['Source'] = source + + if destinations: + self._build_list_params(params, destinations, + 'Destinations.member') + + return self._make_request('SendRawEmail', params) + + def list_verified_email_addresses(self): + """Fetch a list of the email addresses that have been verified. + + :rtype: dict + :returns: A ListVerifiedEmailAddressesResponse structure. Note that + keys must be unicode strings. + """ + return self._make_request('ListVerifiedEmailAddresses') + + def get_send_quota(self): + """Fetches the user's current activity limits. + + :rtype: dict + :returns: A GetSendQuotaResponse structure. Note that keys must be + unicode strings. + """ + return self._make_request('GetSendQuota') + + def get_send_statistics(self): + """Fetches the user's sending statistics. The result is a list of data + points, representing the last two weeks of sending activity. + + Each data point in the list contains statistics for a 15-minute + interval. + + :rtype: dict + :returns: A GetSendStatisticsResponse structure. Note that keys must be + unicode strings. + """ + return self._make_request('GetSendStatistics') + + def delete_verified_email_address(self, email_address): + """Deletes the specified email address from the list of verified + addresses. + + :type email_adddress: string + :param email_address: The email address to be removed from the list of + verified addreses. + + :rtype: dict + :returns: A DeleteVerifiedEmailAddressResponse structure. Note that + keys must be unicode strings. + """ + return self._make_request('DeleteVerifiedEmailAddress', { + 'EmailAddress': email_address, + }) + + def verify_email_address(self, email_address): + """Verifies an email address. This action causes a confirmation email + message to be sent to the specified address. + + :type email_adddress: string + :param email_address: The email address to be verified. + + :rtype: dict + :returns: A VerifyEmailAddressResponse structure. Note that keys must + be unicode strings. + """ + return self._make_request('VerifyEmailAddress', { + 'EmailAddress': email_address, + }) + + def verify_domain_dkim(self, domain): + """ + Returns a set of DNS records, or tokens, that must be published in the + domain name's DNS to complete the DKIM verification process. These + tokens are DNS ``CNAME`` records that point to DKIM public keys hosted + by Amazon SES. To complete the DKIM verification process, these tokens + must be published in the domain's DNS. The tokens must remain + published in order for Easy DKIM signing to function correctly. + + After the tokens are added to the domain's DNS, Amazon SES will be able + to DKIM-sign email originating from that domain. To enable or disable + Easy DKIM signing for a domain, use the ``SetIdentityDkimEnabled`` + action. For more information about Easy DKIM, go to the `Amazon SES + Developer Guide + `_. + + :type domain: string + :param domain: The domain name. + + """ + return self._make_request('VerifyDomainDkim', { + 'Domain': domain, + }) + + def set_identity_dkim_enabled(self, identity, dkim_enabled): + """Enables or disables DKIM signing of email sent from an identity. 
+
+        * If Easy DKIM signing is enabled for a domain name identity
+          (e.g., ``example.com``), then Amazon SES will DKIM-sign all email
+          sent by addresses under that domain name
+          (e.g., ``user@example.com``).
+        * If Easy DKIM signing is enabled for an email address, then Amazon
+          SES will DKIM-sign all email sent by that email address.
+
+        For email addresses (e.g., ``user@example.com``), you can only enable
+        Easy DKIM signing if the corresponding domain (e.g., ``example.com``)
+        has been set up for Easy DKIM using the AWS Console or the
+        ``VerifyDomainDkim`` action.
+
+        :type identity: string
+        :param identity: An email address or domain name.
+
+        :type dkim_enabled: bool
+        :param dkim_enabled: Specifies whether or not to enable DKIM signing.
+
+        """
+        return self._make_request('SetIdentityDkimEnabled', {
+            'Identity': identity,
+            'DkimEnabled': 'true' if dkim_enabled else 'false'
+        })
+
+    def get_identity_dkim_attributes(self, identities):
+        """Get attributes associated with a list of verified identities.
+
+        Given a list of verified identities (email addresses and/or domains),
+        returns a structure describing identity notification attributes.
+
+        :type identities: list
+        :param identities: A list of verified identities (email addresses
+                           and/or domains).
+
+        """
+        params = {}
+        self._build_list_params(params, identities, 'Identities.member')
+        return self._make_request('GetIdentityDkimAttributes', params)
+
+    def list_identities(self):
+        """Returns a list containing all of the identities (email addresses
+        and domains) for a specific AWS Account, regardless of
+        verification status.
+
+        :rtype: dict
+        :returns: A ListIdentitiesResponse structure. Note that
+                  keys must be unicode strings.
+        """
+        return self._make_request('ListIdentities')
+
+    def get_identity_verification_attributes(self, identities):
+        """Given a list of identities (email addresses and/or domains),
+        returns the verification status and (for domain identities)
+        the verification token for each identity.
+
+        :type identities: list of strings or string
+        :param identities: List of identities.
+
+        :rtype: dict
+        :returns: A GetIdentityVerificationAttributesResponse structure.
+                  Note that keys must be unicode strings.
+        """
+        params = {}
+        self._build_list_params(params, identities,
+                                'Identities.member')
+        return self._make_request('GetIdentityVerificationAttributes', params)
+
+    def verify_domain_identity(self, domain):
+        """Verifies a domain.
+
+        :type domain: string
+        :param domain: The domain to be verified.
+
+        :rtype: dict
+        :returns: A VerifyDomainIdentityResponse structure. Note that
+                  keys must be unicode strings.
+        """
+        return self._make_request('VerifyDomainIdentity', {
+            'Domain': domain,
+        })
+
+    def verify_email_identity(self, email_address):
+        """Verifies an email address. This action causes a confirmation
+        email message to be sent to the specified address.
+
+        :type email_address: string
+        :param email_address: The email address to be verified.
+
+        :rtype: dict
+        :returns: A VerifyEmailIdentityResponse structure. Note that keys must
+                  be unicode strings.
+        """
+        return self._make_request('VerifyEmailIdentity', {
+            'EmailAddress': email_address,
+        })
+
+    def delete_identity(self, identity):
+        """Deletes the specified identity (email address or domain) from
+        the list of verified identities.
+
+        :type identity: string
+        :param identity: The identity to be deleted.
+
+        :rtype: dict
+        :returns: A DeleteIdentityResponse structure. Note that keys must
+                  be unicode strings.
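+
+        A hedged sketch of the verify/inspect/delete cycle (the domain
+        name is a placeholder)::
+
+            conn.verify_domain_identity('example.com')
+            conn.get_identity_verification_attributes(['example.com'])
+            # ...later, once the identity is no longer needed:
+            conn.delete_identity('example.com')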
+ """ + return self._make_request('DeleteIdentity', { + 'Identity': identity, + }) + + def set_identity_notification_topic(self, identity, notification_type, sns_topic=None): + """Sets an SNS topic to publish bounce or complaint notifications for + emails sent with the given identity as the Source. Publishing to topics + may only be disabled when feedback forwarding is enabled. + + :type identity: string + :param identity: An email address or domain name. + + :type notification_type: string + :param notification_type: The type of feedback notifications that will + be published to the specified topic. + Valid Values: Bounce | Complaint | Delivery + + :type sns_topic: string or None + :param sns_topic: The Amazon Resource Name (ARN) of the Amazon Simple + Notification Service (Amazon SNS) topic. + """ + params = { + 'Identity': identity, + 'NotificationType': notification_type + } + if sns_topic: + params['SnsTopic'] = sns_topic + return self._make_request('SetIdentityNotificationTopic', params) + + def set_identity_feedback_forwarding_enabled(self, identity, forwarding_enabled=True): + """ + Enables or disables SES feedback notification via email. + Feedback forwarding may only be disabled when both complaint and + bounce topics are set. + + :type identity: string + :param identity: An email address or domain name. + + :type forwarding_enabled: bool + :param forwarding_enabled: Specifies whether or not to enable feedback forwarding. + """ + return self._make_request('SetIdentityFeedbackForwardingEnabled', { + 'Identity': identity, + 'ForwardingEnabled': 'true' if forwarding_enabled else 'false' + }) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/ses/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/ses/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..d5649f61a6ea2055f23c12039797f473d1bf4f9a --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/ses/exceptions.py @@ -0,0 +1,80 @@ +""" +Various exceptions that are specific to the SES module. +""" +from boto.exception import BotoServerError + + +class SESError(BotoServerError): + """ + Sub-class all SES-related errors from here. Don't raise this error + directly from anywhere. The only thing this gets us is the ability to + catch SESErrors separately from the more generic, top-level + BotoServerError exception. + """ + pass + + +class SESAddressNotVerifiedError(SESError): + """ + Raised when a "Reply-To" address has not been validated in SES yet. + """ + pass + + +class SESIdentityNotVerifiedError(SESError): + """ + Raised when an identity (domain or address) has not been verified in SES yet. + """ + pass + + +class SESDomainNotConfirmedError(SESError): + """ + """ + pass + + +class SESAddressBlacklistedError(SESError): + """ + After you attempt to send mail to an address, and delivery repeatedly + fails, said address is blacklisted for at least 24 hours. The blacklisting + eventually expires, and you are able to attempt delivery again. If you + attempt to send mail to a blacklisted email, this is raised. + """ + pass + + +class SESDailyQuotaExceededError(SESError): + """ + Your account's daily (rolling 24 hour total) allotment of outbound emails + has been exceeded. + """ + pass + + +class SESMaxSendingRateExceededError(SESError): + """ + Your account's requests/second limit has been exceeded. + """ + pass + + +class SESDomainEndsWithDotError(SESError): + """ + Recipient's email address' domain ends with a period/dot. 
+ """ + pass + + +class SESLocalAddressCharacterError(SESError): + """ + An address contained a control or whitespace character. + """ + pass + + +class SESIllegalAddressError(SESError): + """ + Raised when an illegal address is encountered. + """ + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sns/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/sns/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2fb882cc4145c068966a636a33dc1135c9b1bc04 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sns/__init__.py @@ -0,0 +1,54 @@ +# Copyright (c) 2010-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010-2011, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +# this is here for backward compatibility +# originally, the SNSConnection class was defined here +from boto.sns.connection import SNSConnection +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the SNS service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` instances + """ + return get_regions('sns', connection_cls=SNSConnection) + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.sns.connection.SNSConnection`. + + :type: str + :param region_name: The name of the region to connect to. + + :rtype: :class:`boto.sns.connection.SNSConnection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sns/connection.py b/desktop/core/ext-py/boto-2.38.0/boto/sns/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..5a6da205f4b208c6dc26deed943625f1d1312ca6 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sns/connection.py @@ -0,0 +1,765 @@ +# Copyright (c) 2010-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import uuid +import hashlib + +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.compat import json +import boto + + +class SNSConnection(AWSQueryConnection): + """ + Amazon Simple Notification Service + Amazon Simple Notification Service (Amazon SNS) is a web service + that enables you to build distributed web-enabled applications. + Applications can use Amazon SNS to easily push real-time + notification messages to interested subscribers over multiple + delivery protocols. For more information about this product see + `http://aws.amazon.com/sns`_. For detailed information about + Amazon SNS features and their associated API calls, see the + `Amazon SNS Developer Guide`_. + + We also provide SDKs that enable you to access Amazon SNS from + your preferred programming language. The SDKs contain + functionality that automatically takes care of tasks such as: + cryptographically signing your service requests, retrying + requests, and handling error responses. For a list of available + SDKs, go to `Tools for Amazon Web Services`_. + """ + DefaultRegionName = boto.config.get('Boto', 'sns_region_name', 'us-east-1') + DefaultRegionEndpoint = boto.config.get('Boto', 'sns_region_endpoint', + 'sns.us-east-1.amazonaws.com') + APIVersion = boto.config.get('Boto', 'sns_version', '2010-03-31') + + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + security_token=None, validate_certs=True, + profile_name=None): + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint, + connection_cls=SNSConnection) + self.region = region + super(SNSConnection, self).__init__(aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, + self.region.endpoint, debug, + https_connection_factory, path, + security_token=security_token, + validate_certs=validate_certs, + profile_name=profile_name) + + def _build_dict_as_list_params(self, params, dictionary, name): + """ + Serialize a parameter 'name' which value is a 'dictionary' into a list of parameters. 
+
+        See: http://docs.aws.amazon.com/sns/latest/api/API_SetPlatformApplicationAttributes.html
+        For example::
+
+            dictionary = {'PlatformPrincipal': 'foo', 'PlatformCredential': 'bar'}
+            name = 'Attributes'
+
+        would result in params dict being populated with:
+            Attributes.entry.1.key = PlatformPrincipal
+            Attributes.entry.1.value = foo
+            Attributes.entry.2.key = PlatformCredential
+            Attributes.entry.2.value = bar
+
+        :param params: the resulting parameters will be added to this dict
+        :param dictionary: dict - value of the serialized parameter
+        :param name: name of the serialized parameter
+        """
+        items = sorted(dictionary.items(), key=lambda x: x[0])
+        for kv, index in zip(items, list(range(1, len(items) + 1))):
+            key, value = kv
+            prefix = '%s.entry.%s' % (name, index)
+            params['%s.key' % prefix] = key
+            params['%s.value' % prefix] = value
+
+    def _required_auth_capability(self):
+        return ['hmac-v4']
+
+    def get_all_topics(self, next_token=None):
+        """
+        :type next_token: string
+        :param next_token: Token returned by the previous call to
+                           this method.
+
+        """
+        params = {}
+        if next_token:
+            params['NextToken'] = next_token
+        return self._make_request('ListTopics', params)
+
+    def get_topic_attributes(self, topic):
+        """
+        Get attributes of a Topic
+
+        :type topic: string
+        :param topic: The ARN of the topic.
+
+        """
+        params = {'TopicArn': topic}
+        return self._make_request('GetTopicAttributes', params)
+
+    def set_topic_attributes(self, topic, attr_name, attr_value):
+        """
+        Set an attribute of a Topic
+
+        :type topic: string
+        :param topic: The ARN of the topic.
+
+        :type attr_name: string
+        :param attr_name: The name of the attribute you want to set.
+                          Only a subset of the topic's attributes are mutable.
+                          Valid values: Policy | DisplayName
+
+        :type attr_value: string
+        :param attr_value: The new value for the attribute.
+
+        """
+        params = {'TopicArn': topic,
+                  'AttributeName': attr_name,
+                  'AttributeValue': attr_value}
+        return self._make_request('SetTopicAttributes', params)
+
+    def add_permission(self, topic, label, account_ids, actions):
+        """
+        Adds a statement to a topic's access control policy, granting
+        access for the specified AWS accounts to the specified actions.
+
+        :type topic: string
+        :param topic: The ARN of the topic.
+
+        :type label: string
+        :param label: A unique identifier for the new policy statement.
+
+        :type account_ids: list of strings
+        :param account_ids: The AWS account ids of the users who will be
+                            given access to the specified actions.
+
+        :type actions: list of strings
+        :param actions: The actions you want to allow for each of the
+                        specified principal(s).
+
+        """
+        params = {'TopicArn': topic,
+                  'Label': label}
+        self.build_list_params(params, account_ids, 'AWSAccountId.member')
+        self.build_list_params(params, actions, 'ActionName.member')
+        return self._make_request('AddPermission', params)
+
+    def remove_permission(self, topic, label):
+        """
+        Removes a statement from a topic's access control policy.
+
+        :type topic: string
+        :param topic: The ARN of the topic.
+
+        :type label: string
+        :param label: A unique identifier for the policy statement
+                      to be removed.
+
+        """
+        params = {'TopicArn': topic,
+                  'Label': label}
+        return self._make_request('RemovePermission', params)
+
+    def create_topic(self, topic):
+        """
+        Create a new Topic.
+
+        :type topic: string
+        :param topic: The name of the new topic.
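+
+        A hedged sketch of extracting the new topic's ARN from the parsed
+        JSON response (key layout follows the SNS ``CreateTopic`` JSON
+        response)::
+
+            response = conn.create_topic('mytopic')
+            result = response['CreateTopicResponse']['CreateTopicResult']
+            topic_arn = result['TopicArn']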
+ + """ + params = {'Name': topic} + return self._make_request('CreateTopic', params) + + def delete_topic(self, topic): + """ + Delete an existing topic + + :type topic: string + :param topic: The ARN of the topic + + """ + params = {'TopicArn': topic} + return self._make_request('DeleteTopic', params, '/', 'GET') + + def publish(self, topic=None, message=None, subject=None, target_arn=None, + message_structure=None, message_attributes=None): + """ + Get properties of a Topic + + :type topic: string + :param topic: The ARN of the new topic. + + :type message: string + :param message: The message you want to send to the topic. + Messages must be UTF-8 encoded strings and + be at most 4KB in size. + + :type message_structure: string + :param message_structure: Optional parameter. If left as ``None``, + plain text will be sent. If set to ``json``, + your message should be a JSON string that + matches the structure described at + http://docs.aws.amazon.com/sns/latest/dg/PublishTopic.html#sns-message-formatting-by-protocol + + :type message_attributes: dict + :param message_attributes: Message attributes to set. Should be + of the form: + + .. code-block:: python + + { + "name1": { + "data_type": "Number", + "string_value": "42" + }, + "name2": { + "data_type": "String", + "string_value": "Bob" + } + } + + :type subject: string + :param subject: Optional parameter to be used as the "Subject" + line of the email notifications. + + :type target_arn: string + :param target_arn: Optional parameter for either TopicArn or + EndpointArn, but not both. + + """ + if message is None: + # To be backwards compatible when message did not have + # a default value and topic and message were required + # args. + raise TypeError("'message' is a required parameter") + params = {'Message': message} + if subject is not None: + params['Subject'] = subject + if topic is not None: + params['TopicArn'] = topic + if target_arn is not None: + params['TargetArn'] = target_arn + if message_structure is not None: + params['MessageStructure'] = message_structure + if message_attributes is not None: + keys = sorted(message_attributes.keys()) + for i, name in enumerate(keys, start=1): + attribute = message_attributes[name] + params['MessageAttributes.entry.{0}.Name'.format(i)] = name + if 'data_type' in attribute: + params['MessageAttributes.entry.{0}.Value.DataType'.format(i)] = \ + attribute['data_type'] + if 'string_value' in attribute: + params['MessageAttributes.entry.{0}.Value.StringValue'.format(i)] = \ + attribute['string_value'] + if 'binary_value' in attribute: + params['MessageAttributes.entry.{0}.Value.BinaryValue'.format(i)] = \ + attribute['binary_value'] + return self._make_request('Publish', params, '/', 'POST') + + def subscribe(self, topic, protocol, endpoint): + """ + Subscribe to a Topic. + + :type topic: string + :param topic: The ARN of the new topic. + + :type protocol: string + :param protocol: The protocol used to communicate with + the subscriber. Current choices are: + email|email-json|http|https|sqs|sms|application + + :type endpoint: string + :param endpoint: The location of the endpoint for + the subscriber. 
+                         * For email, this would be a valid email address
+                         * For email-json, this would be a valid email address
+                         * For http, this would be a URL beginning with http
+                         * For https, this would be a URL beginning with https
+                         * For sqs, this would be the ARN of an SQS Queue
+                         * For sms, this would be a phone number of an
+                           SMS-enabled device
+                         * For application, the endpoint is the EndpointArn
+                           of a mobile app and device.
+        """
+        params = {'TopicArn': topic,
+                  'Protocol': protocol,
+                  'Endpoint': endpoint}
+        return self._make_request('Subscribe', params)
+
+    def subscribe_sqs_queue(self, topic, queue):
+        """
+        Subscribe an SQS queue to a topic.
+
+        This is a convenience method that handles most of the complexity
+        involved in using an SQS queue as an endpoint for an SNS topic. To
+        achieve this the following operations are performed:
+
+        * The correct ARN is constructed for the SQS queue and that ARN is
+          then subscribed to the topic.
+        * A JSON policy document is constructed that grants permission to
+          the SNS topic to send messages to the SQS queue.
+        * This JSON policy is then associated with the SQS queue using
+          the queue's set_attribute method. If the queue already has
+          a policy associated with it, this process will add a Statement to
+          that policy. If no policy exists, a new policy will be created.
+
+        :type topic: string
+        :param topic: The ARN of the topic.
+
+        :type queue: A boto Queue object
+        :param queue: The queue you wish to subscribe to the SNS Topic.
+        """
+        q_arn = queue.arn
+        sid = hashlib.md5((topic + q_arn).encode('utf-8')).hexdigest()
+        sid_exists = False
+        resp = self.subscribe(topic, 'sqs', q_arn)
+        attr = queue.get_attributes('Policy')
+        if 'Policy' in attr:
+            policy = json.loads(attr['Policy'])
+        else:
+            policy = {}
+        if 'Version' not in policy:
+            policy['Version'] = '2008-10-17'
+        if 'Statement' not in policy:
+            policy['Statement'] = []
+        # See if a Statement with the Sid exists already.
+        for s in policy['Statement']:
+            if s['Sid'] == sid:
+                sid_exists = True
+        if not sid_exists:
+            statement = {'Action': 'SQS:SendMessage',
+                         'Effect': 'Allow',
+                         'Principal': {'AWS': '*'},
+                         'Resource': q_arn,
+                         'Sid': sid,
+                         'Condition': {'StringLike': {'aws:SourceArn': topic}}}
+            policy['Statement'].append(statement)
+        queue.set_attribute('Policy', json.dumps(policy))
+        return resp
+
+    def confirm_subscription(self, topic, token,
+                             authenticate_on_unsubscribe=False):
+        """
+        Confirm a subscription to a topic using the token sent to the
+        endpoint by an earlier Subscribe request.
+
+        :type topic: string
+        :param topic: The ARN of the topic.
+
+        :type token: string
+        :param token: Short-lived token sent to an endpoint during
+                      the Subscribe operation.
+
+        :type authenticate_on_unsubscribe: bool
+        :param authenticate_on_unsubscribe: Optional parameter indicating
+                                            that you wish to disable
+                                            unauthenticated unsubscription
+                                            of the subscription.
+
+        """
+        params = {'TopicArn': topic, 'Token': token}
+        if authenticate_on_unsubscribe:
+            params['AuthenticateOnUnsubscribe'] = 'true'
+        return self._make_request('ConfirmSubscription', params)
+
+    def unsubscribe(self, subscription):
+        """
+        Allows an endpoint owner to delete a subscription.
+        A confirmation message will be delivered.
+
+        :type subscription: string
+        :param subscription: The ARN of the subscription to be deleted.
+
+        """
+        params = {'SubscriptionArn': subscription}
+        return self._make_request('Unsubscribe', params)
+
+    def get_all_subscriptions(self, next_token=None):
+        """
+        Get list of all subscriptions.
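+
+        A hedged pagination sketch (key layout follows the parsed JSON
+        response)::
+
+            token = None
+            while True:
+                res = conn.get_all_subscriptions(next_token=token)
+                page = res['ListSubscriptionsResponse']['ListSubscriptionsResult']
+                for sub in page['Subscriptions']:
+                    print(sub['Endpoint'])
+                token = page['NextToken']
+                if not token:
+                    break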
+ + :type next_token: string + :param next_token: Token returned by the previous call to + this method. + + """ + params = {} + if next_token: + params['NextToken'] = next_token + return self._make_request('ListSubscriptions', params) + + def get_all_subscriptions_by_topic(self, topic, next_token=None): + """ + Get list of all subscriptions to a specific topic. + + :type topic: string + :param topic: The ARN of the topic for which you wish to + find subscriptions. + + :type next_token: string + :param next_token: Token returned by the previous call to + this method. + + """ + params = {'TopicArn': topic} + if next_token: + params['NextToken'] = next_token + return self._make_request('ListSubscriptionsByTopic', params) + + def create_platform_application(self, name=None, platform=None, + attributes=None): + """ + The `CreatePlatformApplication` action creates a platform + application object for one of the supported push notification + services, such as APNS and GCM, to which devices and mobile + apps may register. You must specify PlatformPrincipal and + PlatformCredential attributes when using the + `CreatePlatformApplication` action. The PlatformPrincipal is + received from the notification service. For APNS/APNS_SANDBOX, + PlatformPrincipal is "SSL certificate". For GCM, + PlatformPrincipal is not applicable. For ADM, + PlatformPrincipal is "client id". The PlatformCredential is + also received from the notification service. For + APNS/APNS_SANDBOX, PlatformCredential is "private key". For + GCM, PlatformCredential is "API key". For ADM, + PlatformCredential is "client secret". The + PlatformApplicationArn that is returned when using + `CreatePlatformApplication` is then used as an attribute for + the `CreatePlatformEndpoint` action. For more information, see + `Using Amazon SNS Mobile Push Notifications`_. + + :type name: string + :param name: Application names must be made up of only uppercase and + lowercase ASCII letters, numbers, underscores, hyphens, and + periods, and must be between 1 and 256 characters long. + + :type platform: string + :param platform: The following platforms are supported: ADM (Amazon + Device Messaging), APNS (Apple Push Notification Service), + APNS_SANDBOX, and GCM (Google Cloud Messaging). + + :type attributes: map + :param attributes: For a list of attributes, see + `SetPlatformApplicationAttributes`_ + + """ + params = {} + if name is not None: + params['Name'] = name + if platform is not None: + params['Platform'] = platform + if attributes is not None: + self._build_dict_as_list_params(params, attributes, 'Attributes') + return self._make_request(action='CreatePlatformApplication', + params=params) + + def set_platform_application_attributes(self, + platform_application_arn=None, + attributes=None): + """ + The `SetPlatformApplicationAttributes` action sets the + attributes of the platform application object for the + supported push notification services, such as APNS and GCM. + For more information, see `Using Amazon SNS Mobile Push + Notifications`_. + + :type platform_application_arn: string + :param platform_application_arn: PlatformApplicationArn for + SetPlatformApplicationAttributes action. + + :type attributes: map + :param attributes: + A map of the platform application attributes. Attributes in this map + include the following: + + + + `PlatformCredential` -- The credential received from the notification + service. For APNS/APNS_SANDBOX, PlatformCredential is "private + key". For GCM, PlatformCredential is "API key". 
For ADM, + PlatformCredential is "client secret". + + `PlatformPrincipal` -- The principal received from the notification + service. For APNS/APNS_SANDBOX, PlatformPrincipal is "SSL + certificate". For GCM, PlatformPrincipal is not applicable. For + ADM, PlatformPrincipal is "client id". + + `EventEndpointCreated` -- Topic ARN to which EndpointCreated event + notifications should be sent. + + `EventEndpointDeleted` -- Topic ARN to which EndpointDeleted event + notifications should be sent. + + `EventEndpointUpdated` -- Topic ARN to which EndpointUpdate event + notifications should be sent. + + `EventDeliveryFailure` -- Topic ARN to which DeliveryFailure event + notifications should be sent upon Direct Publish delivery failure + (permanent) to one of the application's endpoints. + + """ + params = {} + if platform_application_arn is not None: + params['PlatformApplicationArn'] = platform_application_arn + if attributes is not None: + self._build_dict_as_list_params(params, attributes, 'Attributes') + return self._make_request(action='SetPlatformApplicationAttributes', + params=params) + + def get_platform_application_attributes(self, + platform_application_arn=None): + """ + The `GetPlatformApplicationAttributes` action retrieves the + attributes of the platform application object for the + supported push notification services, such as APNS and GCM. + For more information, see `Using Amazon SNS Mobile Push + Notifications`_. + + :type platform_application_arn: string + :param platform_application_arn: PlatformApplicationArn for + GetPlatformApplicationAttributesInput. + + """ + params = {} + if platform_application_arn is not None: + params['PlatformApplicationArn'] = platform_application_arn + return self._make_request(action='GetPlatformApplicationAttributes', + params=params) + + def list_platform_applications(self, next_token=None): + """ + The `ListPlatformApplications` action lists the platform + application objects for the supported push notification + services, such as APNS and GCM. The results for + `ListPlatformApplications` are paginated and return a limited + list of applications, up to 100. If additional records are + available after the first page results, then a NextToken + string will be returned. To receive the next page, you call + `ListPlatformApplications` using the NextToken string received + from the previous call. When there are no more records to + return, NextToken will be null. For more information, see + `Using Amazon SNS Mobile Push Notifications`_. + + :type next_token: string + :param next_token: NextToken string is used when calling + ListPlatformApplications action to retrieve additional records that + are available after the first page results. + + """ + params = {} + if next_token is not None: + params['NextToken'] = next_token + return self._make_request(action='ListPlatformApplications', + params=params) + + def list_endpoints_by_platform_application(self, + platform_application_arn=None, + next_token=None): + """ + The `ListEndpointsByPlatformApplication` action lists the + endpoints and endpoint attributes for devices in a supported + push notification service, such as GCM and APNS. The results + for `ListEndpointsByPlatformApplication` are paginated and + return a limited list of endpoints, up to 100. If additional + records are available after the first page results, then a + NextToken string will be returned. To receive the next page, + you call `ListEndpointsByPlatformApplication` again using the + NextToken string received from the previous call. 
When there
+        are no more records to return, NextToken will be null. For
+        more information, see `Using Amazon SNS Mobile Push
+        Notifications`_.
+
+        :type platform_application_arn: string
+        :param platform_application_arn: PlatformApplicationArn for
+            ListEndpointsByPlatformApplicationInput action.
+
+        :type next_token: string
+        :param next_token: NextToken string is used when calling
+            ListEndpointsByPlatformApplication action to retrieve additional
+            records that are available after the first page results.
+
+        """
+        params = {}
+        if platform_application_arn is not None:
+            params['PlatformApplicationArn'] = platform_application_arn
+        if next_token is not None:
+            params['NextToken'] = next_token
+        return self._make_request(action='ListEndpointsByPlatformApplication',
+                                  params=params)
+
+    def delete_platform_application(self, platform_application_arn=None):
+        """
+        The `DeletePlatformApplication` action deletes a platform
+        application object for one of the supported push notification
+        services, such as APNS and GCM. For more information, see
+        `Using Amazon SNS Mobile Push Notifications`_.
+
+        :type platform_application_arn: string
+        :param platform_application_arn: PlatformApplicationArn of platform
+            application object to delete.
+
+        """
+        params = {}
+        if platform_application_arn is not None:
+            params['PlatformApplicationArn'] = platform_application_arn
+        return self._make_request(action='DeletePlatformApplication',
+                                  params=params)
+
+    def create_platform_endpoint(self, platform_application_arn=None,
+                                 token=None, custom_user_data=None,
+                                 attributes=None):
+        """
+        The `CreatePlatformEndpoint` action creates an endpoint for a
+        device and mobile app on one of the supported push notification
+        services, such as GCM and APNS. `CreatePlatformEndpoint`
+        requires the PlatformApplicationArn that is returned from
+        `CreatePlatformApplication`. The EndpointArn that is returned
+        when using `CreatePlatformEndpoint` can then be used by the
+        `Publish` action to send a message to a mobile app or by the
+        `Subscribe` action for subscription to a topic. For more
+        information, see `Using Amazon SNS Mobile Push
+        Notifications`_.
+
+        :type platform_application_arn: string
+        :param platform_application_arn: PlatformApplicationArn returned from
+            CreatePlatformApplication is used to create an endpoint.
+
+        :type token: string
+        :param token: Unique identifier created by the notification service
+            for an app on a device. The specific name for Token will vary,
+            depending on which notification service is being used. For
+            example, when using APNS as the notification service, you need
+            the device token. Alternatively, when using GCM or ADM, the
+            device token equivalent is called the registration ID.
+
+        :type custom_user_data: string
+        :param custom_user_data: Arbitrary user data to associate with the
+            endpoint. SNS does not use this data. The data must be in UTF-8
+            format and less than 2KB.
+
+        :type attributes: map
+        :param attributes: For a list of attributes, see
+            `SetEndpointAttributes`_.
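+
+        A hedged sketch (``app_arn`` and the device token are placeholders;
+        key layout follows the parsed JSON response)::
+
+            res = conn.create_platform_endpoint(
+                platform_application_arn=app_arn, token='DEVICE_TOKEN')
+            result = res['CreatePlatformEndpointResponse']
+            endpoint_arn = result['CreatePlatformEndpointResult']['EndpointArn']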
+ + """ + params = {} + if platform_application_arn is not None: + params['PlatformApplicationArn'] = platform_application_arn + if token is not None: + params['Token'] = token + if custom_user_data is not None: + params['CustomUserData'] = custom_user_data + if attributes is not None: + self._build_dict_as_list_params(params, attributes, 'Attributes') + return self._make_request(action='CreatePlatformEndpoint', + params=params) + + def delete_endpoint(self, endpoint_arn=None): + """ + The `DeleteEndpoint` action, which is idempotent, deletes the + endpoint from SNS. For more information, see `Using Amazon SNS + Mobile Push Notifications`_. + + :type endpoint_arn: string + :param endpoint_arn: EndpointArn of endpoint to delete. + + """ + params = {} + if endpoint_arn is not None: + params['EndpointArn'] = endpoint_arn + return self._make_request(action='DeleteEndpoint', params=params) + + def set_endpoint_attributes(self, endpoint_arn=None, attributes=None): + """ + The `SetEndpointAttributes` action sets the attributes for an + endpoint for a device on one of the supported push + notification services, such as GCM and APNS. For more + information, see `Using Amazon SNS Mobile Push + Notifications`_. + + :type endpoint_arn: string + :param endpoint_arn: EndpointArn used for SetEndpointAttributes action. + + :type attributes: map + :param attributes: + A map of the endpoint attributes. Attributes in this map include the + following: + + + + `CustomUserData` -- arbitrary user data to associate with the + endpoint. SNS does not use this data. The data must be in UTF-8 + format and less than 2KB. + + `Enabled` -- flag that enables/disables delivery to the endpoint. + Message Processor will set this to false when a notification + service indicates to SNS that the endpoint is invalid. Users can + set it back to true, typically after updating Token. + + `Token` -- device token, also referred to as a registration id, for + an app and mobile device. This is returned from the notification + service when an app and mobile device are registered with the + notification service. + + """ + params = {} + if endpoint_arn is not None: + params['EndpointArn'] = endpoint_arn + if attributes is not None: + self._build_dict_as_list_params(params, attributes, 'Attributes') + return self._make_request(action='SetEndpointAttributes', + params=params) + + def get_endpoint_attributes(self, endpoint_arn=None): + """ + The `GetEndpointAttributes` retrieves the endpoint attributes + for a device on one of the supported push notification + services, such as GCM and APNS. For more information, see + `Using Amazon SNS Mobile Push Notifications`_. + + :type endpoint_arn: string + :param endpoint_arn: EndpointArn for GetEndpointAttributes input. 
+ + """ + params = {} + if endpoint_arn is not None: + params['EndpointArn'] = endpoint_arn + return self._make_request(action='GetEndpointAttributes', + params=params) + + def _make_request(self, action, params, path='/', verb='GET'): + params['ContentType'] = 'JSON' + response = self.make_request(action=action, verb=verb, + path=path, params=params) + body = response.read().decode('utf-8') + boto.log.debug(body) + if response.status == 200: + return json.loads(body) + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sqs/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/sqs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ccad732ec3865f05b92087b5d7f083a575ffc55c --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sqs/__init__.py @@ -0,0 +1,46 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.sqs.regioninfo import SQSRegionInfo +from boto.regioninfo import get_regions + + +def regions(): + """ + Get all available regions for the SQS service. 
+ + :rtype: list + :return: A list of :class:`boto.sqs.regioninfo.RegionInfo` + """ + from boto.sqs.connection import SQSConnection + return get_regions( + 'sqs', + region_cls=SQSRegionInfo, + connection_cls=SQSConnection + ) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sqs/attributes.py b/desktop/core/ext-py/boto-2.38.0/boto/sqs/attributes.py new file mode 100644 index 0000000000000000000000000000000000000000..26c720416ffd986d35fd86fbf37faf98460ff877 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sqs/attributes.py @@ -0,0 +1,46 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an SQS Attribute Name/Value set +""" + +class Attributes(dict): + + def __init__(self, parent): + self.parent = parent + self.current_key = None + self.current_value = None + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Attribute': + self[self.current_key] = self.current_value + elif name == 'Name': + self.current_key = value + elif name == 'Value': + self.current_value = value + else: + setattr(self, name, value) + + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sqs/batchresults.py b/desktop/core/ext-py/boto-2.38.0/boto/sqs/batchresults.py new file mode 100644 index 0000000000000000000000000000000000000000..aa5f86b8be007172f3c586dc27896c70d196679e --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sqs/batchresults.py @@ -0,0 +1,95 @@ +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011 Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +A set of results returned by SendMessageBatch. +""" + +class ResultEntry(dict): + """ + The result (successful or unsuccessful) of a single + message within a send_message_batch request. + + In the case of a successful result, this dict-like + object will contain the following items: + + :ivar id: A string containing the user-supplied ID of the message. + :ivar message_id: A string containing the SQS ID of the new message. + :ivar message_md5: A string containing the MD5 hash of the message body. + + In the case of an error, this object will contain the following + items: + + :ivar id: A string containing the user-supplied ID of the message. + :ivar sender_fault: A boolean value. + :ivar error_code: A string containing a short description of the error. + :ivar error_message: A string containing a description of the error. + """ + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Id': + self['id'] = value + elif name == 'MessageId': + self['message_id'] = value + elif name == 'MD5OfMessageBody': + self['message_md5'] = value + elif name == 'SenderFault': + self['sender_fault'] = value + elif name == 'Code': + self['error_code'] = value + elif name == 'Message': + self['error_message'] = value + +class BatchResults(object): + """ + A container for the results of a send_message_batch request. + + :ivar results: A list of successful results. Each item in the + list will be an instance of :class:`ResultEntry`. + + :ivar errors: A list of unsuccessful results. Each item in the + list will be an instance of :class:`ResultEntry`. + """ + + def __init__(self, parent): + self.parent = parent + self.results = [] + self.errors = [] + + def startElement(self, name, attrs, connection): + if name.endswith('MessageBatchResultEntry'): + entry = ResultEntry() + self.results.append(entry) + return entry + if name == 'BatchResultErrorEntry': + entry = ResultEntry() + self.errors.append(entry) + return entry + return None + + def endElement(self, name, value, connection): + setattr(self, name, value) + + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sqs/bigmessage.py b/desktop/core/ext-py/boto-2.38.0/boto/sqs/bigmessage.py new file mode 100644 index 0000000000000000000000000000000000000000..e47ec045a1342c2df59c8a24985d7538503a9c96 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sqs/bigmessage.py @@ -0,0 +1,119 @@ +# Copyright (c) 2013 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. 
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import uuid
+
+import boto
+from boto.sqs.message import RawMessage
+from boto.exception import SQSDecodeError
+
+
+class BigMessage(RawMessage):
+    """
+    The BigMessage class provides large payloads (up to 5GB)
+    by storing the payload itself in S3 and then placing a reference
+    to the S3 object in the actual SQS message payload.
+
+    To create a BigMessage, you should create a BigMessage object
+    and pass in a file-like object as the ``body`` param and also
+    pass in an S3 URL specifying the bucket in which to store
+    the message body::
+
+        import boto.sqs
+        from boto.sqs.bigmessage import BigMessage
+
+        sqs = boto.sqs.connect_to_region('us-west-2')
+        queue = sqs.get_queue('myqueue')
+        fp = open('/path/to/bigmessage/data')
+        msg = BigMessage(queue, fp, 's3://mybucket')
+        queue.write(msg)
+
+    Passing in a fully-qualified S3 URL (e.g. s3://mybucket/foo)
+    is interpreted to mean that the body of the message is already
+    stored in S3 and that S3 URL is then used directly with no
+    content uploaded by BigMessage.
+    """
+
+    def __init__(self, queue=None, body=None, s3_url=None):
+        self.s3_url = s3_url
+        super(BigMessage, self).__init__(queue, body)
+
+    def _get_bucket_key(self, s3_url):
+        bucket_name = key_name = None
+        if s3_url:
+            if s3_url.startswith('s3://'):
+                # We need to split out the bucket from the key (if
+                # supplied).  We also have to be aware that someone
+                # may provide a trailing '/' character as in:
+                # s3://foo/ and we want to handle that.
+                s3_components = s3_url[5:].split('/', 1)
+                bucket_name = s3_components[0]
+                if len(s3_components) > 1:
+                    if s3_components[1]:
+                        key_name = s3_components[1]
+            else:
+                msg = 's3_url parameter should start with s3://'
+                raise SQSDecodeError(msg, self)
+        return bucket_name, key_name
+
+    def encode(self, value):
+        """
+        :type value: file-like object
+        :param value: A file-like object containing the content
+            of the message.  The actual content will be stored
+            in S3 and a link to the S3 object will be stored in
+            the message body.
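+
+        A sketch of the two cases handled (bucket and key names are
+        illustrative)::
+
+            BigMessage(queue, fp, 's3://mybucket')        # body uploaded to a new UUID key
+            BigMessage(queue, fp, 's3://mybucket/mykey')  # body assumed to be stored already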
+ """ + bucket_name, key_name = self._get_bucket_key(self.s3_url) + if bucket_name and key_name: + return self.s3_url + key_name = uuid.uuid4() + s3_conn = boto.connect_s3() + s3_bucket = s3_conn.get_bucket(bucket_name) + key = s3_bucket.new_key(key_name) + key.set_contents_from_file(value) + self.s3_url = 's3://%s/%s' % (bucket_name, key_name) + return self.s3_url + + def _get_s3_object(self, s3_url): + bucket_name, key_name = self._get_bucket_key(s3_url) + if bucket_name and key_name: + s3_conn = boto.connect_s3() + s3_bucket = s3_conn.get_bucket(bucket_name) + key = s3_bucket.get_key(key_name) + return key + else: + msg = 'Unable to decode S3 URL: %s' % s3_url + raise SQSDecodeError(msg, self) + + def decode(self, value): + self.s3_url = value + key = self._get_s3_object(value) + return key.get_contents_as_string() + + def delete(self): + # Delete the object in S3 first, then delete the SQS message + if self.s3_url: + key = self._get_s3_object(self.s3_url) + key.delete() + super(BigMessage, self).delete() + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sqs/connection.py b/desktop/core/ext-py/boto-2.38.0/boto/sqs/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..0eafc1fadf7a234a58eb16cb62985bde3831f783 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sqs/connection.py @@ -0,0 +1,537 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import boto +from boto.connection import AWSQueryConnection +from boto.sqs.regioninfo import SQSRegionInfo +from boto.sqs.queue import Queue +from boto.sqs.message import Message +from boto.sqs.attributes import Attributes +from boto.sqs.batchresults import BatchResults +from boto.exception import SQSError, BotoServerError + + +class SQSConnection(AWSQueryConnection): + """ + A Connection to the SQS Service. 
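+
+    A minimal usage sketch (region and queue names are illustrative)::
+
+        import boto.sqs
+        conn = boto.sqs.connect_to_region('us-east-1')
+        q = conn.create_queue('myqueue')
+        conn.send_message(q, 'hello world')
+        msgs = conn.receive_message(q, number_messages=1)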
+    """
+    DefaultRegionName = boto.config.get('Boto', 'sqs_region_name', 'us-east-1')
+    DefaultRegionEndpoint = boto.config.get('Boto', 'sqs_region_endpoint',
+                                            'queue.amazonaws.com')
+    APIVersion = boto.config.get('Boto', 'sqs_version', '2012-11-05')
+    DefaultContentType = 'text/plain'
+    ResponseError = SQSError
+    AuthServiceName = 'sqs'
+
+    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+                 is_secure=True, port=None, proxy=None, proxy_port=None,
+                 proxy_user=None, proxy_pass=None, debug=0,
+                 https_connection_factory=None, region=None, path='/',
+                 security_token=None, validate_certs=True, profile_name=None):
+        if not region:
+            region = SQSRegionInfo(self, self.DefaultRegionName,
+                                   self.DefaultRegionEndpoint)
+        self.region = region
+        super(SQSConnection, self).__init__(aws_access_key_id,
+                                            aws_secret_access_key,
+                                            is_secure, port,
+                                            proxy, proxy_port,
+                                            proxy_user, proxy_pass,
+                                            self.region.endpoint, debug,
+                                            https_connection_factory, path,
+                                            security_token=security_token,
+                                            validate_certs=validate_certs,
+                                            profile_name=profile_name)
+        self.auth_region_name = self.region.name
+
+    def _required_auth_capability(self):
+        return ['hmac-v4']
+
+    def create_queue(self, queue_name, visibility_timeout=None):
+        """
+        Create an SQS Queue.
+
+        :type queue_name: str or unicode
+        :param queue_name: The name of the new queue.  Names are
+            scoped to an account and need to be unique within that
+            account.  Calling this method on an existing queue name
+            will not return an error from SQS unless the value for
+            visibility_timeout is different than the value of the
+            existing queue of that name.  This is still an expensive
+            operation, though, and not the preferred way to check for
+            the existence of a queue.  See the
+            :func:`boto.sqs.connection.SQSConnection.lookup` method.
+
+        :type visibility_timeout: int
+        :param visibility_timeout: The default visibility timeout for
+            all messages written in the queue.  This can be overridden
+            on a per-message basis.
+
+        :rtype: :class:`boto.sqs.queue.Queue`
+        :return: The newly created queue.
+
+        """
+        params = {'QueueName': queue_name}
+        if visibility_timeout:
+            params['Attribute.1.Name'] = 'VisibilityTimeout'
+            params['Attribute.1.Value'] = int(visibility_timeout)
+        return self.get_object('CreateQueue', params, Queue)
+
+    def delete_queue(self, queue, force_deletion=False):
+        """
+        Delete an SQS Queue.
+
+        :type queue: A Queue object
+        :param queue: The SQS queue to be deleted
+
+        :type force_deletion: Boolean
+        :param force_deletion: A deprecated parameter that is no longer used by
+            SQS's API.
+
+        :rtype: bool
+        :return: True if the command succeeded, False otherwise
+        """
+        return self.get_status('DeleteQueue', None, queue.id)
+
+    def purge_queue(self, queue):
+        """
+        Purge all messages in an SQS Queue.
+
+        :type queue: A Queue object
+        :param queue: The SQS queue to be purged
+
+        :rtype: bool
+        :return: True if the command succeeded, False otherwise
+        """
+        return self.get_status('PurgeQueue', None, queue.id)
+
+    def get_queue_attributes(self, queue, attribute='All'):
+        """
+        Gets one or all attributes of a Queue
+
+        :type queue: A Queue object
+        :param queue: The SQS queue to get attributes for
+
+        :type attribute: str
+        :param attribute: The specific attribute requested.  If not
+            supplied, the default is to return all attributes.
+            Valid attributes are:
+
+            * All
+            * ApproximateNumberOfMessages
+            * ApproximateNumberOfMessagesNotVisible
+            * VisibilityTimeout
+            * CreatedTimestamp
+            * LastModifiedTimestamp
+            * Policy
+            * MaximumMessageSize
+            * MessageRetentionPeriod
+            * QueueArn
+            * ApproximateNumberOfMessagesDelayed
+            * DelaySeconds
+            * ReceiveMessageWaitTimeSeconds
+            * RedrivePolicy
+
+        :rtype: :class:`boto.sqs.attributes.Attributes`
+        :return: An Attributes object containing request value(s).
+        """
+        params = {'AttributeName' : attribute}
+        return self.get_object('GetQueueAttributes', params,
+                               Attributes, queue.id)
+
+    def set_queue_attribute(self, queue, attribute, value):
+        params = {'Attribute.Name' : attribute, 'Attribute.Value' : value}
+        return self.get_status('SetQueueAttributes', params, queue.id)
+
+    def receive_message(self, queue, number_messages=1,
+                        visibility_timeout=None, attributes=None,
+                        wait_time_seconds=None, message_attributes=None):
+        """
+        Read messages from an SQS Queue.
+
+        :type queue: A Queue object
+        :param queue: The Queue from which messages are read.
+
+        :type number_messages: int
+        :param number_messages: The maximum number of messages to read
+                                (default=1)
+
+        :type visibility_timeout: int
+        :param visibility_timeout: The number of seconds the message should
+            remain invisible to other queue readers
+            (default=None, which uses the Queue's default)
+
+        :type attributes: str
+        :param attributes: The name of additional attribute to return
+            with response or All if you want all attributes.  The
+            default is to return no additional attributes.  Valid
+            values:
+            * All
+            * SenderId
+            * SentTimestamp
+            * ApproximateReceiveCount
+            * ApproximateFirstReceiveTimestamp
+
+        :type wait_time_seconds: int
+        :param wait_time_seconds: The duration (in seconds) for which the call
+            will wait for a message to arrive in the queue before returning.
+            If a message is available, the call will return sooner than
+            wait_time_seconds.
+
+        :type message_attributes: list
+        :param message_attributes: The name(s) of additional message
+            attributes to return. The default is to return no additional
+            message attributes. Use ``['All']`` or ``['.*']`` to return all.
+
+        :rtype: list
+        :return: A list of :class:`boto.sqs.message.Message` objects.
+
+        """
+        params = {'MaxNumberOfMessages' : number_messages}
+        if visibility_timeout is not None:
+            params['VisibilityTimeout'] = visibility_timeout
+        if attributes is not None:
+            self.build_list_params(params, attributes, 'AttributeName')
+        if wait_time_seconds is not None:
+            params['WaitTimeSeconds'] = wait_time_seconds
+        if message_attributes is not None:
+            self.build_list_params(params, message_attributes,
+                                   'MessageAttributeName')
+        return self.get_list('ReceiveMessage', params,
+                             [('Message', queue.message_class)],
+                             queue.id, queue)
+
+    def delete_message(self, queue, message):
+        """
+        Delete a message from a queue.
+
+        :type queue: A :class:`boto.sqs.queue.Queue` object
+        :param queue: The Queue from which messages are read.
+
+        :type message: A :class:`boto.sqs.message.Message` object
+        :param message: The Message to be deleted
+
+        :rtype: bool
+        :return: True if successful, False otherwise.
+        """
+        params = {'ReceiptHandle' : message.receipt_handle}
+        return self.get_status('DeleteMessage', params, queue.id)
+
+    def delete_message_batch(self, queue, messages):
+        """
+        Deletes a list of messages from a queue in a single request.
+
+        :type queue: A :class:`boto.sqs.queue.Queue` object.
+        :param queue: The Queue from which the messages will be deleted.
+
+        :type messages: List of :class:`boto.sqs.message.Message` objects.
+        :param messages: A list of message objects.
+        """
+        params = {}
+        for i, msg in enumerate(messages):
+            prefix = 'DeleteMessageBatchRequestEntry'
+            p_name = '%s.%i.Id' % (prefix, (i+1))
+            params[p_name] = msg.id
+            p_name = '%s.%i.ReceiptHandle' % (prefix, (i+1))
+            params[p_name] = msg.receipt_handle
+        return self.get_object('DeleteMessageBatch', params, BatchResults,
+                               queue.id, verb='POST')
+
+    def delete_message_from_handle(self, queue, receipt_handle):
+        """
+        Delete a message from a queue, given a receipt handle.
+
+        :type queue: A :class:`boto.sqs.queue.Queue` object
+        :param queue: The Queue from which messages are read.
+
+        :type receipt_handle: str
+        :param receipt_handle: The receipt handle for the message
+
+        :rtype: bool
+        :return: True if successful, False otherwise.
+        """
+        params = {'ReceiptHandle' : receipt_handle}
+        return self.get_status('DeleteMessage', params, queue.id)
+
+    def send_message(self, queue, message_content, delay_seconds=None,
+                     message_attributes=None):
+        """
+        Send a new message to the queue.
+
+        :type queue: A :class:`boto.sqs.queue.Queue` object.
+        :param queue: The Queue to which the messages will be written.
+
+        :type message_content: string
+        :param message_content: The body of the message
+
+        :type delay_seconds: int
+        :param delay_seconds: Number of seconds (0 - 900) to delay this
+            message from being processed.
+
+        :type message_attributes: dict
+        :param message_attributes: Message attributes to set. Should be
+            of the form:
+
+            {
+                "name1": {
+                    "data_type": "Number",
+                    "string_value": "1"
+                },
+                "name2": {
+                    "data_type": "String",
+                    "string_value": "Bob"
+                }
+            }
+
+        """
+        params = {'MessageBody' : message_content}
+        if delay_seconds:
+            params['DelaySeconds'] = int(delay_seconds)
+
+        if message_attributes is not None:
+            keys = sorted(message_attributes.keys())
+            for i, name in enumerate(keys, start=1):
+                attribute = message_attributes[name]
+                params['MessageAttribute.%s.Name' % i] = name
+                if 'data_type' in attribute:
+                    params['MessageAttribute.%s.Value.DataType' % i] = \
+                        attribute['data_type']
+                if 'string_value' in attribute:
+                    params['MessageAttribute.%s.Value.StringValue' % i] = \
+                        attribute['string_value']
+                if 'binary_value' in attribute:
+                    params['MessageAttribute.%s.Value.BinaryValue' % i] = \
+                        attribute['binary_value']
+                if 'string_list_value' in attribute:
+                    params['MessageAttribute.%s.Value.StringListValue' % i] = \
+                        attribute['string_list_value']
+                if 'binary_list_value' in attribute:
+                    params['MessageAttribute.%s.Value.BinaryListValue' % i] = \
+                        attribute['binary_list_value']
+
+        return self.get_object('SendMessage', params, Message,
+                               queue.id, verb='POST')
+
+    def send_message_batch(self, queue, messages):
+        """
+        Delivers up to 10 messages to a queue in a single request.
+
+        :type queue: A :class:`boto.sqs.queue.Queue` object.
+        :param queue: The Queue to which the messages will be written.
+
+        :type messages: List of lists.
+        :param messages: A list of lists or tuples.  Each inner
+            tuple represents a single message to be written
+            and consists of an ID (string) that must be unique
+            within the list of messages, the message body itself
+            which can be a maximum of 64K in length, an
+            integer which represents the delay time (in seconds)
+            for the message (0-900) before the message will
+            be delivered to the queue, and an optional dict of
+            message attributes like those passed to ``send_message``
+            above.
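+
+        A minimal sketch (ids, bodies and delay values are illustrative)::
+
+            conn.send_message_batch(queue, [
+                ('m1', 'first body', 0),
+                ('m2', 'second body', 30),
+            ])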
+
+        """
+        params = {}
+        for i, msg in enumerate(messages):
+            base = 'SendMessageBatchRequestEntry.%i' % (i + 1)
+            params['%s.Id' % base] = msg[0]
+            params['%s.MessageBody' % base] = msg[1]
+            params['%s.DelaySeconds' % base] = msg[2]
+            if len(msg) > 3:
+                base += '.MessageAttribute'
+                keys = sorted(msg[3].keys())
+                for j, name in enumerate(keys):
+                    attribute = msg[3][name]
+
+                    p_name = '%s.%i.Name' % (base, j + 1)
+                    params[p_name] = name
+
+                    if 'data_type' in attribute:
+                        p_name = '%s.%i.DataType' % (base, j + 1)
+                        params[p_name] = attribute['data_type']
+                    if 'string_value' in attribute:
+                        p_name = '%s.%i.StringValue' % (base, j + 1)
+                        params[p_name] = attribute['string_value']
+                    if 'binary_value' in attribute:
+                        p_name = '%s.%i.BinaryValue' % (base, j + 1)
+                        params[p_name] = attribute['binary_value']
+                    if 'string_list_value' in attribute:
+                        p_name = '%s.%i.StringListValue' % (base, j + 1)
+                        params[p_name] = attribute['string_list_value']
+                    if 'binary_list_value' in attribute:
+                        p_name = '%s.%i.BinaryListValue' % (base, j + 1)
+                        params[p_name] = attribute['binary_list_value']
+
+        return self.get_object('SendMessageBatch', params, BatchResults,
+                               queue.id, verb='POST')
+
+    def change_message_visibility(self, queue, receipt_handle,
+                                  visibility_timeout):
+        """
+        Extends the read lock timeout for the specified message from
+        the specified queue to the specified value.
+
+        :type queue: A :class:`boto.sqs.queue.Queue` object
+        :param queue: The Queue from which messages are read.
+
+        :type receipt_handle: str
+        :param receipt_handle: The receipt handle associated with the message
+            whose visibility timeout will be changed.
+
+        :type visibility_timeout: int
+        :param visibility_timeout: The new value of the message's visibility
+            timeout in seconds.
+        """
+        params = {'ReceiptHandle' : receipt_handle,
+                  'VisibilityTimeout' : visibility_timeout}
+        return self.get_status('ChangeMessageVisibility', params, queue.id)
+
+    def change_message_visibility_batch(self, queue, messages):
+        """
+        A batch version of change_message_visibility that can act
+        on up to 10 messages at a time.
+
+        :type queue: A :class:`boto.sqs.queue.Queue` object.
+        :param queue: The Queue containing the messages whose visibility
+            will be changed.
+
+        :type messages: List of tuples.
+        :param messages: A list of tuples where each tuple consists
+            of a :class:`boto.sqs.message.Message` object and an integer
+            that represents the new visibility timeout for that message.
+        """
+        params = {}
+        for i, t in enumerate(messages):
+            prefix = 'ChangeMessageVisibilityBatchRequestEntry'
+            p_name = '%s.%i.Id' % (prefix, (i+1))
+            params[p_name] = t[0].id
+            p_name = '%s.%i.ReceiptHandle' % (prefix, (i+1))
+            params[p_name] = t[0].receipt_handle
+            p_name = '%s.%i.VisibilityTimeout' % (prefix, (i+1))
+            params[p_name] = t[1]
+        return self.get_object('ChangeMessageVisibilityBatch',
+                               params, BatchResults,
+                               queue.id, verb='POST')
+
+    def get_all_queues(self, prefix=''):
+        """
+        Retrieves all queues.
+
+        :keyword str prefix: Optionally, only return queues that start with
+            this value.
+        :rtype: list
+        :returns: A list of :py:class:`boto.sqs.queue.Queue` instances.
+        """
+        params = {}
+        if prefix:
+            params['QueueNamePrefix'] = prefix
+        return self.get_list('ListQueues', params, [('QueueUrl', Queue)])
+
+    def get_queue(self, queue_name, owner_acct_id=None):
+        """
+        Retrieves the queue with the given name, or ``None`` if no match
+        was found.
+
+        :param str queue_name: The name of the queue to retrieve.
+        :param str owner_acct_id: Optionally, the AWS account ID of the
+            account that created the queue.
+        :rtype: :py:class:`boto.sqs.queue.Queue` or ``None``
+        :returns: The requested queue, or ``None`` if no match was found.
+        """
+        params = {'QueueName': queue_name}
+        if owner_acct_id:
+            params['QueueOwnerAWSAccountId'] = owner_acct_id
+        try:
+            return self.get_object('GetQueueUrl', params, Queue)
+        except SQSError:
+            return None
+
+    lookup = get_queue
+
+    def get_dead_letter_source_queues(self, queue):
+        """
+        Retrieves the dead letter source queues for a given queue.
+
+        :type queue: A :class:`boto.sqs.queue.Queue` object.
+        :param queue: The queue for which to get DL source queues
+        :rtype: list
+        :returns: A list of :py:class:`boto.sqs.queue.Queue` instances.
+        """
+        params = {'QueueUrl': queue.url}
+        return self.get_list('ListDeadLetterSourceQueues', params,
+                             [('QueueUrl', Queue)])
+
+    #
+    # Permissions methods
+    #
+
+    def add_permission(self, queue, label, aws_account_id, action_name):
+        """
+        Add a permission to a queue.
+
+        :type queue: :class:`boto.sqs.queue.Queue`
+        :param queue: The queue object
+
+        :type label: str or unicode
+        :param label: A unique identification of the permission you are
+            setting.  Maximum of 80 characters ``[0-9a-zA-Z_-]``
+            Example, AliceSendMessage
+
+        :type aws_account_id: str or unicode
+        :param aws_account_id: The AWS account number of the principal
+            who will be given permission.  The principal must have an
+            AWS account, but does not need to be signed up for Amazon
+            SQS.  For information about locating the AWS account
+            identification, see the Amazon SQS Developer Guide.
+
+        :type action_name: str or unicode
+        :param action_name: The action.  Valid choices are:
+            * *
+            * SendMessage
+            * ReceiveMessage
+            * DeleteMessage
+            * ChangeMessageVisibility
+            * GetQueueAttributes
+
+        :rtype: bool
+        :return: True if successful, False otherwise.
+
+        """
+        params = {'Label': label,
+                  'AWSAccountId' : aws_account_id,
+                  'ActionName' : action_name}
+        return self.get_status('AddPermission', params, queue.id)
+
+    def remove_permission(self, queue, label):
+        """
+        Remove a permission from a queue.
+
+        :type queue: :class:`boto.sqs.queue.Queue`
+        :param queue: The queue object
+
+        :type label: str or unicode
+        :param label: The unique label associated with the permission
+            being removed.
+
+        :rtype: bool
+        :return: True if successful, False otherwise.
+        """
+        params = {'Label': label}
+        return self.get_status('RemovePermission', params, queue.id)
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sqs/jsonmessage.py b/desktop/core/ext-py/boto-2.38.0/boto/sqs/jsonmessage.py
new file mode 100644
index 0000000000000000000000000000000000000000..520eb8eb06e2f74b4229acbe111da5d587f3c5d8
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/sqs/jsonmessage.py
@@ -0,0 +1,43 @@
+# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+import base64
+
+from boto.sqs.message import MHMessage
+from boto.exception import SQSDecodeError
+from boto.compat import json
+
+
+class JSONMessage(MHMessage):
+    """
+    Acts like a dictionary but encodes its data as a Base64-encoded
+    JSON payload.
+    """
+
+    def decode(self, value):
+        try:
+            value = base64.b64decode(value.encode('utf-8')).decode('utf-8')
+            value = json.loads(value)
+        except:
+            raise SQSDecodeError('Unable to decode message', self)
+        return value
+
+    def encode(self, value):
+        value = json.dumps(value)
+        return base64.b64encode(value.encode('utf-8')).decode('utf-8')
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sqs/message.py b/desktop/core/ext-py/boto-2.38.0/boto/sqs/message.py
new file mode 100644
index 0000000000000000000000000000000000000000..656734fa6739f4ddf8870d24f93442e61ab8fb64
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/sqs/message.py
@@ -0,0 +1,271 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+SQS Message
+
+A Message represents the data stored in an SQS queue.  The rules for what is
+allowed within an SQS Message are here:
+
+    http://docs.amazonwebservices.com/AWSSimpleQueueService/2008-01-01/SQSDeveloperGuide/Query_QuerySendMessage.html
+
+So, at its simplest level a Message just needs to allow a developer to store
+bytes in it and get the bytes back out.  However, to allow messages to have
+richer semantics, the Message class must support the following interfaces:
+
+The constructor for the Message class must accept a keyword parameter "queue"
+which is an instance of a boto Queue object and represents the queue that the
+message will be stored in.  The default value for this parameter is None.
+
+The constructor for the Message class must accept a keyword parameter "body"
+which represents the content or body of the message.  The format of this
+parameter will depend on the behavior of the particular Message subclass.
+For example, if the Message subclass provides dictionary-like behavior to the
+user the body passed to the constructor should be a dict-like object that can
+be used to populate the initial state of the message.
+
+The Message class must provide an encode method that accepts a value of the
+same type as the body parameter of the constructor and returns a string of
+characters that are able to be stored in an SQS message body (see rules above).
+
+The Message class must provide a decode method that accepts a string of
+characters that can be stored (and probably were stored!) in an SQS message
+and return an object of a type that is consistent with the "body" parameter
+accepted on the class constructor.
+
+The Message class must provide a __len__ method that will return the size of
+the encoded message that would be stored in SQS based on the current state of
+the Message object.
+
+The Message class must provide a get_body method that will return the body of
+the message in the same format accepted in the constructor of the class.
+
+The Message class must provide a set_body method that accepts a message body
+in the same format accepted by the constructor of the class.  This method
+should alter the internal state of the Message object to reflect the state
+represented in the message body parameter.
+
+The Message class must provide a get_body_encoded method that returns the
+current body of the message in the format in which it would be stored in SQS.
+"""
+
+import base64
+
+import boto
+
+from boto.compat import StringIO
+from boto.compat import six
+from boto.sqs.attributes import Attributes
+from boto.sqs.messageattributes import MessageAttributes
+from boto.exception import SQSDecodeError
+
+
+class RawMessage(object):
+    """
+    Base class for SQS messages.  RawMessage does not encode the message
+    in any way.  Whatever you store in the body of the message is what
+    will be written to SQS and whatever is returned from SQS is stored
+    directly into the body of the message.
+    """
+
+    def __init__(self, queue=None, body=''):
+        self.queue = queue
+        self.set_body(body)
+        self.id = None
+        self.receipt_handle = None
+        self.md5 = None
+        self.attributes = Attributes(self)
+        self.message_attributes = MessageAttributes(self)
+        self.md5_message_attributes = None
+
+    def __len__(self):
+        return len(self.encode(self._body))
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Attribute':
+            return self.attributes
+        if name == 'MessageAttribute':
+            return self.message_attributes
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Body':
+            self.set_body(value)
+        elif name == 'MessageId':
+            self.id = value
+        elif name == 'ReceiptHandle':
+            self.receipt_handle = value
+        elif name == 'MD5OfBody':
+            self.md5 = value
+        elif name == 'MD5OfMessageAttributes':
+            self.md5_message_attributes = value
+        else:
+            setattr(self, name, value)
+
+    def endNode(self, connection):
+        self.set_body(self.decode(self.get_body()))
+
+    def encode(self, value):
+        """Transform body object into serialized byte array format."""
+        return value
+
+    def decode(self, value):
+        """Transform serialized byte array into any object."""
+        return value
+
+    def set_body(self, body):
+        """Override the current body for this object, using decoded format."""
+        self._body = body
+
+    def get_body(self):
+        return self._body
+
+    def get_body_encoded(self):
+        """
+        This method is really a semi-private method used by the Queue.write
+        method when writing the contents of the message to SQS.
+        You probably shouldn't need to call this method in the normal
+        course of events.
+        """
+        return self.encode(self.get_body())
+
+    def delete(self):
+        if self.queue:
+            return self.queue.delete_message(self)
+
+    def change_visibility(self, visibility_timeout):
+        if self.queue:
+            self.queue.connection.change_message_visibility(self.queue,
+                                                            self.receipt_handle,
+                                                            visibility_timeout)
+
+
+class Message(RawMessage):
+    """
+    The default Message class used for SQS queues.  This class automatically
+    encodes/decodes the message body using Base64 encoding to avoid any
+    illegal characters in the message body.  See:
+
+    https://forums.aws.amazon.com/thread.jspa?threadID=13067
+
+    for details on why this is a good idea.  The encode/decode is meant to
+    be transparent to the end-user.
+    """
+
+    def encode(self, value):
+        if not isinstance(value, six.binary_type):
+            value = value.encode('utf-8')
+        return base64.b64encode(value).decode('utf-8')
+
+    def decode(self, value):
+        try:
+            value = base64.b64decode(value.encode('utf-8')).decode('utf-8')
+        except:
+            boto.log.warning('Unable to decode message')
+            return value
+        return value
+
+
+class MHMessage(Message):
+    """
+    The MHMessage class provides a message with RFC821-like headers, like
+    this:
+
+        HeaderName: HeaderValue
+
+    The encoding/decoding of this is handled automatically and after
+    the message body has been read, the message instance can be treated
+    like a mapping object, i.e. m['HeaderName'] would return 'HeaderValue'.
+    """
+
+    def __init__(self, queue=None, body=None, xml_attrs=None):
+        if body is None or body == '':
+            body = {}
+        super(MHMessage, self).__init__(queue, body)
+
+    def decode(self, value):
+        try:
+            msg = {}
+            fp = StringIO(value)
+            line = fp.readline()
+            while line:
+                delim = line.find(':')
+                key = line[0:delim]
+                value = line[delim+1:].strip()
+                msg[key.strip()] = value.strip()
+                line = fp.readline()
+        except:
+            raise SQSDecodeError('Unable to decode message', self)
+        return msg
+
+    def encode(self, value):
+        s = ''
+        for item in value.items():
+            s = s + '%s: %s\n' % (item[0], item[1])
+        return s
+
+    def __contains__(self, key):
+        return key in self._body
+
+    def __getitem__(self, key):
+        if key in self._body:
+            return self._body[key]
+        else:
+            raise KeyError(key)
+
+    def __setitem__(self, key, value):
+        self._body[key] = value
+        self.set_body(self._body)
+
+    def keys(self):
+        return self._body.keys()
+
+    def values(self):
+        return self._body.values()
+
+    def items(self):
+        return self._body.items()
+
+    def has_key(self, key):
+        return key in self._body
+
+    def update(self, d):
+        self._body.update(d)
+        self.set_body(self._body)
+
+    def get(self, key, default=None):
+        return self._body.get(key, default)
+
+
+class EncodedMHMessage(MHMessage):
+    """
+    The EncodedMHMessage class provides a message with RFC821-like headers,
+    like this:
+
+        HeaderName: HeaderValue
+
+    This variation encodes/decodes the body of the message in Base64
+    automatically.  The message instance can be treated like a mapping
+    object, i.e. m['HeaderName'] would return 'HeaderValue'.
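+
+    A round-trip sketch (the header name is illustrative)::
+
+        m = EncodedMHMessage(queue)
+        m['HeaderName'] = 'HeaderValue'
+        # encoded body: base64 of 'HeaderName: HeaderValue\n'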
+ """ + + def decode(self, value): + try: + value = base64.b64decode(value.encode('utf-8')).decode('utf-8') + except: + raise SQSDecodeError('Unable to decode message', self) + return super(EncodedMHMessage, self).decode(value) + + def encode(self, value): + value = super(EncodedMHMessage, self).encode(value) + return base64.b64encode(value.encode('utf-8')).decode('utf-8') + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sqs/messageattributes.py b/desktop/core/ext-py/boto-2.38.0/boto/sqs/messageattributes.py new file mode 100644 index 0000000000000000000000000000000000000000..7e61bf3668d90daa76bf93a43781a5778190ad29 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sqs/messageattributes.py @@ -0,0 +1,66 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2014 Amazon.com, Inc. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Represents an SQS MessageAttribute Name/Value set +""" + +class MessageAttributes(dict): + def __init__(self, parent): + self.parent = parent + self.current_key = None + self.current_value = None + + def startElement(self, name, attrs, connection): + if name == 'Value': + self.current_value = MessageAttributeValue(self) + return self.current_value + + def endElement(self, name, value, connection): + if name == 'MessageAttribute': + self[self.current_key] = self.current_value + elif name == 'Name': + self.current_key = value + elif name == 'Value': + pass + else: + setattr(self, name, value) + + +class MessageAttributeValue(dict): + def __init__(self, parent): + self.parent = parent + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'DataType': + self['data_type'] = value + elif name == 'StringValue': + self['string_value'] = value + elif name == 'BinaryValue': + self['binary_value'] = value + elif name == 'StringListValue': + self['string_list_value'] = value + elif name == 'BinaryListValue': + self['binary_list_value'] = value diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sqs/queue.py b/desktop/core/ext-py/boto-2.38.0/boto/sqs/queue.py new file mode 100644 index 0000000000000000000000000000000000000000..bf3720d95f253271ee470305a46d943f784eb05c --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sqs/queue.py @@ -0,0 +1,492 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+
+"""
+Represents an SQS Queue
+"""
+from boto.compat import urllib
+from boto.sqs.message import Message
+
+
+class Queue(object):
+
+    def __init__(self, connection=None, url=None, message_class=Message):
+        self.connection = connection
+        self.url = url
+        self.message_class = message_class
+        self.visibility_timeout = None
+
+    def __repr__(self):
+        return 'Queue(%s)' % self.url
+
+    def _id(self):
+        if self.url:
+            val = urllib.parse.urlparse(self.url)[2]
+        else:
+            val = self.url
+        return val
+    id = property(_id)
+
+    def _name(self):
+        if self.url:
+            val = urllib.parse.urlparse(self.url)[2].split('/')[2]
+        else:
+            val = self.url
+        return val
+    name = property(_name)
+
+    def _arn(self):
+        parts = self.id.split('/')
+        return 'arn:aws:sqs:%s:%s:%s' % (
+            self.connection.region.name, parts[1], parts[2])
+    arn = property(_arn)
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'QueueUrl':
+            self.url = value
+        elif name == 'VisibilityTimeout':
+            self.visibility_timeout = int(value)
+        else:
+            setattr(self, name, value)
+
+    def set_message_class(self, message_class):
+        """
+        Set the message class that should be used when instantiating
+        messages read from the queue.  By default, the class
+        :class:`boto.sqs.message.Message` is used but this can be overridden
+        with any class that behaves like a message.
+
+        :type message_class: Message-like class
+        :param message_class: The new Message class
+        """
+        self.message_class = message_class
+
+    def get_attributes(self, attributes='All'):
+        """
+        Retrieves attributes about this queue object and returns
+        them in an Attributes instance (a subclass of dict).
+
+        :type attributes: string
+        :param attributes: String containing one of:
+            ApproximateNumberOfMessages,
+            ApproximateNumberOfMessagesNotVisible,
+            VisibilityTimeout,
+            CreatedTimestamp,
+            LastModifiedTimestamp,
+            Policy,
+            ReceiveMessageWaitTimeSeconds
+        :rtype: Attributes object
+        :return: An Attributes object which is a mapping type holding the
+            requested name/value pairs
+        """
+        return self.connection.get_queue_attributes(self, attributes)
+
+    def set_attribute(self, attribute, value):
+        """
+        Set a new value for an attribute of the Queue.
+
+        :type attribute: String
+        :param attribute: The name of the attribute you want to set.  The
+            only valid value at this time is: VisibilityTimeout
+        :type value: int
+        :param value: The new value for the attribute.
+            For VisibilityTimeout the value must be an
+            integer number of seconds from 0 to 86400.
+
+        :rtype: bool
+        :return: True if successful, otherwise False.
+        """
+        return self.connection.set_queue_attribute(self, attribute, value)
+
+    def get_timeout(self):
+        """
+        Get the visibility timeout for the queue.
+
+        :rtype: int
+        :return: The number of seconds as an integer.
+        """
+        a = self.get_attributes('VisibilityTimeout')
+        return int(a['VisibilityTimeout'])
+
+    def set_timeout(self, visibility_timeout):
+        """
+        Set the visibility timeout for the queue.
+
+        :type visibility_timeout: int
+        :param visibility_timeout: The desired timeout in seconds
+        """
+        retval = self.set_attribute('VisibilityTimeout', visibility_timeout)
+        if retval:
+            self.visibility_timeout = visibility_timeout
+        return retval
+
+    def add_permission(self, label, aws_account_id, action_name):
+        """
+        Add a permission to a queue.
+
+        :type label: str or unicode
+        :param label: A unique identification of the permission you are setting.
+            Maximum of 80 characters ``[0-9a-zA-Z_-]``
+            Example, AliceSendMessage
+
+        :type aws_account_id: str or unicode
+        :param aws_account_id: The AWS account number of the principal who
+            will be given permission.  The principal must have an AWS account,
+            but does not need to be signed up for Amazon SQS.  For information
+            about locating the AWS account identification, see the Amazon SQS
+            Developer Guide.
+
+        :type action_name: str or unicode
+        :param action_name: The action.  Valid choices are:
+            SendMessage|ReceiveMessage|DeleteMessage|
+            ChangeMessageVisibility|GetQueueAttributes|*
+
+        :rtype: bool
+        :return: True if successful, False otherwise.
+
+        """
+        return self.connection.add_permission(self, label, aws_account_id,
+                                              action_name)
+
+    def remove_permission(self, label):
+        """
+        Remove a permission from a queue.
+
+        :type label: str or unicode
+        :param label: The unique label associated with the permission
+            being removed.
+
+        :rtype: bool
+        :return: True if successful, False otherwise.
+        """
+        return self.connection.remove_permission(self, label)
+
+    def read(self, visibility_timeout=None, wait_time_seconds=None,
+             message_attributes=None):
+        """
+        Read a single message from the queue.
+
+        :type visibility_timeout: int
+        :param visibility_timeout: The timeout for this message in seconds
+
+        :type wait_time_seconds: int
+        :param wait_time_seconds: The duration (in seconds) for which the call
+            will wait for a message to arrive in the queue before returning.
+            If a message is available, the call will return sooner than
+            wait_time_seconds.
+
+        :type message_attributes: list
+        :param message_attributes: The name(s) of additional message
+            attributes to return. The default is to return no additional
+            message attributes. Use ``['All']`` or ``['.*']`` to return all.
+
+        :rtype: :class:`boto.sqs.message.Message`
+        :return: A single message or None if queue is empty
+        """
+        rs = self.get_messages(1, visibility_timeout,
+                               wait_time_seconds=wait_time_seconds,
+                               message_attributes=message_attributes)
+        if len(rs) == 1:
+            return rs[0]
+        else:
+            return None
+
+    def write(self, message, delay_seconds=None):
+        """
+        Add a single message to the queue.
+
+        :type message: Message
+        :param message: The message to be written to the queue
+
+        :type delay_seconds: int
+        :param delay_seconds: Number of seconds (0 - 900) to delay
+            delivery of this message.
+
+        :rtype: :class:`boto.sqs.message.Message`
+        :return: The :class:`boto.sqs.message.Message` object that was written.
+        """
+        new_msg = self.connection.send_message(self,
+            message.get_body_encoded(), delay_seconds=delay_seconds,
+            message_attributes=message.message_attributes)
+        message.id = new_msg.id
+        message.md5 = new_msg.md5
+        return message
+
+    def write_batch(self, messages):
+        """
+        Delivers up to 10 messages in a single request.
+
+        :type messages: List of lists.
+        :param messages: A list of lists or tuples.  Each inner
+            tuple represents a single message to be written
+            and consists of an ID (string) that must be unique
+            within the list of messages, the message body itself
+            which can be a maximum of 64K in length, an
+            integer which represents the delay time (in seconds)
+            for the message (0-900) before the message will
+            be delivered to the queue, and an optional dict of
+            message attributes like those passed to ``send_message``
+            in the connection class.
+        """
+        return self.connection.send_message_batch(self, messages)
+
+    def new_message(self, body='', **kwargs):
+        """
+        Create new message of appropriate class.
+
+        :type body: message body
+        :param body: The body of the newly created message (optional).
+
+        :rtype: :class:`boto.sqs.message.Message`
+        :return: A new Message object
+        """
+        m = self.message_class(self, body, **kwargs)
+        m.queue = self
+        return m
+
+    # get a variable number of messages, returns a list of messages
+    def get_messages(self, num_messages=1, visibility_timeout=None,
+                     attributes=None, wait_time_seconds=None,
+                     message_attributes=None):
+        """
+        Get a variable number of messages.
+
+        :type num_messages: int
+        :param num_messages: The maximum number of messages to read from
+            the queue.
+
+        :type visibility_timeout: int
+        :param visibility_timeout: The VisibilityTimeout for the messages read.
+
+        :type attributes: str
+        :param attributes: The name of additional attribute to return
+            with response or All if you want all attributes.  The
+            default is to return no additional attributes.  Valid
+            values:
+            * All
+            * SenderId
+            * SentTimestamp
+            * ApproximateReceiveCount
+            * ApproximateFirstReceiveTimestamp
+
+        :type wait_time_seconds: int
+        :param wait_time_seconds: The duration (in seconds) for which the call
+            will wait for a message to arrive in the queue before returning.
+            If a message is available, the call will return sooner than
+            wait_time_seconds.
+
+        :type message_attributes: list
+        :param message_attributes: The name(s) of additional message
+            attributes to return. The default is to return no additional
+            message attributes. Use ``['All']`` or ``['.*']`` to return all.
+
+        :rtype: list
+        :return: A list of :class:`boto.sqs.message.Message` objects.
+        """
+        return self.connection.receive_message(
+            self, number_messages=num_messages,
+            visibility_timeout=visibility_timeout, attributes=attributes,
+            wait_time_seconds=wait_time_seconds,
+            message_attributes=message_attributes)
+
+    def delete_message(self, message):
+        """
+        Delete a message from the queue.
+
+        :type message: :class:`boto.sqs.message.Message`
+        :param message: The :class:`boto.sqs.message.Message` object to delete.
+
+        :rtype: bool
+        :return: True if successful, False otherwise
+        """
+        return self.connection.delete_message(self, message)
+
+    def delete_message_batch(self, messages):
+        """
+        Deletes a list of messages in a single request.
+
+        :type messages: List of :class:`boto.sqs.message.Message` objects.
+        :param messages: A list of message objects.
+        """
+        return self.connection.delete_message_batch(self, messages)
+
+    def change_message_visibility_batch(self, messages):
+        """
+        A batch version of change_message_visibility that can act
+        on up to 10 messages at a time.
+
+        :type messages: List of tuples.
+        :param messages: A list of tuples where each tuple consists
+            of a :class:`boto.sqs.message.Message` object and an integer
+            that represents the new visibility timeout for that message.
+        """
+        return self.connection.change_message_visibility_batch(self, messages)
+
+    def delete(self):
+        """
+        Delete the queue.
+        """
+        return self.connection.delete_queue(self)
+
+    def purge(self):
+        """
+        Purge all messages in the queue.
+        """
+        return self.connection.purge_queue(self)
+
+    def clear(self, page_size=10, vtimeout=10):
+        """Deprecated utility function to remove all messages from a queue"""
+        return self.purge()
+
+    def count(self, page_size=10, vtimeout=10):
+        """
+        Utility function to count the number of messages in a queue.
+        Note: This function now calls GetQueueAttributes to obtain
+        an 'approximate' count of the number of messages in a queue.
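+
+        Roughly equivalent to the following sketch, where ``q`` is this
+        queue::
+
+            int(q.get_attributes('ApproximateNumberOfMessages')
+                ['ApproximateNumberOfMessages'])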
+        """
+        a = self.get_attributes('ApproximateNumberOfMessages')
+        return int(a['ApproximateNumberOfMessages'])
+
+    def count_slow(self, page_size=10, vtimeout=10):
+        """
+        Deprecated.  This is the old 'count' method that actually counts
+        the messages by reading them all.  This gives an accurate count but
+        is very slow for queues with a non-trivial number of messages.
+        Instead, use get_attributes('ApproximateNumberOfMessages') to take
+        advantage of the new SQS capability.  This is retained only for
+        the unit tests.
+        """
+        n = 0
+        l = self.get_messages(page_size, vtimeout)
+        while l:
+            for m in l:
+                n += 1
+            l = self.get_messages(page_size, vtimeout)
+        return n
+
+    def dump(self, file_name, page_size=10, vtimeout=10, sep='\n'):
+        """Utility function to dump the messages in a queue to a file
+        NOTE: Page size must be 10 or less, otherwise SQS errors"""
+        fp = open(file_name, 'wb')
+        n = 0
+        l = self.get_messages(page_size, vtimeout)
+        while l:
+            for m in l:
+                fp.write(m.get_body())
+                if sep:
+                    fp.write(sep)
+                n += 1
+            l = self.get_messages(page_size, vtimeout)
+        fp.close()
+        return n
+
+    def save_to_file(self, fp, sep='\n'):
+        """
+        Read all messages from the queue and persist them to a file-like
+        object.  Messages are written to the file and the 'sep' string is
+        written in between messages.  Messages are deleted from the queue
+        after being written to the file.
+        Returns the number of messages saved.
+        """
+        n = 0
+        m = self.read()
+        while m:
+            n += 1
+            fp.write(m.get_body())
+            if sep:
+                fp.write(sep)
+            self.delete_message(m)
+            m = self.read()
+        return n
+
+    def save_to_filename(self, file_name, sep='\n'):
+        """
+        Read all messages from the queue and persist them to a local file.
+        Messages are written to the file and the 'sep' string is written
+        in between messages.  Messages are deleted from the queue after
+        being written to the file.
+        Returns the number of messages saved.
+        """
+        fp = open(file_name, 'wb')
+        n = self.save_to_file(fp, sep)
+        fp.close()
+        return n
+
+    # for backwards compatibility
+    save = save_to_filename
+
+    def save_to_s3(self, bucket):
+        """
+        Read all messages from the queue and persist them to S3.
+        Messages are stored in the S3 bucket using a naming scheme of::
+
+            <queue_id>/<message_id>
+
+        Messages are deleted from the queue after being saved to S3.
+        Returns the number of messages saved.
+        """
+        n = 0
+        m = self.read()
+        while m:
+            n += 1
+            key = bucket.new_key('%s/%s' % (self.id, m.id))
+            key.set_contents_from_string(m.get_body())
+            self.delete_message(m)
+            m = self.read()
+        return n
+
+    def load_from_s3(self, bucket, prefix=None):
+        """
+        Load messages previously saved to S3.  If ``prefix`` is omitted,
+        the queue's id is used as the prefix.
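+
+        A usage sketch (``bucket`` is a boto.s3 Bucket object; the
+        prefix is illustrative)::
+
+            n = q.load_from_s3(bucket, prefix='backup')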
+ """ + n = 0 + if prefix: + prefix = '%s/' % prefix + else: + prefix = '%s/' % self.id[1:] + rs = bucket.list(prefix=prefix) + for key in rs: + n += 1 + m = self.new_message(key.get_contents_as_string()) + self.write(m) + return n + + def load_from_file(self, fp, sep='\n'): + """Utility function to load messages from a file-like object to a queue""" + n = 0 + body = '' + l = fp.readline() + while l: + if l == sep: + m = Message(self, body) + self.write(m) + n += 1 + print('writing message %d' % n) + body = '' + else: + body = body + l + l = fp.readline() + return n + + def load_from_filename(self, file_name, sep='\n'): + """Utility function to load messages from a local filename to a queue""" + fp = open(file_name, 'rb') + n = self.load_from_file(fp, sep) + fp.close() + return n + + # for backward compatibility + load = load_from_filename + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sqs/regioninfo.py b/desktop/core/ext-py/boto-2.38.0/boto/sqs/regioninfo.py new file mode 100644 index 0000000000000000000000000000000000000000..d21dff9cda30e268daa1e1ff5ee3edd030270ab6 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sqs/regioninfo.py @@ -0,0 +1,33 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.regioninfo import RegionInfo + +class SQSRegionInfo(RegionInfo): + + def __init__(self, connection=None, name=None, endpoint=None, + connection_cls=None): + from boto.sqs.connection import SQSConnection + super(SQSRegionInfo, self).__init__(connection, name, endpoint, + SQSConnection) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/storage_uri.py b/desktop/core/ext-py/boto-2.38.0/boto/storage_uri.py new file mode 100755 index 0000000000000000000000000000000000000000..34b7b060455305379f2ff4cb39df98388795f36d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/storage_uri.py @@ -0,0 +1,891 @@ +# Copyright 2010 Google Inc. +# Copyright (c) 2011, Nexenta Systems Inc. 
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import boto
+import os
+import sys
+import textwrap
+from boto.s3.deletemarker import DeleteMarker
+from boto.exception import BotoClientError
+from boto.exception import InvalidUriError
+
+
+class StorageUri(object):
+    """
+    Base class for representing storage provider-independent bucket and
+    object name with a shorthand URI-like syntax.
+
+    This is an abstract class: the constructor cannot be called (throws an
+    exception if you try).
+    """
+
+    connection = None
+    # Optional args that can be set from one of the concrete subclass
+    # constructors, to change connection behavior (e.g., to override
+    # https_connection_factory).
+    connection_args = None
+
+    # Map of provider scheme ('s3' or 'gs') to AWSAuthConnection object. We
+    # maintain a pool here in addition to the connection pool implemented
+    # in AWSAuthConnection because the latter re-creates its connection pool
+    # every time that class is instantiated (so the current pool is used to
+    # avoid re-instantiating AWSAuthConnection).
+    provider_pool = {}
+
+    def __init__(self):
+        """Uncallable constructor on abstract base StorageUri class.
+        """
+        raise BotoClientError('Attempt to instantiate abstract StorageUri '
+                              'class')
+
+    def __repr__(self):
+        """Returns string representation of URI."""
+        return self.uri
+
+    def equals(self, uri):
+        """Returns true if two URIs are equal."""
+        return self.uri == uri.uri
+
+    def check_response(self, resp, level, uri):
+        if resp is None:
+            raise InvalidUriError('\n'.join(textwrap.wrap(
+                'Attempt to get %s for "%s" failed. This can happen if '
+                'the URI refers to a non-existent object or if you meant to '
+                'operate on a directory (e.g., leaving off -R option on gsutil '
+                'cp, mv, or ls of a bucket)' % (level, uri), 80)))
+
+    def _check_bucket_uri(self, function_name):
+        if issubclass(type(self), BucketStorageUri) and not self.bucket_name:
+            raise InvalidUriError(
+                '%s on bucket-less URI (%s)' % (function_name, self.uri))
+
+    def _check_object_uri(self, function_name):
+        if issubclass(type(self), BucketStorageUri) and not self.object_name:
+            raise InvalidUriError('%s on object-less URI (%s)' %
+                                  (function_name, self.uri))
+
+    def _warn_about_args(self, function_name, **args):
+        for arg in args:
+            if args[arg]:
+                sys.stderr.write(
+                    'Warning: %s ignores argument: %s=%s\n' %
+                    (function_name, arg, str(args[arg])))
+
+    def connect(self, access_key_id=None, secret_access_key=None, **kwargs):
+        """
+        Opens a connection to the appropriate provider, depending on the
+        provider portion of the URI.  Requires Credentials defined in the
+        boto config file (see boto/pyami/config.py).
+
+        @type access_key_id: string
+        @param access_key_id: Optional access key ID to use for the connection.
+        @type secret_access_key: string
+        @param secret_access_key: Optional secret access key to use for the
+            connection.
+        @rtype: L{AWSAuthConnection}
+        @return: A connection to the storage service provider of this URI.
+        """
+        connection_args = dict(self.connection_args or ())
+
+        if (hasattr(self, 'suppress_consec_slashes') and
+                'suppress_consec_slashes' not in connection_args):
+            connection_args['suppress_consec_slashes'] = (
+                self.suppress_consec_slashes)
+        connection_args.update(kwargs)
+        if not self.connection:
+            if self.scheme in self.provider_pool:
+                self.connection = self.provider_pool[self.scheme]
+            elif self.scheme == 's3':
+                from boto.s3.connection import S3Connection
+                self.connection = S3Connection(access_key_id,
+                                               secret_access_key,
+                                               **connection_args)
+                self.provider_pool[self.scheme] = self.connection
+            elif self.scheme == 'gs':
+                from boto.gs.connection import GSConnection
+                # Use OrdinaryCallingFormat instead of boto-default
+                # SubdomainCallingFormat because the latter changes the hostname
+                # that's checked during cert validation for HTTPS connections,
+                # which will fail cert validation (when cert validation is
+                # enabled).
+                #
+                # The same is not true for S3's HTTPS certificates. In fact,
+                # we don't want to do this for S3 because S3 requires the
+                # subdomain to match the location of the bucket. If the proper
+                # subdomain is not used, the server will return a 301 redirect
+                # with no Location header.
+                #
+                # Note: the following import can't be moved up to the
+                # start of this file else it causes a config import failure when
+                # run from the resumable upload/download tests.
+ from boto.s3.connection import OrdinaryCallingFormat + connection_args['calling_format'] = OrdinaryCallingFormat() + self.connection = GSConnection(access_key_id, + secret_access_key, + **connection_args) + self.provider_pool[self.scheme] = self.connection + elif self.scheme == 'file': + from boto.file.connection import FileConnection + self.connection = FileConnection(self) + else: + raise InvalidUriError('Unrecognized scheme "%s"' % + self.scheme) + self.connection.debug = self.debug + return self.connection + + def has_version(self): + return (issubclass(type(self), BucketStorageUri) + and ((self.version_id is not None) + or (self.generation is not None))) + + def delete_key(self, validate=False, headers=None, version_id=None, + mfa_token=None): + self._check_object_uri('delete_key') + bucket = self.get_bucket(validate, headers) + return bucket.delete_key(self.object_name, headers, version_id, + mfa_token) + + def list_bucket(self, prefix='', delimiter='', headers=None, + all_versions=False): + self._check_bucket_uri('list_bucket') + bucket = self.get_bucket(headers=headers) + if all_versions: + return (v for v in bucket.list_versions( + prefix=prefix, delimiter=delimiter, headers=headers) + if not isinstance(v, DeleteMarker)) + else: + return bucket.list(prefix=prefix, delimiter=delimiter, + headers=headers) + + def get_all_keys(self, validate=False, headers=None, prefix=None): + bucket = self.get_bucket(validate, headers) + return bucket.get_all_keys(headers) + + def get_bucket(self, validate=False, headers=None): + self._check_bucket_uri('get_bucket') + conn = self.connect() + bucket = conn.get_bucket(self.bucket_name, validate, headers) + self.check_response(bucket, 'bucket', self.uri) + return bucket + + def get_key(self, validate=False, headers=None, version_id=None): + self._check_object_uri('get_key') + bucket = self.get_bucket(validate, headers) + key = bucket.get_key(self.object_name, headers, version_id) + self.check_response(key, 'key', self.uri) + return key + + def new_key(self, validate=False, headers=None): + self._check_object_uri('new_key') + bucket = self.get_bucket(validate, headers) + return bucket.new_key(self.object_name) + + def get_contents_to_stream(self, fp, headers=None, version_id=None): + self._check_object_uri('get_key') + self._warn_about_args('get_key', validate=False) + key = self.get_key(None, headers) + self.check_response(key, 'key', self.uri) + return key.get_contents_to_file(fp, headers, version_id=version_id) + + def get_contents_to_file(self, fp, headers=None, cb=None, num_cb=10, + torrent=False, version_id=None, + res_download_handler=None, response_headers=None, + hash_algs=None): + self._check_object_uri('get_contents_to_file') + key = self.get_key(None, headers) + self.check_response(key, 'key', self.uri) + if hash_algs: + key.get_contents_to_file(fp, headers, cb, num_cb, torrent, + version_id, res_download_handler, + response_headers, + hash_algs=hash_algs) + else: + key.get_contents_to_file(fp, headers, cb, num_cb, torrent, + version_id, res_download_handler, + response_headers) + + def get_contents_as_string(self, validate=False, headers=None, cb=None, + num_cb=10, torrent=False, version_id=None): + self._check_object_uri('get_contents_as_string') + key = self.get_key(validate, headers) + self.check_response(key, 'key', self.uri) + return key.get_contents_as_string(headers, cb, num_cb, torrent, + version_id) + + def acl_class(self): + conn = self.connect() + acl_class = conn.provider.acl_class + self.check_response(acl_class, 
'acl_class', self.uri) + return acl_class + + def canned_acls(self): + conn = self.connect() + canned_acls = conn.provider.canned_acls + self.check_response(canned_acls, 'canned_acls', self.uri) + return canned_acls + + +class BucketStorageUri(StorageUri): + """ + StorageUri subclass that handles bucket storage providers. + Callers should instantiate this class by calling boto.storage_uri(). + """ + + delim = '/' + capabilities = set([]) # A set of additional capabilities. + + def __init__(self, scheme, bucket_name=None, object_name=None, + debug=0, connection_args=None, suppress_consec_slashes=True, + version_id=None, generation=None, is_latest=False): + """Instantiate a BucketStorageUri from scheme,bucket,object tuple. + + @type scheme: string + @param scheme: URI scheme naming the storage provider (gs, s3, etc.) + @type bucket_name: string + @param bucket_name: bucket name + @type object_name: string + @param object_name: object name, excluding generation/version. + @type debug: int + @param debug: debug level to pass in to connection (range 0..2) + @type connection_args: map + @param connection_args: optional map containing args to be + passed to {S3,GS}Connection constructor (e.g., to override + https_connection_factory). + @param suppress_consec_slashes: If provided, controls whether + consecutive slashes will be suppressed in key paths. + @param version_id: Object version id (S3-specific). + @param generation: Object generation number (GCS-specific). + @param is_latest: boolean indicating that a versioned object is the + current version + + After instantiation the components are available in the following + fields: scheme, bucket_name, object_name, version_id, generation, + is_latest, versionless_uri, version_specific_uri, uri. + Note: If instantiated without version info, the string representation + for a URI stays versionless; similarly, if instantiated with version + info, the string representation for a URI stays version-specific. If you + call one of the uri.set_contents_from_xyz() methods, a specific object + version will be created, and its version-specific URI string can be + retrieved from version_specific_uri even if the URI was instantiated + without version info. 
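[Editor's note: a minimal usage sketch of the BucketStorageUri fields described in the docstring above, via the public boto.storage_uri() factory. This is an editorial addition, not part of the patch; the bucket and object names are placeholders.]

    import boto

    # Parse a provider-scheme URI; no network call is made here.
    uri = boto.storage_uri('gs://example-bucket/notes.txt')
    print(uri.scheme)           # 'gs'
    print(uri.bucket_name)      # 'example-bucket'
    print(uri.object_name)      # 'notes.txt'
    print(uri.versionless_uri)  # 'gs://example-bucket/notes.txt'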
+ """ + + self.scheme = scheme + self.bucket_name = bucket_name + self.object_name = object_name + self.debug = debug + if connection_args: + self.connection_args = connection_args + self.suppress_consec_slashes = suppress_consec_slashes + self.version_id = version_id + self.generation = generation and int(generation) + self.is_latest = is_latest + self.is_version_specific = bool(self.generation) or bool(version_id) + self._build_uri_strings() + + def _build_uri_strings(self): + if self.bucket_name and self.object_name: + self.versionless_uri = '%s://%s/%s' % (self.scheme, self.bucket_name, + self.object_name) + if self.generation: + self.version_specific_uri = '%s#%s' % (self.versionless_uri, + self.generation) + elif self.version_id: + self.version_specific_uri = '%s#%s' % ( + self.versionless_uri, self.version_id) + if self.is_version_specific: + self.uri = self.version_specific_uri + else: + self.uri = self.versionless_uri + elif self.bucket_name: + self.uri = ('%s://%s/' % (self.scheme, self.bucket_name)) + else: + self.uri = ('%s://' % self.scheme) + + def _update_from_key(self, key): + self._update_from_values( + getattr(key, 'version_id', None), + getattr(key, 'generation', None), + getattr(key, 'is_latest', None), + getattr(key, 'md5', None)) + + def _update_from_values(self, version_id, generation, is_latest, md5): + self.version_id = version_id + self.generation = generation + self.is_latest = is_latest + self._build_uri_strings() + self.md5 = md5 + + def get_key(self, validate=False, headers=None, version_id=None): + self._check_object_uri('get_key') + bucket = self.get_bucket(validate, headers) + if self.get_provider().name == 'aws': + key = bucket.get_key(self.object_name, headers, + version_id=(version_id or self.version_id)) + elif self.get_provider().name == 'google': + key = bucket.get_key(self.object_name, headers, + generation=self.generation) + self.check_response(key, 'key', self.uri) + return key + + def delete_key(self, validate=False, headers=None, version_id=None, + mfa_token=None): + self._check_object_uri('delete_key') + bucket = self.get_bucket(validate, headers) + if self.get_provider().name == 'aws': + version_id = version_id or self.version_id + return bucket.delete_key(self.object_name, headers, version_id, + mfa_token) + elif self.get_provider().name == 'google': + return bucket.delete_key(self.object_name, headers, + generation=self.generation) + + def clone_replace_name(self, new_name): + """Instantiate a BucketStorageUri from the current BucketStorageUri, + but replacing the object_name. + + @type new_name: string + @param new_name: new object name + """ + self._check_bucket_uri('clone_replace_name') + return BucketStorageUri( + self.scheme, bucket_name=self.bucket_name, object_name=new_name, + debug=self.debug, + suppress_consec_slashes=self.suppress_consec_slashes) + + def clone_replace_key(self, key): + """Instantiate a BucketStorageUri from the current BucketStorageUri, by + replacing the object name with the object name and other metadata found + in the given Key object (including generation). 
+ + @type key: Key + @param key: key for the new StorageUri to represent + """ + self._check_bucket_uri('clone_replace_key') + version_id = None + generation = None + is_latest = False + if hasattr(key, 'version_id'): + version_id = key.version_id + if hasattr(key, 'generation'): + generation = key.generation + if hasattr(key, 'is_latest'): + is_latest = key.is_latest + + return BucketStorageUri( + key.provider.get_provider_name(), + bucket_name=key.bucket.name, + object_name=key.name, + debug=self.debug, + suppress_consec_slashes=self.suppress_consec_slashes, + version_id=version_id, + generation=generation, + is_latest=is_latest) + + def get_acl(self, validate=False, headers=None, version_id=None): + """returns a bucket's acl""" + self._check_bucket_uri('get_acl') + bucket = self.get_bucket(validate, headers) + # This works for both bucket- and object- level ACLs (former passes + # key_name=None): + key_name = self.object_name or '' + if self.get_provider().name == 'aws': + version_id = version_id or self.version_id + acl = bucket.get_acl(key_name, headers, version_id) + else: + acl = bucket.get_acl(key_name, headers, generation=self.generation) + self.check_response(acl, 'acl', self.uri) + return acl + + def get_def_acl(self, validate=False, headers=None): + """returns a bucket's default object acl""" + self._check_bucket_uri('get_def_acl') + bucket = self.get_bucket(validate, headers) + acl = bucket.get_def_acl(headers) + self.check_response(acl, 'acl', self.uri) + return acl + + def get_cors(self, validate=False, headers=None): + """returns a bucket's CORS XML""" + self._check_bucket_uri('get_cors') + bucket = self.get_bucket(validate, headers) + cors = bucket.get_cors(headers) + self.check_response(cors, 'cors', self.uri) + return cors + + def set_cors(self, cors, validate=False, headers=None): + """sets or updates a bucket's CORS XML""" + self._check_bucket_uri('set_cors ') + bucket = self.get_bucket(validate, headers) + bucket.set_cors(cors.to_xml(), headers) + + def get_location(self, validate=False, headers=None): + self._check_bucket_uri('get_location') + bucket = self.get_bucket(validate, headers) + return bucket.get_location() + + def get_storage_class(self, validate=False, headers=None): + self._check_bucket_uri('get_storage_class') + # StorageClass is defined as a bucket param for GCS, but as a key + # param for S3. + if self.scheme != 'gs': + raise ValueError('get_storage_class() not supported for %s ' + 'URIs.' % self.scheme) + bucket = self.get_bucket(validate, headers) + return bucket.get_storage_class() + + def get_subresource(self, subresource, validate=False, headers=None, + version_id=None): + self._check_bucket_uri('get_subresource') + bucket = self.get_bucket(validate, headers) + return bucket.get_subresource(subresource, self.object_name, headers, + version_id) + + def add_group_email_grant(self, permission, email_address, recursive=False, + validate=False, headers=None): + self._check_bucket_uri('add_group_email_grant') + if self.scheme != 'gs': + raise ValueError('add_group_email_grant() not supported for %s ' + 'URIs.' 
% self.scheme) + if self.object_name: + if recursive: + raise ValueError('add_group_email_grant() on key-ful URI cannot ' + 'specify recursive=True') + key = self.get_key(validate, headers) + self.check_response(key, 'key', self.uri) + key.add_group_email_grant(permission, email_address, headers) + elif self.bucket_name: + bucket = self.get_bucket(validate, headers) + bucket.add_group_email_grant(permission, email_address, recursive, + headers) + else: + raise InvalidUriError('add_group_email_grant() on bucket-less URI ' + '%s' % self.uri) + + def add_email_grant(self, permission, email_address, recursive=False, + validate=False, headers=None): + self._check_bucket_uri('add_email_grant') + if not self.object_name: + bucket = self.get_bucket(validate, headers) + bucket.add_email_grant(permission, email_address, recursive, + headers) + else: + key = self.get_key(validate, headers) + self.check_response(key, 'key', self.uri) + key.add_email_grant(permission, email_address) + + def add_user_grant(self, permission, user_id, recursive=False, + validate=False, headers=None): + self._check_bucket_uri('add_user_grant') + if not self.object_name: + bucket = self.get_bucket(validate, headers) + bucket.add_user_grant(permission, user_id, recursive, headers) + else: + key = self.get_key(validate, headers) + self.check_response(key, 'key', self.uri) + key.add_user_grant(permission, user_id) + + def list_grants(self, headers=None): + self._check_bucket_uri('list_grants ') + bucket = self.get_bucket(headers) + return bucket.list_grants(headers) + + def is_file_uri(self): + """Returns True if this URI names a file or directory.""" + return False + + def is_cloud_uri(self): + """Returns True if this URI names a bucket or object.""" + return True + + def names_container(self): + """ + Returns True if this URI names a directory or bucket. Will return + False for bucket subdirs; providing bucket subdir semantics needs to + be done by the caller (like gsutil does). + """ + return bool(not self.object_name) + + def names_singleton(self): + """Returns True if this URI names a file or object.""" + return bool(self.object_name) + + def names_directory(self): + """Returns True if this URI names a directory.""" + return False + + def names_provider(self): + """Returns True if this URI names a provider.""" + return bool(not self.bucket_name) + + def names_bucket(self): + """Returns True if this URI names a bucket.""" + return bool(self.bucket_name) and bool(not self.object_name) + + def names_file(self): + """Returns True if this URI names a file.""" + return False + + def names_object(self): + """Returns True if this URI names an object.""" + return self.names_singleton() + + def is_stream(self): + """Returns True if this URI represents input/output stream.""" + return False + + def create_bucket(self, headers=None, location='', policy=None, + storage_class=None): + self._check_bucket_uri('create_bucket ') + conn = self.connect() + # Pass storage_class param only if this is a GCS bucket. (In S3 the + # storage class is specified on the key object.) 
+ if self.scheme == 'gs': + return conn.create_bucket(self.bucket_name, headers, location, policy, + storage_class) + else: + return conn.create_bucket(self.bucket_name, headers, location, policy) + + def delete_bucket(self, headers=None): + self._check_bucket_uri('delete_bucket') + conn = self.connect() + return conn.delete_bucket(self.bucket_name, headers) + + def get_all_buckets(self, headers=None): + conn = self.connect() + return conn.get_all_buckets(headers) + + def get_provider(self): + conn = self.connect() + provider = conn.provider + self.check_response(provider, 'provider', self.uri) + return provider + + def set_acl(self, acl_or_str, key_name='', validate=False, headers=None, + version_id=None, if_generation=None, if_metageneration=None): + """Sets or updates a bucket's ACL.""" + self._check_bucket_uri('set_acl') + key_name = key_name or self.object_name or '' + bucket = self.get_bucket(validate, headers) + if self.generation: + bucket.set_acl( + acl_or_str, key_name, headers, generation=self.generation, + if_generation=if_generation, if_metageneration=if_metageneration) + else: + version_id = version_id or self.version_id + bucket.set_acl(acl_or_str, key_name, headers, version_id) + + def set_xml_acl(self, xmlstring, key_name='', validate=False, headers=None, + version_id=None, if_generation=None, if_metageneration=None): + """Sets or updates a bucket's ACL with an XML string.""" + self._check_bucket_uri('set_xml_acl') + key_name = key_name or self.object_name or '' + bucket = self.get_bucket(validate, headers) + if self.generation: + bucket.set_xml_acl( + xmlstring, key_name, headers, generation=self.generation, + if_generation=if_generation, if_metageneration=if_metageneration) + else: + version_id = version_id or self.version_id + bucket.set_xml_acl(xmlstring, key_name, headers, + version_id=version_id) + + def set_def_xml_acl(self, xmlstring, validate=False, headers=None): + """Sets or updates a bucket's default object ACL with an XML string.""" + self._check_bucket_uri('set_def_xml_acl') + self.get_bucket(validate, headers).set_def_xml_acl(xmlstring, headers) + + def set_def_acl(self, acl_or_str, validate=False, headers=None, + version_id=None): + """Sets or updates a bucket's default object ACL.""" + self._check_bucket_uri('set_def_acl') + self.get_bucket(validate, headers).set_def_acl(acl_or_str, headers) + + def set_canned_acl(self, acl_str, validate=False, headers=None, + version_id=None): + """Sets or updates a bucket's acl to a predefined (canned) value.""" + self._check_object_uri('set_canned_acl') + self._warn_about_args('set_canned_acl', version_id=version_id) + key = self.get_key(validate, headers) + self.check_response(key, 'key', self.uri) + key.set_canned_acl(acl_str, headers) + + def set_def_canned_acl(self, acl_str, validate=False, headers=None, + version_id=None): + """Sets or updates a bucket's default object acl to a predefined + (canned) value.""" + self._check_bucket_uri('set_def_canned_acl ') + key = self.get_key(validate, headers) + self.check_response(key, 'key', self.uri) + key.set_def_canned_acl(acl_str, headers, version_id) + + def set_subresource(self, subresource, value, validate=False, headers=None, + version_id=None): + self._check_bucket_uri('set_subresource') + bucket = self.get_bucket(validate, headers) + bucket.set_subresource(subresource, value, self.object_name, headers, + version_id) + + def set_contents_from_string(self, s, headers=None, replace=True, + cb=None, num_cb=10, policy=None, md5=None, + reduced_redundancy=False): + 
self._check_object_uri('set_contents_from_string') + key = self.new_key(headers=headers) + if self.scheme == 'gs': + if reduced_redundancy: + sys.stderr.write('Warning: GCS does not support ' + 'reduced_redundancy; argument ignored by ' + 'set_contents_from_string') + result = key.set_contents_from_string( + s, headers, replace, cb, num_cb, policy, md5) + else: + result = key.set_contents_from_string( + s, headers, replace, cb, num_cb, policy, md5, + reduced_redundancy) + self._update_from_key(key) + return result + + def set_contents_from_file(self, fp, headers=None, replace=True, cb=None, + num_cb=10, policy=None, md5=None, size=None, + rewind=False, res_upload_handler=None): + self._check_object_uri('set_contents_from_file') + key = self.new_key(headers=headers) + if self.scheme == 'gs': + result = key.set_contents_from_file( + fp, headers, replace, cb, num_cb, policy, md5, size=size, + rewind=rewind, res_upload_handler=res_upload_handler) + if res_upload_handler: + self._update_from_values(None, res_upload_handler.generation, + None, md5) + else: + self._warn_about_args('set_contents_from_file', + res_upload_handler=res_upload_handler) + result = key.set_contents_from_file( + fp, headers, replace, cb, num_cb, policy, md5, size=size, + rewind=rewind) + self._update_from_key(key) + return result + + def set_contents_from_stream(self, fp, headers=None, replace=True, cb=None, + policy=None, reduced_redundancy=False): + self._check_object_uri('set_contents_from_stream') + dst_key = self.new_key(False, headers) + result = dst_key.set_contents_from_stream( + fp, headers, replace, cb, policy=policy, + reduced_redundancy=reduced_redundancy) + self._update_from_key(dst_key) + return result + + def copy_key(self, src_bucket_name, src_key_name, metadata=None, + src_version_id=None, storage_class='STANDARD', + preserve_acl=False, encrypt_key=False, headers=None, + query_args=None, src_generation=None): + """Returns newly created key.""" + self._check_object_uri('copy_key') + dst_bucket = self.get_bucket(validate=False, headers=headers) + if src_generation: + return dst_bucket.copy_key( + new_key_name=self.object_name, + src_bucket_name=src_bucket_name, + src_key_name=src_key_name, metadata=metadata, + storage_class=storage_class, preserve_acl=preserve_acl, + encrypt_key=encrypt_key, headers=headers, query_args=query_args, + src_generation=src_generation) + else: + return dst_bucket.copy_key( + new_key_name=self.object_name, + src_bucket_name=src_bucket_name, src_key_name=src_key_name, + metadata=metadata, src_version_id=src_version_id, + storage_class=storage_class, preserve_acl=preserve_acl, + encrypt_key=encrypt_key, headers=headers, query_args=query_args) + + def enable_logging(self, target_bucket, target_prefix=None, validate=False, + headers=None, version_id=None): + self._check_bucket_uri('enable_logging') + bucket = self.get_bucket(validate, headers) + bucket.enable_logging(target_bucket, target_prefix, headers=headers) + + def disable_logging(self, validate=False, headers=None, version_id=None): + self._check_bucket_uri('disable_logging') + bucket = self.get_bucket(validate, headers) + bucket.disable_logging(headers=headers) + + def get_logging_config(self, validate=False, headers=None, version_id=None): + self._check_bucket_uri('get_logging_config') + bucket = self.get_bucket(validate, headers) + return bucket.get_logging_config(headers=headers) + + def set_website_config(self, main_page_suffix=None, error_key=None, + validate=False, headers=None): + 
self._check_bucket_uri('set_website_config') + bucket = self.get_bucket(validate, headers) + if not (main_page_suffix or error_key): + bucket.delete_website_configuration(headers) + else: + bucket.configure_website(main_page_suffix, error_key, headers) + + def get_website_config(self, validate=False, headers=None): + self._check_bucket_uri('get_website_config') + bucket = self.get_bucket(validate, headers) + return bucket.get_website_configuration(headers) + + def get_versioning_config(self, headers=None): + self._check_bucket_uri('get_versioning_config') + bucket = self.get_bucket(False, headers) + return bucket.get_versioning_status(headers) + + def configure_versioning(self, enabled, headers=None): + self._check_bucket_uri('configure_versioning') + bucket = self.get_bucket(False, headers) + return bucket.configure_versioning(enabled, headers) + + def set_metadata(self, metadata_plus, metadata_minus, preserve_acl, + headers=None): + return self.get_key(False).set_remote_metadata(metadata_plus, + metadata_minus, + preserve_acl, + headers=headers) + + def compose(self, components, content_type=None, headers=None): + self._check_object_uri('compose') + component_keys = [] + for suri in components: + component_keys.append(suri.new_key()) + component_keys[-1].generation = suri.generation + self.generation = self.new_key().compose( + component_keys, content_type=content_type, headers=headers) + self._build_uri_strings() + return self + + def get_lifecycle_config(self, validate=False, headers=None): + """Returns a bucket's lifecycle configuration.""" + self._check_bucket_uri('get_lifecycle_config') + bucket = self.get_bucket(validate, headers) + lifecycle_config = bucket.get_lifecycle_config(headers) + self.check_response(lifecycle_config, 'lifecycle', self.uri) + return lifecycle_config + + def configure_lifecycle(self, lifecycle_config, validate=False, + headers=None): + """Sets or updates a bucket's lifecycle configuration.""" + self._check_bucket_uri('configure_lifecycle') + bucket = self.get_bucket(validate, headers) + bucket.configure_lifecycle(lifecycle_config, headers) + + def exists(self, headers=None): + """Returns True if the object exists or False if it doesn't""" + if not self.object_name: + raise InvalidUriError('exists on object-less URI (%s)' % self.uri) + bucket = self.get_bucket() + key = bucket.get_key(self.object_name, headers=headers) + return bool(key) + + +class FileStorageUri(StorageUri): + """ + StorageUri subclass that handles files in the local file system. + Callers should instantiate this class by calling boto.storage_uri(). + + See file/README about how we map StorageUri operations onto a file system. + """ + + delim = os.sep + + def __init__(self, object_name, debug, is_stream=False): + """Instantiate a FileStorageUri from a path name. + + @type object_name: string + @param object_name: object name + @type debug: boolean + @param debug: whether to enable debugging on this StorageUri + + After instantiation the components are available in the following + fields: uri, scheme, bucket_name (always blank for this "anonymous" + bucket), object_name. + """ + + self.scheme = 'file' + self.bucket_name = '' + self.object_name = object_name + self.uri = 'file://' + object_name + self.debug = debug + self.stream = is_stream + + def clone_replace_name(self, new_name): + """Instantiate a FileStorageUri from the current FileStorageUri, + but replacing the object_name. 
+ + @type new_name: string + @param new_name: new object name + """ + return FileStorageUri(new_name, self.debug, self.stream) + + def is_file_uri(self): + """Returns True if this URI names a file or directory.""" + return True + + def is_cloud_uri(self): + """Returns True if this URI names a bucket or object.""" + return False + + def names_container(self): + """Returns True if this URI names a directory or bucket.""" + return self.names_directory() + + def names_singleton(self): + """Returns True if this URI names a file (or stream) or object.""" + return not self.names_container() + + def names_directory(self): + """Returns True if this URI names a directory.""" + if self.stream: + return False + return os.path.isdir(self.object_name) + + def names_provider(self): + """Returns True if this URI names a provider.""" + return False + + def names_bucket(self): + """Returns True if this URI names a bucket.""" + return False + + def names_file(self): + """Returns True if this URI names a file.""" + return self.names_singleton() + + def names_object(self): + """Returns True if this URI names an object.""" + return False + + def is_stream(self): + """Returns True if this URI represents input/output stream. + """ + return bool(self.stream) + + def close(self): + """Closes the underlying file. + """ + self.get_key().close() + + def exists(self, _headers_not_used=None): + """Returns True if the file exists or False if it doesn't""" + # The _headers_not_used parameter is ignored. It is only there to ensure + # that this method's signature is identical to the exists method on the + # BucketStorageUri class. + return os.path.exists(self.object_name) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sts/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/sts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..156975ecd13d5b572bf8b5c7ae917f49ecccc2a4 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sts/__init__.py @@ -0,0 +1,52 @@ +# Copyright (c) 2010-2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010-2011, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.sts.connection import STSConnection +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the STS service. 
+
+ :rtype: list
+ :return: A list of :class:`boto.regioninfo.RegionInfo` instances
+ """
+ return get_regions('sts', connection_cls=STSConnection)
+
+
+def connect_to_region(region_name, **kw_params):
+ """
+ Given a valid region name, return a
+ :class:`boto.sts.connection.STSConnection`.
+
+ :type region_name: str
+ :param region_name: The name of the region to connect to.
+
+ :rtype: :class:`boto.sts.connection.STSConnection` or ``None``
+ :return: A connection to the given region, or None if an invalid region
+ name is given
+ """
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sts/connection.py b/desktop/core/ext-py/boto-2.38.0/boto/sts/connection.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c0cf4b269ba1ac3926620ffdf9f697f9a4c88a2
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/sts/connection.py
@@ -0,0 +1,652 @@
+# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2011, Eucalyptus Systems, Inc.
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.connection import AWSQueryConnection
+from boto.provider import Provider, NO_CREDENTIALS_PROVIDED
+from boto.regioninfo import RegionInfo
+from boto.sts.credentials import Credentials, FederationToken, AssumedRole
+from boto.sts.credentials import DecodeAuthorizationMessage
+import boto
+import boto.utils
+import datetime
+import threading
+
+_session_token_cache = {}
+
+
+class STSConnection(AWSQueryConnection):
+ """
+ AWS Security Token Service
+ The AWS Security Token Service is a web service that enables you
+ to request temporary, limited-privilege credentials for AWS
+ Identity and Access Management (IAM) users or for users that you
+ authenticate (federated users). This guide provides descriptions
+ of the AWS Security Token Service API.
+
+ For more detailed information about using this service, go to
+ `Using Temporary Security Credentials`_.
+
+ For information about setting up signatures and authorization
+ through the API, go to `Signing AWS API Requests`_ in the AWS
+ General Reference . For general information about the Query API,
+ go to `Making Query Requests`_ in Using IAM . For information
+ about using security tokens with other AWS products, go to `Using
+ Temporary Security Credentials to Access AWS`_ in Using Temporary
+ Security Credentials .
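[Editor's note: a sketch of opening an STS connection with the connect_to_region() helper defined above. This is an editorial addition, not part of the patch; credentials are resolved from the boto config or environment as usual, and the region name is only an example.]

    import boto.sts

    # Returns an STSConnection for the named region, or None if the
    # region name is not recognized.
    conn = boto.sts.connect_to_region('us-east-1')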
+ + If you're new to AWS and need additional technical information + about a specific AWS product, you can find the product's technical + documentation at `http://aws.amazon.com/documentation/`_. + + We will refer to Amazon Identity and Access Management using the + abbreviated form IAM. All copyrights and legal protections still + apply. + """ + DefaultRegionName = 'us-east-1' + DefaultRegionEndpoint = 'sts.amazonaws.com' + APIVersion = '2011-06-15' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + converter=None, validate_certs=True, anon=False, + security_token=None, profile_name=None): + """ + :type anon: boolean + :param anon: If this parameter is True, the ``STSConnection`` object + will make anonymous requests, and it will not use AWS + Credentials or even search for AWS Credentials to make these + requests. + """ + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint, + connection_cls=STSConnection) + self.region = region + self.anon = anon + self._mutex = threading.Semaphore() + provider = 'aws' + # If an anonymous request is sent, do not try to look for credentials. + # So we pass in dummy values for the access key id, secret access + # key, and session token. It does not matter that they are + # not actual values because the request is anonymous. + if self.anon: + provider = Provider('aws', NO_CREDENTIALS_PROVIDED, + NO_CREDENTIALS_PROVIDED, + NO_CREDENTIALS_PROVIDED) + super(STSConnection, self).__init__(aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, + self.region.endpoint, debug, + https_connection_factory, path, + validate_certs=validate_certs, + security_token=security_token, + profile_name=profile_name, + provider=provider) + + def _required_auth_capability(self): + if self.anon: + return ['sts-anon'] + else: + return ['hmac-v4'] + + def _check_token_cache(self, token_key, duration=None, window_seconds=60): + token = _session_token_cache.get(token_key, None) + if token: + now = datetime.datetime.utcnow() + expires = boto.utils.parse_ts(token.expiration) + delta = expires - now + if delta < datetime.timedelta(seconds=window_seconds): + msg = 'Cached session token %s is expired' % token_key + boto.log.debug(msg) + token = None + return token + + def _get_session_token(self, duration=None, + mfa_serial_number=None, mfa_token=None): + params = {} + if duration: + params['DurationSeconds'] = duration + if mfa_serial_number: + params['SerialNumber'] = mfa_serial_number + if mfa_token: + params['TokenCode'] = mfa_token + return self.get_object('GetSessionToken', params, + Credentials, verb='POST') + + def get_session_token(self, duration=None, force_new=False, + mfa_serial_number=None, mfa_token=None): + """ + Return a valid session token. Because retrieving new tokens + from the Secure Token Service is a fairly heavyweight operation + this module caches previously retrieved tokens and returns + them when appropriate. Each token is cached with a key + consisting of the region name of the STS endpoint + concatenated with the requesting user's access id. If there + is a token in the cache meeting with this key, the session + expiration is checked to make sure it is still valid and if + so, the cached token is returned. 
Otherwise, a new session
+ token is requested from STS and it is placed into the cache
+ and returned.
+
+ :type duration: int
+ :param duration: The number of seconds the credentials should
+ remain valid.
+
+ :type force_new: bool
+ :param force_new: If this parameter is True, a new session token
+ will be retrieved from the Secure Token Service regardless
+ of whether there is a valid cached token or not.
+
+ :type mfa_serial_number: str
+ :param mfa_serial_number: The serial number of an MFA device.
+ If this is provided and if the mfa_token provided is
+ valid, the temporary session token will be authorized
+ to perform operations requiring the MFA device authentication.
+
+ :type mfa_token: str
+ :param mfa_token: The 6 digit token associated with the
+ MFA device.
+ """
+ token_key = '%s:%s' % (self.region.name, self.provider.access_key)
+ token = self._check_token_cache(token_key, duration)
+ if force_new or not token:
+ boto.log.debug('fetching a new token for %s' % token_key)
+ try:
+ self._mutex.acquire()
+ token = self._get_session_token(duration,
+ mfa_serial_number,
+ mfa_token)
+ _session_token_cache[token_key] = token
+ finally:
+ self._mutex.release()
+ return token
+
+ def get_federation_token(self, name, duration=None, policy=None):
+ """
+ Returns a set of temporary security credentials (consisting of
+ an access key ID, a secret access key, and a security token)
+ for a federated user. A typical use is in a proxy application
+ that is getting temporary security credentials on behalf of
+ distributed applications inside a corporate network. Because
+ you must call the `GetFederationToken` action using the long-
+ term security credentials of an IAM user, this call is
+ appropriate in contexts where those credentials can be safely
+ stored, usually in a server-based application.
+
+ **Note:** Do not use this call in mobile applications or
+ client-based web applications that directly get temporary
+ security credentials. For those types of applications, use
+ `AssumeRoleWithWebIdentity`.
+
+ The `GetFederationToken` action must be called by using the
+ long-term AWS security credentials of the AWS account or an
+ IAM user. Credentials that are created by IAM users are valid
+ for the specified duration, between 900 seconds (15 minutes)
+ and 129600 seconds (36 hours); credentials that are created by
+ using account credentials have a maximum duration of 3600
+ seconds (1 hour).
+
+ The permissions that are granted to the federated user are the
+ intersection of the policy that is passed with the
+ `GetFederationToken` request and policies that are associated
+ with the entity making the `GetFederationToken` call.
+
+ For more information about how permissions work, see
+ `Controlling Permissions in Temporary Credentials`_ in Using
+ Temporary Security Credentials . For information about using
+ `GetFederationToken` to create temporary security credentials,
+ see `Creating Temporary Credentials to Enable Access for
+ Federated Users`_ in Using Temporary Security Credentials .
+
+ :type name: string
+ :param name: The name of the federated user. The name is used as an
+ identifier for the temporary security credentials (such as `Bob`).
+ For example, you can reference the federated user name in a
+ resource-based policy, such as in an Amazon S3 bucket policy.
+
+ :type policy: string
+ :param policy: A policy that specifies the permissions that are granted
+ to the federated user.
+ By default, federated users have no
+ permissions; they do not inherit any from the IAM user. When you
+ specify a policy, the federated user's permissions are the
+ intersection of the specified policy and the IAM user's policy. If
+ you don't specify a policy, federated users can only access AWS
+ resources that explicitly allow those federated users in a resource
+ policy, such as in an Amazon S3 bucket policy.
+
+ :type duration: integer
+ :param duration: The duration, in seconds, that the session
+ should last. Acceptable durations for federation sessions range
+ from 900 seconds (15 minutes) to 129600 seconds (36 hours), with
+ 43200 seconds (12 hours) as the default. Sessions for AWS account
+ owners are restricted to a maximum of 3600 seconds (one hour). If
+ the duration is longer than one hour, the session for AWS account
+ owners defaults to one hour.
+
+ """
+ params = {'Name': name}
+ if duration:
+ params['DurationSeconds'] = duration
+ if policy:
+ params['Policy'] = policy
+ return self.get_object('GetFederationToken', params,
+ FederationToken, verb='POST')
+
+ def assume_role(self, role_arn, role_session_name, policy=None,
+ duration_seconds=None, external_id=None,
+ mfa_serial_number=None,
+ mfa_token=None):
+ """
+ Returns a set of temporary security credentials (consisting of
+ an access key ID, a secret access key, and a security token)
+ that you can use to access AWS resources that you might not
+ normally have access to. Typically, you use `AssumeRole` for
+ cross-account access or federation.
+
+ For cross-account access, imagine that you own multiple
+ accounts and need to access resources in each account. You
+ could create long-term credentials in each account to access
+ those resources. However, managing all those credentials and
+ remembering which one can access which account can be time-
+ consuming. Instead, you can create one set of long-term
+ credentials in one account and then use temporary security
+ credentials to access all the other accounts by assuming roles
+ in those accounts. For more information about roles, see
+ `Roles`_ in Using IAM .
+
+ For federation, you can, for example, grant single sign-on
+ access to the AWS Management Console. If you already have an
+ identity and authentication system in your corporate network,
+ you don't have to recreate user identities in AWS in order to
+ grant those user identities access to AWS. Instead, after a
+ user has been authenticated, you call `AssumeRole` (and
+ specify the role with the appropriate permissions) to get
+ temporary security credentials for that user. With those
+ temporary security credentials, you construct a sign-in URL
+ that users can use to access the console. For more
+ information, see `Scenarios for Granting Temporary Access`_ in
+ AWS Security Token Service .
+
+ The temporary security credentials are valid for the duration
+ that you specified when calling `AssumeRole`, which can be
+ from 900 seconds (15 minutes) to 3600 seconds (1 hour). The
+ default is 1 hour.
+
+ The temporary security credentials that are returned from the
+ `AssumeRole` response have the permissions that
+ are associated with the access policy of the role being
+ assumed and any policies that are associated with the AWS
+ resource being accessed. You can further restrict the
+ permissions of the temporary security credentials by passing a
+ policy in the request. The resulting permissions are an
+ intersection of the role's access policy and the policy that
+ you passed.
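[Editor's note: a sketch of the two calls implemented above. This is an editorial addition, not part of the patch; the region, user name, and duration are placeholders.]

    import boto.sts

    conn = boto.sts.connect_to_region('us-east-1')

    # Session tokens are cached per region + access key;
    # force_new=True bypasses the module-level cache.
    token = conn.get_session_token(duration=3600)
    print(token.access_key)
    print(token.expiration)

    # Temporary credentials for a federated user ('Bob' is a placeholder).
    fed = conn.get_federation_token(name='Bob', duration=3600)
    print(fed.credentials.access_key)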
+ These policies and any applicable resource-based
+ policies are evaluated when calls to AWS service APIs are made
+ using the temporary security credentials.
+
+ To assume a role, your AWS account must be trusted by the
+ role. The trust relationship is defined in the role's trust
+ policy when the IAM role is created. You must also have a
+ policy that allows you to call `sts:AssumeRole`.
+
+ **Important:** You cannot call `AssumeRole` by using AWS
+ account credentials; access will be denied. You must use IAM
+ user credentials to call `AssumeRole`.
+
+ :type role_arn: string
+ :param role_arn: The Amazon Resource Name (ARN) of the role that the
+ caller is assuming.
+
+ :type role_session_name: string
+ :param role_session_name: An identifier for the assumed role session.
+ The session name is included as part of the `AssumedRoleUser`.
+
+ :type policy: string
+ :param policy: A supplemental policy that is associated with the
+ temporary security credentials from the `AssumeRole` call. The
+ resulting permissions of the temporary security credentials are an
+ intersection of this policy and the access policy that is
+ associated with the role. Use this policy to further restrict the
+ permissions of the temporary security credentials.
+
+ :type duration_seconds: integer
+ :param duration_seconds: The duration, in seconds, of the role session.
+ The value can range from 900 seconds (15 minutes) to 3600 seconds
+ (1 hour). By default, the value is set to 3600 seconds.
+
+ :type external_id: string
+ :param external_id: A unique identifier that is used by third parties
+ to assume a role in their customers' accounts. For each role that
+ the third party can assume, they should instruct their customers to
+ create a role with the external ID that the third party generated.
+ Each time the third party assumes the role, they must pass the
+ customer's external ID. The external ID is useful in order to help
+ third parties bind a role to the customer who created it. For more
+ information about the external ID, see `About the External ID`_ in
+ Using Temporary Security Credentials .
+
+ :type mfa_serial_number: string
+ :param mfa_serial_number: The identification number of the MFA device that
+ is associated with the user who is making the AssumeRole call.
+ Specify this value if the trust policy of the role being assumed
+ includes a condition that requires MFA authentication. The value is
+ either the serial number for a hardware device (such as
+ GAHT12345678) or an Amazon Resource Name (ARN) for a virtual device
+ (such as arn:aws:iam::123456789012:mfa/user). Minimum length of 9.
+ Maximum length of 256.
+
+ :type mfa_token: string
+ :param mfa_token: The value provided by the MFA device, if the trust
+ policy of the role being assumed requires MFA (that is, if the
+ policy includes a condition that tests for MFA). If the role being
+ assumed requires MFA and if the TokenCode value is missing or
+ expired, the AssumeRole call returns an "access denied" error.
+ Minimum length of 6. Maximum length of 6.
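[Editor's note: a sketch of calling assume_role() as documented above. This is an editorial addition, not part of the patch; the role ARN and session name are placeholders. The returned AssumedRole carries a Credentials object.]

    import boto.sts

    conn = boto.sts.connect_to_region('us-east-1')  # example region
    role = conn.assume_role(
        role_arn='arn:aws:iam::123456789012:role/ReadOnly',  # placeholder ARN
        role_session_name='demo-session')                    # placeholder name
    creds = role.credentials
    print(creds.access_key)
    print(creds.session_token)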
+
+ """
+ params = {
+ 'RoleArn': role_arn,
+ 'RoleSessionName': role_session_name
+ }
+ if policy is not None:
+ params['Policy'] = policy
+ if duration_seconds is not None:
+ params['DurationSeconds'] = duration_seconds
+ if external_id is not None:
+ params['ExternalId'] = external_id
+ if mfa_serial_number is not None:
+ params['SerialNumber'] = mfa_serial_number
+ if mfa_token is not None:
+ params['TokenCode'] = mfa_token
+ return self.get_object('AssumeRole', params, AssumedRole, verb='POST')
+
+ def assume_role_with_saml(self, role_arn, principal_arn, saml_assertion,
+ policy=None, duration_seconds=None):
+ """
+ Returns a set of temporary security credentials for users who
+ have been authenticated via a SAML authentication response.
+ This operation provides a mechanism for tying an enterprise
+ identity store or directory to role-based AWS access without
+ user-specific credentials or configuration.
+
+ The temporary security credentials returned by this operation
+ consist of an access key ID, a secret access key, and a
+ security token. Applications can use these temporary security
+ credentials to sign calls to AWS services. The credentials are
+ valid for the duration that you specified when calling
+ `AssumeRoleWithSAML`, which can be up to 3600 seconds (1 hour)
+ or until the time specified in the SAML authentication
+ response's `NotOnOrAfter` value, whichever is shorter.
+
+ The maximum duration for a session is 1 hour, and the minimum
+ duration is 15 minutes, even if values outside this range are
+ specified.
+
+ Optionally, you can pass an AWS IAM access policy to this
+ operation. The temporary security credentials that are
+ returned by the operation have the permissions that are
+ associated with the access policy of the role being assumed,
+ except for any permissions explicitly denied by the policy you
+ pass. This gives you a way to further restrict the permissions
+ for the federated user. These policies and any applicable
+ resource-based policies are evaluated when calls to AWS are
+ made using the temporary security credentials.
+
+ Before your application can call `AssumeRoleWithSAML`, you
+ must configure your SAML identity provider (IdP) to issue the
+ claims required by AWS. Additionally, you must use AWS
+ Identity and Access Management (AWS IAM) to create a SAML
+ provider entity in your AWS account that represents your
+ identity provider, and create an AWS IAM role that specifies
+ this SAML provider in its trust policy.
+
+ Calling `AssumeRoleWithSAML` does not require the use of AWS
+ security credentials. The identity of the caller is validated
+ by using keys in the metadata document that is uploaded for
+ the SAML provider entity for your identity provider.
+
+ For more information, see the following resources:
+
+
+ + `Creating Temporary Security Credentials for SAML
+ Federation`_ in the Using Temporary Security Credentials
+ guide.
+ + `SAML Providers`_ in the Using IAM guide.
+ + `Configuring a Relying Party and Claims`_ in the Using IAM
+ guide.
+ + `Creating a Role for SAML-Based Federation`_ in the Using
+ IAM guide.
+
+ :type role_arn: string
+ :param role_arn: The Amazon Resource Name (ARN) of the role that the
+ caller is assuming.
+
+ :type principal_arn: string
+ :param principal_arn: The Amazon Resource Name (ARN) of the SAML
+ provider in AWS IAM that describes the IdP.
+
+ :type saml_assertion: string
+ :param saml_assertion: The base-64 encoded SAML authentication response
+ provided by the IdP.
+ For more information, see `Configuring a Relying Party and Adding + Claims`_ in the Using IAM guide. + + :type policy: string + :param policy: + An AWS IAM policy in JSON format. + + The temporary security credentials that are returned by this operation + have the permissions that are associated with the access policy of + the role being assumed, except for any permissions explicitly + denied by the policy you pass. These policies and any applicable + resource-based policies are evaluated when calls to AWS are made + using the temporary security credentials. + + The policy must be 2048 bytes or shorter, and its packed size must be + less than 450 bytes. + + :type duration_seconds: integer + :param duration_seconds: + The duration, in seconds, of the role session. The value can range from + 900 seconds (15 minutes) to 3600 seconds (1 hour). By default, the + value is set to 3600 seconds. An expiration can also be specified + in the SAML authentication response's `NotOnOrAfter` value. The + actual expiration time is whichever value is shorter. + + The maximum duration for a session is 1 hour, and the minimum duration + is 15 minutes, even if values outside this range are specified. + + """ + params = { + 'RoleArn': role_arn, + 'PrincipalArn': principal_arn, + 'SAMLAssertion': saml_assertion, + } + if policy is not None: + params['Policy'] = policy + if duration_seconds is not None: + params['DurationSeconds'] = duration_seconds + return self.get_object('AssumeRoleWithSAML', params, AssumedRole, + verb='POST') + + def assume_role_with_web_identity(self, role_arn, role_session_name, + web_identity_token, provider_id=None, + policy=None, duration_seconds=None): + """ + Returns a set of temporary security credentials for users who + have been authenticated in a mobile or web application with a + web identity provider, such as Login with Amazon, Facebook, or + Google. `AssumeRoleWithWebIdentity` is an API call that does + not require the use of AWS security credentials. Therefore, + you can distribute an application (for example, on mobile + devices) that requests temporary security credentials without + including long-term AWS credentials in the application or by + deploying server-based proxy services that use long-term AWS + credentials. For more information, see `Creating a Mobile + Application with Third-Party Sign-In`_ in AWS Security Token + Service . + + The temporary security credentials consist of an access key + ID, a secret access key, and a security token. Applications + can use these temporary security credentials to sign calls to + AWS service APIs. The credentials are valid for the duration + that you specified when calling `AssumeRoleWithWebIdentity`, + which can be from 900 seconds (15 minutes) to 3600 seconds (1 + hour). By default, the temporary security credentials are + valid for 1 hour. + + The temporary security credentials that are returned from the + `AssumeRoleWithWebIdentity` response have the permissions that + are associated with the access policy of the role being + assumed. You can further restrict the permissions of the + temporary security credentials by passing a policy in the + request. The resulting permissions are an intersection of the + role's access policy and the policy that you passed. These + policies and any applicable resource-based policies are + evaluated when calls to AWS service APIs are made using the + temporary security credentials. 
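[Editor's note: a sketch of assume_role_with_saml(), whose implementation appears just above. This is an editorial addition, not part of the patch; both ARNs and the assertion value are placeholders. Since the call needs no AWS credentials, an anonymous connection suffices.]

    import boto.sts

    conn = boto.sts.STSConnection(anon=True)  # no long-term AWS keys needed
    saml_assertion = '<base64-encoded SAML response from your IdP>'
    assumed = conn.assume_role_with_saml(
        role_arn='arn:aws:iam::123456789012:role/SAMLRole',            # placeholder
        principal_arn='arn:aws:iam::123456789012:saml-provider/MyIdP',  # placeholder
        saml_assertion=saml_assertion)
    print(assumed.credentials.access_key)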
+ + Before your application can call `AssumeRoleWithWebIdentity`, + you must have an identity token from a supported identity + provider and create a role that the application can assume. + The role that your application assumes must trust the identity + provider that is associated with the identity token. In other + words, the identity provider must be specified in the role's + trust policy. For more information, see ` Creating Temporary + Security Credentials for Mobile Apps Using Third-Party + Identity Providers`_. + + :type role_arn: string + :param role_arn: The Amazon Resource Name (ARN) of the role that the + caller is assuming. + + :type role_session_name: string + :param role_session_name: An identifier for the assumed role session. + Typically, you pass the name or identifier that is associated with + the user who is using your application. That way, the temporary + security credentials that your application will use are associated + with that user. This session name is included as part of the ARN + and assumed role ID in the `AssumedRoleUser` response element. + + :type web_identity_token: string + :param web_identity_token: The OAuth 2.0 access token or OpenID Connect + ID token that is provided by the identity provider. Your + application must get this token by authenticating the user who is + using your application with a web identity provider before the + application makes an `AssumeRoleWithWebIdentity` call. + + :type provider_id: string + :param provider_id: Specify this value only for OAuth access tokens. Do + not specify this value for OpenID Connect ID tokens, such as + `accounts.google.com`. This is the fully-qualified host component + of the domain name of the identity provider. Do not include URL + schemes and port numbers. Currently, `www.amazon.com` and + `graph.facebook.com` are supported. + + :type policy: string + :param policy: A supplemental policy that is associated with the + temporary security credentials from the `AssumeRoleWithWebIdentity` + call. The resulting permissions of the temporary security + credentials are an intersection of this policy and the access + policy that is associated with the role. Use this policy to further + restrict the permissions of the temporary security credentials. + + :type duration_seconds: integer + :param duration_seconds: The duration, in seconds, of the role session. + The value can range from 900 seconds (15 minutes) to 3600 seconds + (1 hour). By default, the value is set to 3600 seconds. + + """ + params = { + 'RoleArn': role_arn, + 'RoleSessionName': role_session_name, + 'WebIdentityToken': web_identity_token, + } + if provider_id is not None: + params['ProviderId'] = provider_id + if policy is not None: + params['Policy'] = policy + if duration_seconds is not None: + params['DurationSeconds'] = duration_seconds + return self.get_object( + 'AssumeRoleWithWebIdentity', + params, + AssumedRole, + verb='POST' + ) + + def decode_authorization_message(self, encoded_message): + """ + Decodes additional information about the authorization status + of a request from an encoded message returned in response to + an AWS request. + + For example, if a user is not authorized to perform an action + that he or she has requested, the request returns a + `Client.UnauthorizedOperation` response (an HTTP 403 + response). Some AWS actions additionally return an encoded + message that can provide details about this authorization + failure. + Only certain AWS actions return an encoded authorization + message. 
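[Editor's note: a sketch of assume_role_with_web_identity(), implemented above. This is an editorial addition, not part of the patch; the ARN, session name, and token are placeholders. As with SAML, the call works over an anonymous connection.]

    import boto.sts

    conn = boto.sts.STSConnection(anon=True)
    web_token = '<OAuth or OpenID Connect token from the provider>'  # placeholder
    assumed = conn.assume_role_with_web_identity(
        role_arn='arn:aws:iam::123456789012:role/WebIdentity',  # placeholder ARN
        role_session_name='app-user-123',                       # placeholder name
        web_identity_token=web_token)
    print(assumed.credentials.session_token)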
The documentation for an individual action indicates + whether that action returns an encoded message in addition to + returning an HTTP code. + The message is encoded because the details of the + authorization status can constitute privileged information + that the user who requested the action should not see. To + decode an authorization status message, a user must be granted + permissions via an IAM policy to request the + `DecodeAuthorizationMessage` ( + `sts:DecodeAuthorizationMessage`) action. + + The decoded message includes the following type of + information: + + + + Whether the request was denied due to an explicit deny or + due to the absence of an explicit allow. For more information, + see `Determining Whether a Request is Allowed or Denied`_ in + Using IAM . + + The principal who made the request. + + The requested action. + + The requested resource. + + The values of condition keys in the context of the user's + request. + + :type encoded_message: string + :param encoded_message: The encoded message that was returned with the + response. + + """ + params = { + 'EncodedMessage': encoded_message, + } + return self.get_object( + 'DecodeAuthorizationMessage', + params, + DecodeAuthorizationMessage, + verb='POST' + ) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/sts/credentials.py b/desktop/core/ext-py/boto-2.38.0/boto/sts/credentials.py new file mode 100644 index 0000000000000000000000000000000000000000..7ab631942cdaa15479592d5d268df0e6ccd0f6de --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/sts/credentials.py @@ -0,0 +1,237 @@ +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import os +import datetime + +import boto.utils +from boto.compat import json + + +class Credentials(object): + """ + :ivar access_key: The AccessKeyID. + :ivar secret_key: The SecretAccessKey. + :ivar session_token: The session token that must be passed with + requests to use the temporary credentials + :ivar expiration: The timestamp for when the credentials will expire + """ + + def __init__(self, parent=None): + self.parent = parent + self.access_key = None + self.secret_key = None + self.session_token = None + self.expiration = None + self.request_id = None + + @classmethod + def from_json(cls, json_doc): + """ + Create and return a new Session Token based on the contents + of a JSON document. 
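[Editor's sketch] A short sketch of decoding such a message; the caller's credentials are assumed to allow `sts:DecodeAuthorizationMessage`, and `encoded` stands in for the opaque blob copied out of a `Client.UnauthorizedOperation` error:

    from boto.sts import STSConnection

    sts = STSConnection()
    decoded = sts.decode_authorization_message(encoded)
    # decoded_message is typically a JSON document describing the denial
    print(decoded.decoded_message)
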
+
+        :type json_doc: str
+        :param json_doc: A string containing a JSON document with a
+            previously saved Credentials object.
+        """
+        d = json.loads(json_doc)
+        token = cls()
+        token.__dict__.update(d)
+        return token
+
+    @classmethod
+    def load(cls, file_path):
+        """
+        Create and return a new Session Token based on the contents
+        of a previously saved JSON-format file.
+
+        :type file_path: str
+        :param file_path: The fully qualified path to the JSON-format
+            file containing the previously saved Session Token information.
+        """
+        fp = open(file_path)
+        json_doc = fp.read()
+        fp.close()
+        return cls.from_json(json_doc)
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'AccessKeyId':
+            self.access_key = value
+        elif name == 'SecretAccessKey':
+            self.secret_key = value
+        elif name == 'SessionToken':
+            self.session_token = value
+        elif name == 'Expiration':
+            self.expiration = value
+        elif name == 'RequestId':
+            self.request_id = value
+        else:
+            pass
+
+    def to_dict(self):
+        """
+        Return a Python dict containing the important information
+        about this Session Token.
+        """
+        return {'access_key': self.access_key,
+                'secret_key': self.secret_key,
+                'session_token': self.session_token,
+                'expiration': self.expiration,
+                'request_id': self.request_id}
+
+    def save(self, file_path):
+        """
+        Persist a Session Token to a file in JSON format.
+
+        :type file_path: str
+        :param file_path: The fully qualified path to the file where the
+            Session Token data should be written.  Any previous
+            data in the file will be overwritten.  To help protect
+            the credentials contained in the file, the permissions
+            of the file will be set to readable/writable by owner only.
+        """
+        fp = open(file_path, 'w')
+        json.dump(self.to_dict(), fp)
+        fp.close()
+        os.chmod(file_path, 0o600)
+
+    def is_expired(self, time_offset_seconds=0):
+        """
+        Checks to see if the Session Token is expired or not.  By default
+        it will check to see if the Session Token is expired as of the
+        moment the method is called.  However, you can supply an
+        optional parameter which is the number of seconds of offset
+        into the future for the check.  For example, if you supply
+        a value of 5, this method will return True if the Session
+        Token will be expired 5 seconds from this moment.
+
+        :type time_offset_seconds: int
+        :param time_offset_seconds: The number of seconds into the future
+            to test the Session Token for expiration.
+        """
+        now = datetime.datetime.utcnow()
+        if time_offset_seconds:
+            now = now + datetime.timedelta(seconds=time_offset_seconds)
+        ts = boto.utils.parse_ts(self.expiration)
+        delta = ts - now
+        return delta.total_seconds() <= 0
+
+
+class FederationToken(object):
+    """
+    :ivar credentials: A Credentials object containing the credentials.
+    :ivar federated_user_arn: ARN specifying federated user using credentials.
+    :ivar federated_user_id: The ID of the federated user using credentials.
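[Editor's sketch] The save/load/is_expired trio above supports simple on-disk caching of a token. A minimal sketch, assuming an `STSConnection` named `sts` (its `get_session_token()` returns a `Credentials` instance):

    creds = sts.get_session_token()
    creds.save('/tmp/session.json')  # written as JSON, chmod 0600

    cached = Credentials.load('/tmp/session.json')
    if cached.is_expired(time_offset_seconds=60):
        # treat anything expiring within a minute as already stale
        cached = sts.get_session_token()
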
+ :ivar packed_policy_size: A percentage value indicating the size of + the policy in packed form + """ + + def __init__(self, parent=None): + self.parent = parent + self.credentials = None + self.federated_user_arn = None + self.federated_user_id = None + self.packed_policy_size = None + self.request_id = None + + def startElement(self, name, attrs, connection): + if name == 'Credentials': + self.credentials = Credentials() + return self.credentials + else: + return None + + def endElement(self, name, value, connection): + if name == 'Arn': + self.federated_user_arn = value + elif name == 'FederatedUserId': + self.federated_user_id = value + elif name == 'PackedPolicySize': + self.packed_policy_size = int(value) + elif name == 'RequestId': + self.request_id = value + else: + pass + + +class AssumedRole(object): + """ + :ivar user: The assumed role user. + :ivar credentials: A Credentials object containing the credentials. + """ + def __init__(self, connection=None, credentials=None, user=None): + self._connection = connection + self.credentials = credentials + self.user = user + + def startElement(self, name, attrs, connection): + if name == 'Credentials': + self.credentials = Credentials() + return self.credentials + elif name == 'AssumedRoleUser': + self.user = User() + return self.user + + def endElement(self, name, value, connection): + pass + + +class User(object): + """ + :ivar arn: The arn of the user assuming the role. + :ivar assume_role_id: The identifier of the assumed role. + """ + def __init__(self, arn=None, assume_role_id=None): + self.arn = arn + self.assume_role_id = assume_role_id + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Arn': + self.arn = value + elif name == 'AssumedRoleId': + self.assume_role_id = value + + +class DecodeAuthorizationMessage(object): + """ + :ivar request_id: The request ID. + :ivar decoded_message: The decoded authorization message (may be JSON). + """ + def __init__(self, request_id=None, decoded_message=None): + self.request_id = request_id + self.decoded_message = decoded_message + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'requestId': + self.request_id = value + elif name == 'DecodedMessage': + self.decoded_message = value diff --git a/desktop/core/ext-py/boto-2.38.0/boto/support/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/support/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c114a9a37320f2d3c4bf90588acd58f0dccf0f76 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/support/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.regioninfo import RegionInfo, get_regions + + +def regions(): + """ + Get all available regions for the Amazon Support service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.support.layer1 import SupportConnection + return get_regions('support', connection_cls=SupportConnection) + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/desktop/core/ext-py/boto-2.38.0/boto/support/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/support/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..cbc19b3a2d44a5a5e93980b20c19e907fdd834c2 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/support/exceptions.py @@ -0,0 +1,58 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import JSONResponseError + + +class CaseIdNotFound(JSONResponseError): + pass + + +class CaseCreationLimitExceeded(JSONResponseError): + pass + + +class InternalServerError(JSONResponseError): + pass + + +class AttachmentLimitExceeded(JSONResponseError): + pass + + +class DescribeAttachmentLimitExceeded(JSONResponseError): + pass + + +class AttachmentSetIdNotFound(JSONResponseError): + pass + + +class AttachmentSetExpired(JSONResponseError): + pass + + +class AttachmentIdNotFound(JSONResponseError): + pass + + +class AttachmentSetSizeLimitExceeded(JSONResponseError): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/support/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/support/layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..33e83cc472d74c1cdbe251157a58d64922c388d2 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/support/layer1.py @@ -0,0 +1,674 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.support import exceptions + + +class SupportConnection(AWSQueryConnection): + """ + AWS Support + The AWS Support API reference is intended for programmers who need + detailed information about the AWS Support operations and data + types. This service enables you to manage your AWS Support cases + programmatically. It uses HTTP methods that return results in JSON + format. + + The AWS Support service also exposes a set of `Trusted Advisor`_ + features. You can retrieve a list of checks and their + descriptions, get check results, specify checks to refresh, and + get the refresh status of checks. + + The following list describes the AWS Support case management + operations: + + + + **Service names, issue categories, and available severity + levels. **The DescribeServices and DescribeSeverityLevels + operations return AWS service names, service codes, service + categories, and problem severity levels. You use these values when + you call the CreateCase operation. + + **Case creation, case details, and case resolution.** The + CreateCase, DescribeCases, DescribeAttachment, and ResolveCase + operations create AWS Support cases, retrieve information about + cases, and resolve cases. + + **Case communication.** The DescribeCommunications, + AddCommunicationToCase, and AddAttachmentsToSet operations + retrieve and add communications and attachments to AWS Support + cases. + + + The following list describes the operations available from the AWS + Support service for Trusted Advisor: + + + + DescribeTrustedAdvisorChecks returns the list of checks that run + against your AWS resources. + + Using the `CheckId` for a specific check returned by + DescribeTrustedAdvisorChecks, you can call + DescribeTrustedAdvisorCheckResult to obtain the results for the + check you specified. + + DescribeTrustedAdvisorCheckSummaries returns summarized results + for one or more Trusted Advisor checks. + + RefreshTrustedAdvisorCheck requests that Trusted Advisor rerun a + specified check. + + DescribeTrustedAdvisorCheckRefreshStatuses reports the refresh + status of one or more checks. + + + For authentication of requests, AWS Support uses `Signature + Version 4 Signing Process`_. 
+ + See `About the AWS Support API`_ in the AWS Support User Guide for + information about how to use this service to create and manage + your support cases, and how to call Trusted Advisor for results of + checks on your resources. + """ + APIVersion = "2013-04-15" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "support.us-east-1.amazonaws.com" + ServiceName = "Support" + TargetPrefix = "AWSSupport_20130415" + ResponseError = JSONResponseError + + _faults = { + "CaseCreationLimitExceeded": exceptions.CaseCreationLimitExceeded, + "AttachmentLimitExceeded": exceptions.AttachmentLimitExceeded, + "CaseIdNotFound": exceptions.CaseIdNotFound, + "DescribeAttachmentLimitExceeded": exceptions.DescribeAttachmentLimitExceeded, + "AttachmentSetIdNotFound": exceptions.AttachmentSetIdNotFound, + "InternalServerError": exceptions.InternalServerError, + "AttachmentSetExpired": exceptions.AttachmentSetExpired, + "AttachmentIdNotFound": exceptions.AttachmentIdNotFound, + "AttachmentSetSizeLimitExceeded": exceptions.AttachmentSetSizeLimitExceeded, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(SupportConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def add_attachments_to_set(self, attachments, attachment_set_id=None): + """ + Adds one or more attachments to an attachment set. If an + `AttachmentSetId` is not specified, a new attachment set is + created, and the ID of the set is returned in the response. If + an `AttachmentSetId` is specified, the attachments are added + to the specified set, if it exists. + + An attachment set is a temporary container for attachments + that are to be added to a case or case communication. The set + is available for one hour after it is created; the + `ExpiryTime` returned in the response indicates when the set + expires. The maximum number of attachments in a set is 3, and + the maximum size of any attachment in the set is 5 MB. + + :type attachment_set_id: string + :param attachment_set_id: The ID of the attachment set. If an + `AttachmentSetId` is not specified, a new attachment set is + created, and the ID of the set is returned in the response. If an + `AttachmentSetId` is specified, the attachments are added to the + specified set, if it exists. + + :type attachments: list + :param attachments: One or more attachments to add to the set. The + limit is 3 attachments per set, and the size limit is 5 MB per + attachment. + + """ + params = {'attachments': attachments, } + if attachment_set_id is not None: + params['attachmentSetId'] = attachment_set_id + return self.make_request(action='AddAttachmentsToSet', + body=json.dumps(params)) + + def add_communication_to_case(self, communication_body, case_id=None, + cc_email_addresses=None, + attachment_set_id=None): + """ + Adds additional customer communication to an AWS Support case. + You use the `CaseId` value to identify the case to add + communication to. You can list a set of email addresses to + copy on the communication using the `CcEmailAddresses` value. + The `CommunicationBody` value contains the text of the + communication. + + The response indicates the success or failure of the request. + + This operation implements a subset of the behavior on the AWS + Support `Your Support Cases`_ web form. 
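[Editor's sketch] A sketch of the attachment-set flow: each attachment is assumed to be a dict carrying a `fileName` and base64-encoded `data`, per the AWS Support API's JSON shape (an assumption worth verifying), and the returned set ID is then usable for one hour. `support` and `case_id` are assumed from earlier calls:

    import base64

    attachment = {
        'fileName': 'diagnostics.log',  # shape assumed from the API docs
        'data': base64.b64encode(open('diagnostics.log', 'rb').read()),
    }
    resp = support.add_attachments_to_set(attachments=[attachment])
    support.add_communication_to_case(
        communication_body='Diagnostic log attached.',
        case_id=case_id,
        attachment_set_id=resp['attachmentSetId'])
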
+ + :type case_id: string + :param case_id: The AWS Support case ID requested or returned in the + call. The case ID is an alphanumeric string formatted as shown in + this example: case- 12345678910-2013-c4c1d2bf33c5cf47 + + :type communication_body: string + :param communication_body: The body of an email communication to add to + the support case. + + :type cc_email_addresses: list + :param cc_email_addresses: The email addresses in the CC line of an + email to be added to the support case. + + :type attachment_set_id: string + :param attachment_set_id: The ID of a set of one or more attachments + for the communication to add to the case. Create the set by calling + AddAttachmentsToSet + + """ + params = {'communicationBody': communication_body, } + if case_id is not None: + params['caseId'] = case_id + if cc_email_addresses is not None: + params['ccEmailAddresses'] = cc_email_addresses + if attachment_set_id is not None: + params['attachmentSetId'] = attachment_set_id + return self.make_request(action='AddCommunicationToCase', + body=json.dumps(params)) + + def create_case(self, subject, communication_body, service_code=None, + severity_code=None, category_code=None, + cc_email_addresses=None, language=None, issue_type=None, + attachment_set_id=None): + """ + Creates a new case in the AWS Support Center. This operation + is modeled on the behavior of the AWS Support Center `Open a + new case`_ page. Its parameters require you to specify the + following information: + + + #. **IssueType.** The type of issue for the case. You can + specify either "customer-service" or "technical." If you do + not indicate a value, the default is "technical." + #. **ServiceCode.** The code for an AWS service. You obtain + the `ServiceCode` by calling DescribeServices. + #. **CategoryCode.** The category for the service defined for + the `ServiceCode` value. You also obtain the category code for + a service by calling DescribeServices. Each AWS service + defines its own set of category codes. + #. **SeverityCode.** A value that indicates the urgency of the + case, which in turn determines the response time according to + your service level agreement with AWS Support. You obtain the + SeverityCode by calling DescribeSeverityLevels. + #. **Subject.** The **Subject** field on the AWS Support + Center `Open a new case`_ page. + #. **CommunicationBody.** The **Description** field on the AWS + Support Center `Open a new case`_ page. + #. **AttachmentSetId.** The ID of a set of attachments that + has been created by using AddAttachmentsToSet. + #. **Language.** The human language in which AWS Support + handles the case. English and Japanese are currently + supported. + #. **CcEmailAddresses.** The AWS Support Center **CC** field + on the `Open a new case`_ page. You can list email addresses + to be copied on any correspondence about the case. The account + that opens the case is already identified by passing the AWS + Credentials in the HTTP POST method or in a method or function + call from one of the programming languages supported by an + `AWS SDK`_. + + + A successful CreateCase request returns an AWS Support case + number. Case numbers are used by the DescribeCases operation + to retrieve existing AWS Support cases. + + :type subject: string + :param subject: The title of the AWS Support case. + + :type service_code: string + :param service_code: The code for the AWS service returned by the call + to DescribeServices. 
+ + :type severity_code: string + :param severity_code: The code for the severity level returned by the + call to DescribeSeverityLevels. + + :type category_code: string + :param category_code: The category of problem for the AWS Support case. + + :type communication_body: string + :param communication_body: The communication body text when you create + an AWS Support case by calling CreateCase. + + :type cc_email_addresses: list + :param cc_email_addresses: A list of email addresses that AWS Support + copies on case correspondence. + + :type language: string + :param language: The ISO 639-1 code for the language in which AWS + provides support. AWS Support currently supports English ("en") and + Japanese ("ja"). Language parameters must be passed explicitly for + operations that take them. + + :type issue_type: string + :param issue_type: The type of issue for the case. You can specify + either "customer-service" or "technical." If you do not indicate a + value, the default is "technical." + + :type attachment_set_id: string + :param attachment_set_id: The ID of a set of one or more attachments + for the case. Create the set by using AddAttachmentsToSet. + + """ + params = { + 'subject': subject, + 'communicationBody': communication_body, + } + if service_code is not None: + params['serviceCode'] = service_code + if severity_code is not None: + params['severityCode'] = severity_code + if category_code is not None: + params['categoryCode'] = category_code + if cc_email_addresses is not None: + params['ccEmailAddresses'] = cc_email_addresses + if language is not None: + params['language'] = language + if issue_type is not None: + params['issueType'] = issue_type + if attachment_set_id is not None: + params['attachmentSetId'] = attachment_set_id + return self.make_request(action='CreateCase', + body=json.dumps(params)) + + def describe_attachment(self, attachment_id): + """ + Returns the attachment that has the specified ID. Attachment + IDs are generated by the case management system when you add + an attachment to a case or case communication. Attachment IDs + are returned in the AttachmentDetails objects that are + returned by the DescribeCommunications operation. + + :type attachment_id: string + :param attachment_id: The ID of the attachment to return. Attachment + IDs are returned by the DescribeCommunications operation. + + """ + params = {'attachmentId': attachment_id, } + return self.make_request(action='DescribeAttachment', + body=json.dumps(params)) + + def describe_cases(self, case_id_list=None, display_id=None, + after_time=None, before_time=None, + include_resolved_cases=None, next_token=None, + max_results=None, language=None, + include_communications=None): + """ + Returns a list of cases that you specify by passing one or + more case IDs. In addition, you can filter the cases by date + by setting values for the `AfterTime` and `BeforeTime` request + parameters. + + Case data is available for 12 months after creation. If a case + was created more than 12 months ago, a request for data might + cause an error. + + The response returns the following in JSON format: + + + #. One or more CaseDetails data types. + #. One or more `NextToken` values, which specify where to + paginate the returned records represented by the `CaseDetails` + objects. + + :type case_id_list: list + :param case_id_list: A list of ID numbers of the support cases you want + returned. The maximum number of cases is 100. 
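[Editor's sketch] Tying the numbered parameters together, a hedged sketch of opening a case; `connect_to_region` comes from `boto.support` (added earlier in this patch), and the response keys (`services`, `severityLevels`, `caseId`) are assumed from the JSON these operations return:

    from boto.support import connect_to_region

    support = connect_to_region('us-east-1')
    service = support.describe_services(language='en')['services'][0]
    severity = support.describe_severity_levels(language='en')['severityLevels'][0]

    resp = support.create_case(
        subject='Instance unreachable',
        communication_body='Instance stopped responding at 09:00 UTC.',
        service_code=service['code'],
        category_code=service['categories'][0]['code'],
        severity_code=severity['code'],
        issue_type='technical',
        language='en')
    case_id = resp['caseId']
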
+ + :type display_id: string + :param display_id: The ID displayed for a case in the AWS Support + Center user interface. + + :type after_time: string + :param after_time: The start date for a filtered date search on support + case communications. Case communications are available for 12 + months after creation. + + :type before_time: string + :param before_time: The end date for a filtered date search on support + case communications. Case communications are available for 12 + months after creation. + + :type include_resolved_cases: boolean + :param include_resolved_cases: Specifies whether resolved support cases + should be included in the DescribeCases results. The default is + false . + + :type next_token: string + :param next_token: A resumption point for pagination. + + :type max_results: integer + :param max_results: The maximum number of results to return before + paginating. + + :type language: string + :param language: The ISO 639-1 code for the language in which AWS + provides support. AWS Support currently supports English ("en") and + Japanese ("ja"). Language parameters must be passed explicitly for + operations that take them. + + :type include_communications: boolean + :param include_communications: Specifies whether communications should + be included in the DescribeCases results. The default is true . + + """ + params = {} + if case_id_list is not None: + params['caseIdList'] = case_id_list + if display_id is not None: + params['displayId'] = display_id + if after_time is not None: + params['afterTime'] = after_time + if before_time is not None: + params['beforeTime'] = before_time + if include_resolved_cases is not None: + params['includeResolvedCases'] = include_resolved_cases + if next_token is not None: + params['nextToken'] = next_token + if max_results is not None: + params['maxResults'] = max_results + if language is not None: + params['language'] = language + if include_communications is not None: + params['includeCommunications'] = include_communications + return self.make_request(action='DescribeCases', + body=json.dumps(params)) + + def describe_communications(self, case_id, before_time=None, + after_time=None, next_token=None, + max_results=None): + """ + Returns communications (and attachments) for one or more + support cases. You can use the `AfterTime` and `BeforeTime` + parameters to filter by date. You can use the `CaseId` + parameter to restrict the results to a particular case. + + Case data is available for 12 months after creation. If a case + was created more than 12 months ago, a request for data might + cause an error. + + You can use the `MaxResults` and `NextToken` parameters to + control the pagination of the result set. Set `MaxResults` to + the number of cases you want displayed on each page, and use + `NextToken` to specify the resumption of pagination. + + :type case_id: string + :param case_id: The AWS Support case ID requested or returned in the + call. The case ID is an alphanumeric string formatted as shown in + this example: case- 12345678910-2013-c4c1d2bf33c5cf47 + + :type before_time: string + :param before_time: The end date for a filtered date search on support + case communications. Case communications are available for 12 + months after creation. + + :type after_time: string + :param after_time: The start date for a filtered date search on support + case communications. Case communications are available for 12 + months after creation. + + :type next_token: string + :param next_token: A resumption point for pagination. 
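[Editor's sketch] DescribeCases and DescribeCommunications page their results the same way; a sketch of draining the pages, with the `communications`/`nextToken` response keys assumed from the service's JSON:

    comms, token = [], None
    while True:
        resp = support.describe_communications(case_id, max_results=50,
                                               next_token=token)
        comms.extend(resp['communications'])
        token = resp.get('nextToken')  # absent on the last page
        if not token:
            break
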
+ + :type max_results: integer + :param max_results: The maximum number of results to return before + paginating. + + """ + params = {'caseId': case_id, } + if before_time is not None: + params['beforeTime'] = before_time + if after_time is not None: + params['afterTime'] = after_time + if next_token is not None: + params['nextToken'] = next_token + if max_results is not None: + params['maxResults'] = max_results + return self.make_request(action='DescribeCommunications', + body=json.dumps(params)) + + def describe_services(self, service_code_list=None, language=None): + """ + Returns the current list of AWS services and a list of service + categories that applies to each one. You then use service + names and categories in your CreateCase requests. Each AWS + service has its own set of categories. + + The service codes and category codes correspond to the values + that are displayed in the **Service** and **Category** drop- + down lists on the AWS Support Center `Open a new case`_ page. + The values in those fields, however, do not necessarily match + the service codes and categories returned by the + `DescribeServices` request. Always use the service codes and + categories obtained programmatically. This practice ensures + that you always have the most recent set of service and + category codes. + + :type service_code_list: list + :param service_code_list: A JSON-formatted list of service codes + available for AWS services. + + :type language: string + :param language: The ISO 639-1 code for the language in which AWS + provides support. AWS Support currently supports English ("en") and + Japanese ("ja"). Language parameters must be passed explicitly for + operations that take them. + + """ + params = {} + if service_code_list is not None: + params['serviceCodeList'] = service_code_list + if language is not None: + params['language'] = language + return self.make_request(action='DescribeServices', + body=json.dumps(params)) + + def describe_severity_levels(self, language=None): + """ + Returns the list of severity levels that you can assign to an + AWS Support case. The severity level for a case is also a + field in the CaseDetails data type included in any CreateCase + request. + + :type language: string + :param language: The ISO 639-1 code for the language in which AWS + provides support. AWS Support currently supports English ("en") and + Japanese ("ja"). Language parameters must be passed explicitly for + operations that take them. + + """ + params = {} + if language is not None: + params['language'] = language + return self.make_request(action='DescribeSeverityLevels', + body=json.dumps(params)) + + def describe_trusted_advisor_check_refresh_statuses(self, check_ids): + """ + Returns the refresh status of the Trusted Advisor checks that + have the specified check IDs. Check IDs can be obtained by + calling DescribeTrustedAdvisorChecks. + + :type check_ids: list + :param check_ids: The IDs of the Trusted Advisor checks. + + """ + params = {'checkIds': check_ids, } + return self.make_request(action='DescribeTrustedAdvisorCheckRefreshStatuses', + body=json.dumps(params)) + + def describe_trusted_advisor_check_result(self, check_id, language=None): + """ + Returns the results of the Trusted Advisor check that has the + specified check ID. Check IDs can be obtained by calling + DescribeTrustedAdvisorChecks. 
+ + The response contains a TrustedAdvisorCheckResult object, + which contains these three objects: + + + + TrustedAdvisorCategorySpecificSummary + + TrustedAdvisorResourceDetail + + TrustedAdvisorResourcesSummary + + + In addition, the response contains these fields: + + + + **Status.** The alert status of the check: "ok" (green), + "warning" (yellow), "error" (red), or "not_available". + + **Timestamp.** The time of the last refresh of the check. + + **CheckId.** The unique identifier for the check. + + :type check_id: string + :param check_id: The unique identifier for the Trusted Advisor check. + + :type language: string + :param language: The ISO 639-1 code for the language in which AWS + provides support. AWS Support currently supports English ("en") and + Japanese ("ja"). Language parameters must be passed explicitly for + operations that take them. + + """ + params = {'checkId': check_id, } + if language is not None: + params['language'] = language + return self.make_request(action='DescribeTrustedAdvisorCheckResult', + body=json.dumps(params)) + + def describe_trusted_advisor_check_summaries(self, check_ids): + """ + Returns the summaries of the results of the Trusted Advisor + checks that have the specified check IDs. Check IDs can be + obtained by calling DescribeTrustedAdvisorChecks. + + The response contains an array of TrustedAdvisorCheckSummary + objects. + + :type check_ids: list + :param check_ids: The IDs of the Trusted Advisor checks. + + """ + params = {'checkIds': check_ids, } + return self.make_request(action='DescribeTrustedAdvisorCheckSummaries', + body=json.dumps(params)) + + def describe_trusted_advisor_checks(self, language): + """ + Returns information about all available Trusted Advisor + checks, including name, ID, category, description, and + metadata. You must specify a language code; English ("en") and + Japanese ("ja") are currently supported. The response contains + a TrustedAdvisorCheckDescription for each check. + + :type language: string + :param language: The ISO 639-1 code for the language in which AWS + provides support. AWS Support currently supports English ("en") and + Japanese ("ja"). Language parameters must be passed explicitly for + operations that take them. + + """ + params = {'language': language, } + return self.make_request(action='DescribeTrustedAdvisorChecks', + body=json.dumps(params)) + + def refresh_trusted_advisor_check(self, check_id): + """ + Requests a refresh of the Trusted Advisor check that has the + specified check ID. Check IDs can be obtained by calling + DescribeTrustedAdvisorChecks. + + The response contains a RefreshTrustedAdvisorCheckResult + object, which contains these fields: + + + + **Status.** The refresh status of the check: "none", + "enqueued", "processing", "success", or "abandoned". + + **MillisUntilNextRefreshable.** The amount of time, in + milliseconds, until the check is eligible for refresh. + + **CheckId.** The unique identifier for the check. + + :type check_id: string + :param check_id: The unique identifier for the Trusted Advisor check. + + """ + params = {'checkId': check_id, } + return self.make_request(action='RefreshTrustedAdvisorCheck', + body=json.dumps(params)) + + def resolve_case(self, case_id=None): + """ + Takes a `CaseId` and returns the initial state of the case + along with the state of the case after the call to ResolveCase + completed. + + :type case_id: string + :param case_id: The AWS Support case ID requested or returned in the + call. 
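[Editor's sketch] The four Trusted Advisor calls documented here compose into a refresh-then-read loop; a sketch, with the response keys (`checks`, `statuses`, `result`) assumed from the service's JSON:

    import time

    checks = support.describe_trusted_advisor_checks('en')['checks']
    check_id = checks[0]['id']
    support.refresh_trusted_advisor_check(check_id)
    while True:
        status = support.describe_trusted_advisor_check_refresh_statuses(
            [check_id])['statuses'][0]['status']
        if status in ('success', 'abandoned', 'none'):
            break
        time.sleep(30)  # wait out 'enqueued'/'processing'
    result = support.describe_trusted_advisor_check_result(check_id)['result']
    print(result['status'])
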
The case ID is an alphanumeric string formatted as shown in + this example: case- 12345678910-2013-c4c1d2bf33c5cf47 + + """ + params = {} + if case_id is not None: + params['caseId'] = case_id + return self.make_request(action='ResolveCase', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/swf/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/swf/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bebbd6963230b705084fa0a9a905f47c1fdc3210 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/swf/__init__.py @@ -0,0 +1,46 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.ec2.regioninfo import RegionInfo +from boto.regioninfo import get_regions, load_regions +import boto.swf.layer1 + +REGION_ENDPOINTS = load_regions().get('swf', {}) + + +def regions(**kw_params): + """ + Get all available regions for the Amazon Simple Workflow service. 
+
+    :rtype: list
+    :return: A list of :class:`boto.regioninfo.RegionInfo`
+    """
+    return get_regions('swf', connection_cls=boto.swf.layer1.Layer1)
+
+
+def connect_to_region(region_name, **kw_params):
+    for region in regions():
+        if region.name == region_name:
+            return region.connect(**kw_params)
+    return None
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/swf/exceptions.py b/desktop/core/ext-py/boto-2.38.0/boto/swf/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3ac6aeb73dca957f50f6acc04e50113fba5d6ba
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/swf/exceptions.py
@@ -0,0 +1,44 @@
+"""
+Exceptions that are specific to the swf module.
+
+This module subclasses the base SWF response exception,
+boto.exception.SWFResponseError, for some of the SWF specific faults.
+"""
+from boto.exception import SWFResponseError
+
+
+class SWFDomainAlreadyExistsError(SWFResponseError):
+    """
+    Raised when the domain already exists.
+    """
+    pass
+
+
+class SWFLimitExceededError(SWFResponseError):
+    """
+    Raised when a system imposed limitation has been reached.
+    """
+    pass
+
+
+class SWFOperationNotPermittedError(SWFResponseError):
+    """
+    Raised when the requested operation is not permitted
+    (reserved for future use).
+    """
+
+
+class SWFTypeAlreadyExistsError(SWFResponseError):
+    """
+    Raised when the workflow type or activity type already exists.
+    """
+    pass
+
+
+class SWFWorkflowExecutionAlreadyStartedError(SWFResponseError):
+    """
+    Raised when an open execution with the same workflow_id is already running
+    in the specified domain.
+    """
+
+
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/swf/layer1.py b/desktop/core/ext-py/boto-2.38.0/boto/swf/layer1.py
new file mode 100644
index 0000000000000000000000000000000000000000..bba16ad270a6cecc68f7e202ac579239e8d4919d
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/swf/layer1.py
@@ -0,0 +1,1512 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+# + +import time + +import boto +from boto.connection import AWSAuthConnection +from boto.provider import Provider +from boto.exception import SWFResponseError +from boto.swf import exceptions as swf_exceptions +from boto.compat import json + +# +# To get full debug output, uncomment the following line and set the +# value of Debug to be 2 +# +#boto.set_stream_logger('swf') +Debug = 0 + + +class Layer1(AWSAuthConnection): + """ + Low-level interface to Simple WorkFlow Service. + """ + + DefaultRegionName = 'us-east-1' + """The default region name for Simple Workflow.""" + + ServiceName = 'com.amazonaws.swf.service.model.SimpleWorkflowService' + """The name of the Service""" + + # In some cases, the fault response __type value is mapped to + # an exception class more specific than SWFResponseError. + _fault_excp = { + 'com.amazonaws.swf.base.model#DomainAlreadyExistsFault': + swf_exceptions.SWFDomainAlreadyExistsError, + 'com.amazonaws.swf.base.model#LimitExceededFault': + swf_exceptions.SWFLimitExceededError, + 'com.amazonaws.swf.base.model#OperationNotPermittedFault': + swf_exceptions.SWFOperationNotPermittedError, + 'com.amazonaws.swf.base.model#TypeAlreadyExistsFault': + swf_exceptions.SWFTypeAlreadyExistsError, + 'com.amazonaws.swf.base.model#WorkflowExecutionAlreadyStartedFault': + swf_exceptions.SWFWorkflowExecutionAlreadyStartedError, + } + + ResponseError = SWFResponseError + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + debug=0, session_token=None, region=None, profile_name=None): + if not region: + region_name = boto.config.get('SWF', 'region', + self.DefaultRegionName) + for reg in boto.swf.regions(): + if reg.name == region_name: + region = reg + break + + self.region = region + super(Layer1, self).__init__(self.region.endpoint, + aws_access_key_id, aws_secret_access_key, + is_secure, port, proxy, proxy_port, + debug, session_token, profile_name=profile_name) + + def _required_auth_capability(self): + return ['hmac-v4'] + + @classmethod + def _normalize_request_dict(cls, data): + """ + This class method recurses through request data dictionary and removes + any default values. + + :type data: dict + :param data: Specifies request parameters with default values to be removed. + """ + for item in list(data.keys()): + if isinstance(data[item], dict): + cls._normalize_request_dict(data[item]) + if data[item] in (None, {}): + del data[item] + + def json_request(self, action, data, object_hook=None): + """ + This method wraps around make_request() to normalize and serialize the + dictionary with request parameters. + + :type action: string + :param action: Specifies an SWF action. + + :type data: dict + :param data: Specifies request parameters associated with the action. + """ + self._normalize_request_dict(data) + json_input = json.dumps(data) + return self.make_request(action, json_input, object_hook) + + def make_request(self, action, body='', object_hook=None): + """ + :raises: ``SWFResponseError`` if response status is not 200. 
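[Editor's sketch] The pruning done by `_normalize_request_dict` is what lets every optional keyword in the methods below default to None without leaking empty fields onto the wire; a small illustration of its in-place behavior:

    from boto.swf.layer1 import Layer1

    data = {
        'domain': 'my-domain',
        'taskList': {'name': None},  # emptied by recursion, then dropped
        'identity': None,            # dropped outright
    }
    Layer1._normalize_request_dict(data)
    assert data == {'domain': 'my-domain'}
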
+ """ + headers = {'X-Amz-Target': '%s.%s' % (self.ServiceName, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/json; charset=UTF-8', + 'Content-Encoding': 'amz-1.0', + 'Content-Length': str(len(body))} + http_request = self.build_base_http_request('POST', '/', '/', + {}, headers, body, None) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body, object_hook=object_hook) + else: + return None + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + # Certain faults get mapped to more specific exception classes. + excp_cls = self._fault_excp.get(fault_name, self.ResponseError) + raise excp_cls(response.status, response.reason, body=json_body) + + # Actions related to Activities + + def poll_for_activity_task(self, domain, task_list, identity=None): + """ + Used by workers to get an ActivityTask from the specified + activity taskList. This initiates a long poll, where the + service holds the HTTP connection open and responds as soon as + a task becomes available. The maximum time the service holds + on to the request before responding is 60 seconds. If no task + is available within 60 seconds, the poll will return an empty + result. An empty result, in this context, means that an + ActivityTask is returned, but that the value of taskToken is + an empty string. If a task is returned, the worker should use + its type to identify and process it correctly. + + :type domain: string + :param domain: The name of the domain that contains the task + lists being polled. + + :type task_list: string + :param task_list: Specifies the task list to poll for activity tasks. + + :type identity: string + :param identity: Identity of the worker making the request, which + is recorded in the ActivityTaskStarted event in the workflow + history. This enables diagnostic tracing when problems arise. + The form of this identity is user defined. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('PollForActivityTask', { + 'domain': domain, + 'taskList': {'name': task_list}, + 'identity': identity, + }) + + def respond_activity_task_completed(self, task_token, result=None): + """ + Used by workers to tell the service that the ActivityTask + identified by the taskToken completed successfully with a + result (if provided). + + :type task_token: string + :param task_token: The taskToken of the ActivityTask. + + :type result: string + :param result: The result of the activity task. It is a free + form string that is implementation specific. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('RespondActivityTaskCompleted', { + 'taskToken': task_token, + 'result': result, + }) + + def respond_activity_task_failed(self, task_token, + details=None, reason=None): + """ + Used by workers to tell the service that the ActivityTask + identified by the taskToken has failed with reason (if + specified). + + :type task_token: string + :param task_token: The taskToken of the ActivityTask. + + :type details: string + :param details: Optional detailed information about the failure. + + :type reason: string + :param reason: Description of the error that may assist in diagnostics. 
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('RespondActivityTaskFailed', {
+            'taskToken': task_token,
+            'details': details,
+            'reason': reason,
+        })
+
+    def respond_activity_task_canceled(self, task_token, details=None):
+        """
+        Used by workers to tell the service that the ActivityTask
+        identified by the taskToken was successfully
+        canceled. Additional details can be optionally provided using
+        the details argument.
+
+        :type task_token: string
+        :param task_token: The taskToken of the ActivityTask.
+
+        :type details: string
+        :param details: Optional detailed information about the cancellation.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('RespondActivityTaskCanceled', {
+            'taskToken': task_token,
+            'details': details,
+        })
+
+    def record_activity_task_heartbeat(self, task_token, details=None):
+        """
+        Used by activity workers to report to the service that the
+        ActivityTask represented by the specified taskToken is still
+        making progress. The worker can also (optionally) specify
+        details of the progress, for example percent complete, using
+        the details parameter. This action can also be used by the
+        worker as a mechanism to check if cancellation is being
+        requested for the activity task. If a cancellation is being
+        attempted for the specified task, then the boolean
+        cancelRequested flag returned by the service is set to true.
+
+        :type task_token: string
+        :param task_token: The taskToken of the ActivityTask.
+
+        :type details: string
+        :param details: If specified, contains details about the
+            progress of the task.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('RecordActivityTaskHeartbeat', {
+            'taskToken': task_token,
+            'details': details,
+        })
+
+    # Actions related to Deciders
+
+    def poll_for_decision_task(self, domain, task_list, identity=None,
+                               maximum_page_size=None,
+                               next_page_token=None,
+                               reverse_order=None):
+        """
+        Used by deciders to get a DecisionTask from the specified
+        decision taskList. A decision task may be returned for any
+        open workflow execution that is using the specified task
+        list. The task includes a paginated view of the history of the
+        workflow execution. The decider should use the workflow type
+        and the history to determine how to properly handle the task.
+
+        :type domain: string
+        :param domain: The name of the domain containing the task
+            lists to poll.
+
+        :type task_list: string
+        :param task_list: Specifies the task list to poll for decision tasks.
+
+        :type identity: string
+        :param identity: Identity of the decider making the request,
+            which is recorded in the DecisionTaskStarted event in the
+            workflow history. This enables diagnostic tracing when
+            problems arise. The form of this identity is user defined.
+
+        :type maximum_page_size: integer
+        :param maximum_page_size: The maximum number of history events
+            returned in each page. The default is 100, but the caller can
+            override this value to a page size smaller than the default.
+            You cannot specify a page size greater than 100.
+
+        :type next_page_token: string
+        :param next_page_token: If on a previous call to this method a
+            NextPageToken was returned, the results are being paginated.
+            To get the next page of results, repeat the call with the
+            returned token and all other arguments unchanged.
+
+        :type reverse_order: boolean
+        :param reverse_order: When set to true, returns the events in
+            reverse order.
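[Editor's sketch] Put together, the activity-side calls above form the canonical worker loop; a sketch with a hypothetical domain, task list, and `do_work` helper:

    import boto.swf.layer1

    swf = boto.swf.layer1.Layer1()
    while True:
        task = swf.poll_for_activity_task('my-domain', 'my-task-list',
                                          identity='worker-1')
        if not task.get('taskToken'):
            continue  # the 60-second long poll returned an empty result
        try:
            result = do_work(task['input'])  # hypothetical application code
            swf.respond_activity_task_completed(task['taskToken'], result)
        except Exception as exc:
            swf.respond_activity_task_failed(task['taskToken'],
                                             reason='worker error',
                                             details=str(exc))
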
By default the results are returned in + ascending order of the eventTimestamp of the events. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('PollForDecisionTask', { + 'domain': domain, + 'taskList': {'name': task_list}, + 'identity': identity, + 'maximumPageSize': maximum_page_size, + 'nextPageToken': next_page_token, + 'reverseOrder': reverse_order, + }) + + def respond_decision_task_completed(self, task_token, + decisions=None, + execution_context=None): + """ + Used by deciders to tell the service that the DecisionTask + identified by the taskToken has successfully completed. + The decisions argument specifies the list of decisions + made while processing the task. + + :type task_token: string + :param task_token: The taskToken of the ActivityTask. + + :type decisions: list + :param decisions: The list of decisions (possibly empty) made by + the decider while processing this decision task. See the docs + for the Decision structure for details. + + :type execution_context: string + :param execution_context: User defined context to add to + workflow execution. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('RespondDecisionTaskCompleted', { + 'taskToken': task_token, + 'decisions': decisions, + 'executionContext': execution_context, + }) + + def request_cancel_workflow_execution(self, domain, workflow_id, + run_id=None): + """ + Records a WorkflowExecutionCancelRequested event in the + currently running workflow execution identified by the given + domain, workflowId, and runId. This logically requests the + cancellation of the workflow execution as a whole. It is up to + the decider to take appropriate actions when it receives an + execution history with this event. + + :type domain: string + :param domain: The name of the domain containing the workflow + execution to cancel. + + :type run_id: string + :param run_id: The runId of the workflow execution to cancel. + + :type workflow_id: string + :param workflow_id: The workflowId of the workflow execution + to cancel. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('RequestCancelWorkflowExecution', { + 'domain': domain, + 'workflowId': workflow_id, + 'runId': run_id, + }) + + def start_workflow_execution(self, domain, workflow_id, + workflow_name, workflow_version, + task_list=None, child_policy=None, + execution_start_to_close_timeout=None, + input=None, tag_list=None, + task_start_to_close_timeout=None): + """ + Starts an execution of the workflow type in the specified + domain using the provided workflowId and input data. + + :type domain: string + :param domain: The name of the domain in which the workflow + execution is created. + + :type workflow_id: string + :param workflow_id: The user defined identifier associated with + the workflow execution. You can use this to associate a + custom identifier with the workflow execution. You may + specify the same identifier if a workflow execution is + logically a restart of a previous execution. You cannot + have two open workflow executions with the same workflowId + at the same time. + + :type workflow_name: string + :param workflow_name: The name of the workflow type. + + :type workflow_version: string + :param workflow_version: The version of the workflow type. + + :type task_list: string + :param task_list: The task list to use for the decision tasks + generated for this workflow execution. 
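[Editor's sketch] On the decider side, the matching sketch (reusing the `swf` connection from the previous sketch) polls once and closes the execution; the decision dict follows the SWF `Decision` structure, an assumption to verify against the API reference:

    task = swf.poll_for_decision_task('my-domain', 'my-task-list')
    if task.get('taskToken'):
        swf.respond_decision_task_completed(task['taskToken'], decisions=[{
            'decisionType': 'CompleteWorkflowExecution',
            'completeWorkflowExecutionDecisionAttributes': {'result': 'done'},
        }])
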
+            This overrides the
+            defaultTaskList specified when registering the workflow type.
+
+        :type child_policy: string
+        :param child_policy: If set, specifies the policy to use for the
+            child workflow executions of this workflow execution if it
+            is terminated, by calling the TerminateWorkflowExecution
+            action explicitly or due to an expired timeout. This policy
+            overrides the default child policy specified when registering
+            the workflow type using RegisterWorkflowType. The supported
+            child policies are:
+
+            * TERMINATE: the child executions will be terminated.
+            * REQUEST_CANCEL: a request to cancel will be attempted
+              for each child execution by recording a
+              WorkflowExecutionCancelRequested event in its history.
+              It is up to the decider to take appropriate actions
+              when it receives an execution history with this event.
+            * ABANDON: no action will be taken. The child executions
+              will continue to run.
+
+        :type execution_start_to_close_timeout: string
+        :param execution_start_to_close_timeout: The total duration for
+            this workflow execution. This overrides the
+            defaultExecutionStartToCloseTimeout specified when
+            registering the workflow type.
+
+        :type input: string
+        :param input: The input for the workflow
+            execution. This is a free form string which should be
+            meaningful to the workflow you are starting. This input is
+            made available to the new workflow execution in the
+            WorkflowExecutionStarted history event.
+
+        :type tag_list: list
+        :param tag_list: The list of tags to associate with the
+            workflow execution. You can specify a maximum of 5 tags.
+            You can list workflow executions with a specific tag by
+            calling list_open_workflow_executions or
+            list_closed_workflow_executions and specifying a TagFilter.
+
+        :type task_start_to_close_timeout: string
+        :param task_start_to_close_timeout: Specifies the maximum duration of
+            decision tasks for this workflow execution. This parameter
+            overrides the defaultTaskStartToCloseTimeout specified when
+            registering the workflow type using register_workflow_type.
+
+        :raises: UnknownResourceFault, TypeDeprecatedFault,
+            SWFWorkflowExecutionAlreadyStartedError, SWFLimitExceededError,
+            SWFOperationNotPermittedError, DefaultUndefinedFault
+        """
+        return self.json_request('StartWorkflowExecution', {
+            'domain': domain,
+            'workflowId': workflow_id,
+            'workflowType': {'name': workflow_name,
+                             'version': workflow_version},
+            'taskList': {'name': task_list},
+            'childPolicy': child_policy,
+            'executionStartToCloseTimeout': execution_start_to_close_timeout,
+            'input': input,
+            'tagList': tag_list,
+            'taskStartToCloseTimeout': task_start_to_close_timeout,
+        })
+
+    def signal_workflow_execution(self, domain, signal_name, workflow_id,
+                                  input=None, run_id=None):
+        """
+        Records a WorkflowExecutionSignaled event in the workflow
+        execution history and creates a decision task for the workflow
+        execution identified by the given domain, workflowId and
+        runId. The event is recorded with the specified user defined
+        signalName and input (if provided).
+
+        :type domain: string
+        :param domain: The name of the domain containing the workflow
+            execution to signal.
+
+        :type signal_name: string
+        :param signal_name: The name of the signal. This name must be
+            meaningful to the target workflow.
+
+        :type workflow_id: string
+        :param workflow_id: The workflowId of the workflow execution
+            to signal.
+
+        :type input: string
+        :param input: Data to attach to the WorkflowExecutionSignaled
+            event in the target workflow execution's history.
+ + :type run_id: string + :param run_id: The runId of the workflow execution to signal. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('SignalWorkflowExecution', { + 'domain': domain, + 'signalName': signal_name, + 'workflowId': workflow_id, + 'input': input, + 'runId': run_id, + }) + + def terminate_workflow_execution(self, domain, workflow_id, + child_policy=None, details=None, + reason=None, run_id=None): + """ + Records a WorkflowExecutionTerminated event and forces closure + of the workflow execution identified by the given domain, + runId, and workflowId. The child policy, registered with the + workflow type or specified when starting this execution, is + applied to any open child workflow executions of this workflow + execution. + + :type domain: string + :param domain: The domain of the workflow execution to terminate. + + :type workflow_id: string + :param workflow_id: The workflowId of the workflow execution + to terminate. + + :type child_policy: string + :param child_policy: If set, specifies the policy to use for + the child workflow executions of the workflow execution being + terminated. This policy overrides the child policy specified + for the workflow execution at registration time or when + starting the execution. The supported child policies are: + + * TERMINATE: the child executions will be terminated. + + * REQUEST_CANCEL: a request to cancel will be attempted + for each child execution by recording a + WorkflowExecutionCancelRequested event in its + history. It is up to the decider to take appropriate + actions when it receives an execution history with this + event. + + * ABANDON: no action will be taken. The child executions + will continue to run. + + :type details: string + :param details: Optional details for terminating the + workflow execution. + + :type reason: string + :param reason: An optional descriptive reason for terminating + the workflow execution. + + :type run_id: string + :param run_id: The runId of the workflow execution to terminate. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('TerminateWorkflowExecution', { + 'domain': domain, + 'workflowId': workflow_id, + 'childPolicy': child_policy, + 'details': details, + 'reason': reason, + 'runId': run_id, + }) + +# Actions related to Administration + +## Activity Management + + def register_activity_type(self, domain, name, version, task_list=None, + default_task_heartbeat_timeout=None, + default_task_schedule_to_close_timeout=None, + default_task_schedule_to_start_timeout=None, + default_task_start_to_close_timeout=None, + description=None): + """ + Registers a new activity type along with its configuration + settings in the specified domain. + + :type domain: string + :param domain: The name of the domain in which this activity is + to be registered. + + :type name: string + :param name: The name of the activity type within the domain. + + :type version: string + :param version: The version of the activity type. + + :type task_list: string + :param task_list: If set, specifies the default task list to + use for scheduling tasks of this activity type. This default + task list is used if a task list is not provided when a task + is scheduled through the schedule_activity_task Decision. 
+
+        :type default_task_heartbeat_timeout: string
+        :param default_task_heartbeat_timeout: If set, specifies the
+            default maximum time before which a worker processing a task
+            of this type must report progress by calling
+            RecordActivityTaskHeartbeat. If the timeout is exceeded, the
+            activity task is automatically timed out. This default can be
+            overridden when scheduling an activity task using the
+            ScheduleActivityTask Decision. If the activity worker
+            subsequently attempts to record a heartbeat or returns a
+            result, the activity worker receives an UnknownResource
+            fault. In this case, Amazon SWF no longer considers the
+            activity task to be valid; the activity worker should clean up
+            the activity task.
+
+        :type default_task_schedule_to_close_timeout: string
+        :param default_task_schedule_to_close_timeout: If set,
+            specifies the default maximum duration for a task of this
+            activity type. This default can be overridden when scheduling
+            an activity task using the ScheduleActivityTask Decision.
+
+        :type default_task_schedule_to_start_timeout: string
+        :param default_task_schedule_to_start_timeout: If set,
+            specifies the default maximum duration that a task of this
+            activity type can wait before being assigned to a worker. This
+            default can be overridden when scheduling an activity task
+            using the ScheduleActivityTask Decision.
+
+        :type default_task_start_to_close_timeout: string
+        :param default_task_start_to_close_timeout: If set, specifies
+            the default maximum duration that a worker can take to process
+            tasks of this activity type. This default can be overridden
+            when scheduling an activity task using the
+            ScheduleActivityTask Decision.
+
+        :type description: string
+        :param description: A textual description of the activity type.
+
+        :raises: SWFTypeAlreadyExistsError, SWFLimitExceededError,
+            UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('RegisterActivityType', {
+            'domain': domain,
+            'name': name,
+            'version': version,
+            'defaultTaskList': {'name': task_list},
+            'defaultTaskHeartbeatTimeout': default_task_heartbeat_timeout,
+            'defaultTaskScheduleToCloseTimeout': default_task_schedule_to_close_timeout,
+            'defaultTaskScheduleToStartTimeout': default_task_schedule_to_start_timeout,
+            'defaultTaskStartToCloseTimeout': default_task_start_to_close_timeout,
+            'description': description,
+        })
+
+    def deprecate_activity_type(self, domain, activity_name, activity_version):
+        """
+        Deprecates the specified activity type. After an activity
+        type has been deprecated, you cannot create new tasks of that
+        type. Tasks of this type that were scheduled before the type
+        was deprecated will continue to run.
+
+        :type domain: string
+        :param domain: The name of the domain in which the activity
+            type is registered.
+
+        :type activity_name: string
+        :param activity_name: The name of this activity.
+
+        :type activity_version: string
+        :param activity_version: The version of this activity.
+
+        :raises: UnknownResourceFault, TypeDeprecatedFault,
+            SWFOperationNotPermittedError
+        """
+        return self.json_request('DeprecateActivityType', {
+            'domain': domain,
+            'activityType': {'name': activity_name,
+                             'version': activity_version}
+        })
+
+## Workflow Management
+
+    def register_workflow_type(self, domain, name, version,
+                               task_list=None,
+                               default_child_policy=None,
+                               default_execution_start_to_close_timeout=None,
+                               default_task_start_to_close_timeout=None,
+                               description=None):
+        """
+        Registers a new workflow type and its configuration settings
+        in the specified domain.
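+
+        Example (an illustrative sketch only; ``conn`` is assumed to
+        be a ``Layer1`` connection, and the domain, names and timeout
+        values below are made up)::
+
+            conn.register_workflow_type(
+                'my-domain', 'MyWorkflow', '1.0',
+                task_list='default_tasks',
+                default_child_policy='TERMINATE',
+                default_execution_start_to_close_timeout='3600',
+                default_task_start_to_close_timeout='300',
+                description='Example workflow type')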
+
+        :type domain: string
+        :param domain: The name of the domain in which to register
+            the workflow type.
+
+        :type name: string
+        :param name: The name of the workflow type.
+
+        :type version: string
+        :param version: The version of the workflow type.
+
+        :type task_list: string
+        :param task_list: If set, specifies the default task list to use
+            for scheduling decision tasks for executions of this workflow
+            type. This default is used only if a task list is not provided
+            when starting the execution through the StartWorkflowExecution
+            Action or StartChildWorkflowExecution Decision.
+
+        :type default_child_policy: string
+        :param default_child_policy: If set, specifies the default
+            policy to use for the child workflow executions when a
+            workflow execution of this type is terminated, by calling the
+            TerminateWorkflowExecution action explicitly or due to an
+            expired timeout. This default can be overridden when starting
+            a workflow execution using the StartWorkflowExecution action
+            or the StartChildWorkflowExecution Decision. The supported
+            child policies are:
+
+            * TERMINATE: the child executions will be terminated.
+
+            * REQUEST_CANCEL: a request to cancel will be attempted
+              for each child execution by recording a
+              WorkflowExecutionCancelRequested event in its
+              history. It is up to the decider to take appropriate
+              actions when it receives an execution history with this
+              event.
+
+            * ABANDON: no action will be taken. The child executions
+              will continue to run.
+
+        :type default_execution_start_to_close_timeout: string
+        :param default_execution_start_to_close_timeout: If set,
+            specifies the default maximum duration for executions of this
+            workflow type. You can override this default when starting an
+            execution through the StartWorkflowExecution Action or
+            StartChildWorkflowExecution Decision.
+
+        :type default_task_start_to_close_timeout: string
+        :param default_task_start_to_close_timeout: If set, specifies
+            the default maximum duration of decision tasks for this
+            workflow type. This default can be overridden when starting a
+            workflow execution using the StartWorkflowExecution action or
+            the StartChildWorkflowExecution Decision.
+
+        :type description: string
+        :param description: Textual description of the workflow type.
+
+        :raises: SWFTypeAlreadyExistsError, SWFLimitExceededError,
+            UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('RegisterWorkflowType', {
+            'domain': domain,
+            'name': name,
+            'version': version,
+            'defaultTaskList': {'name': task_list},
+            'defaultChildPolicy': default_child_policy,
+            'defaultExecutionStartToCloseTimeout': default_execution_start_to_close_timeout,
+            'defaultTaskStartToCloseTimeout': default_task_start_to_close_timeout,
+            'description': description,
+        })
+
+    def deprecate_workflow_type(self, domain, workflow_name, workflow_version):
+        """
+        Deprecates the specified workflow type. After a workflow type
+        has been deprecated, you cannot create new executions of that
+        type. Executions that were started before the type was
+        deprecated will continue to run. A deprecated workflow type
+        may still be used when calling visibility actions.
+
+        :type domain: string
+        :param domain: The name of the domain in which the workflow
+            type is registered.
+
+        :type workflow_name: string
+        :param workflow_name: The name of the workflow type.
+
+        :type workflow_version: string
+        :param workflow_version: The version of the workflow type.
+ + :raises: UnknownResourceFault, TypeDeprecatedFault, + SWFOperationNotPermittedError + """ + return self.json_request('DeprecateWorkflowType', { + 'domain': domain, + 'workflowType': {'name': workflow_name, + 'version': workflow_version}, + }) + +## Domain Management + + def register_domain(self, name, + workflow_execution_retention_period_in_days, + description=None): + """ + Registers a new domain. + + :type name: string + :param name: Name of the domain to register. The name must be unique. + + :type workflow_execution_retention_period_in_days: string + + :param workflow_execution_retention_period_in_days: Specifies + the duration *in days* for which the record (including the + history) of workflow executions in this domain should be kept + by the service. After the retention period, the workflow + execution will not be available in the results of visibility + calls. If a duration of NONE is specified, the records for + workflow executions in this domain are not retained at all. + + :type description: string + :param description: Textual description of the domain. + + :raises: SWFDomainAlreadyExistsError, SWFLimitExceededError, + SWFOperationNotPermittedError + """ + return self.json_request('RegisterDomain', { + 'name': name, + 'workflowExecutionRetentionPeriodInDays': workflow_execution_retention_period_in_days, + 'description': description, + }) + + def deprecate_domain(self, name): + """ + Deprecates the specified domain. After a domain has been + deprecated it cannot be used to create new workflow executions + or register new types. However, you can still use visibility + actions on this domain. Deprecating a domain also deprecates + all activity and workflow types registered in the + domain. Executions that were started before the domain was + deprecated will continue to run. + + :type name: string + :param name: The name of the domain to deprecate. + + :raises: UnknownResourceFault, DomainDeprecatedFault, + SWFOperationNotPermittedError + """ + return self.json_request('DeprecateDomain', {'name': name}) + +# Visibility Actions + +## Activity Visibility + + def list_activity_types(self, domain, registration_status, + name=None, + maximum_page_size=None, + next_page_token=None, reverse_order=None): + """ + Returns information about all activities registered in the + specified domain that match the specified name and + registration status. The result includes information like + creation date, current status of the activity, etc. The + results may be split into multiple pages. To retrieve + subsequent pages, make the call again using the nextPageToken + returned by the initial call. + + :type domain: string + :param domain: The name of the domain in which the activity + types have been registered. + + :type registration_status: string + :param registration_status: Specifies the registration status + of the activity types to list. Valid values are: + + * REGISTERED + * DEPRECATED + + :type name: string + :param name: If specified, only lists the activity types that + have this name. + + :type maximum_page_size: integer + :param maximum_page_size: The maximum number of results + returned in each page. The default is 100, but the caller can + override this value to a page size smaller than the + default. You cannot specify a page size greater than 100. + + :type next_page_token: string + :param next_page_token: If on a previous call to this method a + NextResultToken was returned, the results have more than one + page. 
To get the next page of results, repeat the call with
+            the nextPageToken and keep all other arguments unchanged.
+
+        :type reverse_order: boolean
+        :param reverse_order: When set to true, returns the results in
+            reverse order. By default the results are returned in
+            ascending alphabetical order of the name of the activity
+            types.
+
+        :raises: SWFOperationNotPermittedError, UnknownResourceFault
+        """
+        return self.json_request('ListActivityTypes', {
+            'domain': domain,
+            'name': name,
+            'registrationStatus': registration_status,
+            'maximumPageSize': maximum_page_size,
+            'nextPageToken': next_page_token,
+            'reverseOrder': reverse_order,
+        })
+
+    def describe_activity_type(self, domain, activity_name, activity_version):
+        """
+        Returns information about the specified activity type. This
+        includes configuration settings provided at registration time
+        as well as other general information about the type.
+
+        :type domain: string
+        :param domain: The name of the domain in which the activity
+            type is registered.
+
+        :type activity_name: string
+        :param activity_name: The name of this activity.
+
+        :type activity_version: string
+        :param activity_version: The version of this activity.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('DescribeActivityType', {
+            'domain': domain,
+            'activityType': {'name': activity_name,
+                             'version': activity_version}
+        })
+
+## Workflow Visibility
+
+    def list_workflow_types(self, domain, registration_status,
+                            maximum_page_size=None, name=None,
+                            next_page_token=None, reverse_order=None):
+        """
+        Returns information about workflow types in the specified
+        domain. The results may be split into multiple pages that can
+        be retrieved by making the call repeatedly.
+
+        :type domain: string
+        :param domain: The name of the domain in which the workflow
+            types have been registered.
+
+        :type registration_status: string
+        :param registration_status: Specifies the registration status
+            of the workflow types to list. Valid values are:
+
+            * REGISTERED
+            * DEPRECATED
+
+        :type name: string
+        :param name: If specified, lists the workflow type with this name.
+
+        :type maximum_page_size: integer
+        :param maximum_page_size: The maximum number of results
+            returned in each page. The default is 100, but the caller can
+            override this value to a page size smaller than the
+            default. You cannot specify a page size greater than 100.
+
+        :type next_page_token: string
+        :param next_page_token: If on a previous call to this method a
+            NextPageToken was returned, the results are being
+            paginated. To get the next page of results, repeat the call
+            with the returned token and all other arguments unchanged.
+
+        :type reverse_order: boolean
+        :param reverse_order: When set to true, returns the results in
+            reverse order. By default the results are returned in
+            ascending alphabetical order of the name of the workflow
+            types.
+
+        :raises: SWFOperationNotPermittedError, UnknownResourceFault
+        """
+        return self.json_request('ListWorkflowTypes', {
+            'domain': domain,
+            'name': name,
+            'registrationStatus': registration_status,
+            'maximumPageSize': maximum_page_size,
+            'nextPageToken': next_page_token,
+            'reverseOrder': reverse_order,
+        })
+
+    def describe_workflow_type(self, domain, workflow_name, workflow_version):
+        """
+        Returns information about the specified workflow type. This
+        includes configuration settings specified when the type was
+        registered and other information such as creation date,
+        current status, etc.
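+
+        Example (an illustrative sketch only; ``conn`` is assumed to
+        be a ``Layer1`` connection and the names are made up)::
+
+            info = conn.describe_workflow_type('my-domain',
+                                               'MyWorkflow', '1.0')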
+ + :type domain: string + :param domain: The name of the domain in which this workflow + type is registered. + + :type workflow_name: string + :param workflow_name: The name of the workflow type. + + :type workflow_version: string + :param workflow_version: The version of the workflow type. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('DescribeWorkflowType', { + 'domain': domain, + 'workflowType': {'name': workflow_name, + 'version': workflow_version} + }) + +## Workflow Execution Visibility + + def describe_workflow_execution(self, domain, run_id, workflow_id): + """ + Returns information about the specified workflow execution + including its type and some statistics. + + :type domain: string + :param domain: The name of the domain containing the + workflow execution. + + :type run_id: string + :param run_id: A system generated unique identifier for the + workflow execution. + + :type workflow_id: string + :param workflow_id: The user defined identifier associated + with the workflow execution. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('DescribeWorkflowExecution', { + 'domain': domain, + 'execution': {'runId': run_id, + 'workflowId': workflow_id}, + }) + + def get_workflow_execution_history(self, domain, run_id, workflow_id, + maximum_page_size=None, + next_page_token=None, + reverse_order=None): + """ + Returns the history of the specified workflow execution. The + results may be split into multiple pages. To retrieve + subsequent pages, make the call again using the nextPageToken + returned by the initial call. + + :type domain: string + :param domain: The name of the domain containing the + workflow execution. + + :type run_id: string + :param run_id: A system generated unique identifier for the + workflow execution. + + :type workflow_id: string + :param workflow_id: The user defined identifier associated + with the workflow execution. + + :type maximum_page_size: integer + :param maximum_page_size: Specifies the maximum number of + history events returned in one page. The next page in the + result is identified by the NextPageToken returned. By default + 100 history events are returned in a page but the caller can + override this value to a page size smaller than the + default. You cannot specify a page size larger than 100. + + :type next_page_token: string + :param next_page_token: If a NextPageToken is returned, the + result has more than one pages. To get the next page, repeat + the call and specify the nextPageToken with all other + arguments unchanged. + + :type reverse_order: boolean + :param reverse_order: When set to true, returns the events in + reverse order. By default the results are returned in + ascending order of the eventTimeStamp of the events. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('GetWorkflowExecutionHistory', { + 'domain': domain, + 'execution': {'runId': run_id, + 'workflowId': workflow_id}, + 'maximumPageSize': maximum_page_size, + 'nextPageToken': next_page_token, + 'reverseOrder': reverse_order, + }) + + def count_open_workflow_executions(self, domain, latest_date, oldest_date, + tag=None, + workflow_id=None, + workflow_name=None, + workflow_version=None): + """ + Returns the number of open workflow executions within the + given domain that meet the specified filtering criteria. + + .. note: + workflow_id, workflow_name/workflow_version and tag are mutually + exclusive. 
You can specify at most one of these in a request. + + :type domain: string + :param domain: The name of the domain containing the + workflow executions to count. + + :type latest_date: timestamp + :param latest_date: Specifies the latest start or close date + and time to return. + + :type oldest_date: timestamp + :param oldest_date: Specifies the oldest start or close date + and time to return. + + :type workflow_name: string + :param workflow_name: Name of the workflow type to filter on. + + :type workflow_version: string + :param workflow_version: Version of the workflow type to filter on. + + :type tag: string + :param tag: If specified, only executions that have a tag + that matches the filter are counted. + + :type workflow_id: string + :param workflow_id: If specified, only workflow executions + matching the workflow_id are counted. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('CountOpenWorkflowExecutions', { + 'domain': domain, + 'startTimeFilter': {'oldestDate': oldest_date, + 'latestDate': latest_date}, + 'typeFilter': {'name': workflow_name, + 'version': workflow_version}, + 'executionFilter': {'workflowId': workflow_id}, + 'tagFilter': {'tag': tag}, + }) + + def list_open_workflow_executions(self, domain, + oldest_date, + latest_date=None, + tag=None, + workflow_id=None, + workflow_name=None, + workflow_version=None, + maximum_page_size=None, + next_page_token=None, + reverse_order=None): + """ + Returns the list of open workflow executions within the + given domain that meet the specified filtering criteria. + + .. note: + workflow_id, workflow_name/workflow_version + and tag are mutually exclusive. You can specify at most + one of these in a request. + + :type domain: string + :param domain: The name of the domain containing the + workflow executions to count. + + :type latest_date: timestamp + :param latest_date: Specifies the latest start or close date + and time to return. + + :type oldest_date: timestamp + :param oldest_date: Specifies the oldest start or close date + and time to return. + + :type tag: string + :param tag: If specified, only executions that have a tag + that matches the filter are counted. + + :type workflow_id: string + :param workflow_id: If specified, only workflow executions + matching the workflow_id are counted. + + :type workflow_name: string + :param workflow_name: Name of the workflow type to filter on. + + :type workflow_version: string + :param workflow_version: Version of the workflow type to filter on. + + :type maximum_page_size: integer + :param maximum_page_size: The maximum number of results + returned in each page. The default is 100, but the caller can + override this value to a page size smaller than the + default. You cannot specify a page size greater than 100. + + :type next_page_token: string + :param next_page_token: If on a previous call to this method a + NextPageToken was returned, the results are being + paginated. To get the next page of results, repeat the call + with the returned token and all other arguments unchanged. + + :type reverse_order: boolean + :param reverse_order: When set to true, returns the results in + reverse order. By default the results are returned in + descending order of the start or the close time of the + executions. 
+ + :raises: UnknownResourceFault, SWFOperationNotPermittedError + + """ + return self.json_request('ListOpenWorkflowExecutions', { + 'domain': domain, + 'startTimeFilter': {'oldestDate': oldest_date, + 'latestDate': latest_date}, + 'tagFilter': {'tag': tag}, + 'typeFilter': {'name': workflow_name, + 'version': workflow_version}, + 'executionFilter': {'workflowId': workflow_id}, + 'maximumPageSize': maximum_page_size, + 'nextPageToken': next_page_token, + 'reverseOrder': reverse_order, + }) + + def count_closed_workflow_executions(self, domain, + start_latest_date=None, + start_oldest_date=None, + close_latest_date=None, + close_oldest_date=None, + close_status=None, + tag=None, + workflow_id=None, + workflow_name=None, + workflow_version=None): + """ + Returns the number of closed workflow executions within the + given domain that meet the specified filtering criteria. + + .. note: + close_status, workflow_id, workflow_name/workflow_version + and tag are mutually exclusive. You can specify at most + one of these in a request. + + .. note: + start_latest_date/start_oldest_date and + close_latest_date/close_oldest_date are mutually + exclusive. You can specify at most one of these in a request. + + :type domain: string + :param domain: The name of the domain containing the + workflow executions to count. + + :type start_latest_date: timestamp + :param start_latest_date: If specified, only workflow executions + that meet the start time criteria of the filter are counted. + + :type start_oldest_date: timestamp + :param start_oldest_date: If specified, only workflow executions + that meet the start time criteria of the filter are counted. + + :type close_latest_date: timestamp + :param close_latest_date: If specified, only workflow executions + that meet the close time criteria of the filter are counted. + + :type close_oldest_date: timestamp + :param close_oldest_date: If specified, only workflow executions + that meet the close time criteria of the filter are counted. + + :type close_status: string + :param close_status: The close status that must match the close status + of an execution for it to meet the criteria of this filter. + Valid values are: + + * COMPLETED + * FAILED + * CANCELED + * TERMINATED + * CONTINUED_AS_NEW + * TIMED_OUT + + :type tag: string + :param tag: If specified, only executions that have a tag + that matches the filter are counted. + + :type workflow_id: string + :param workflow_id: If specified, only workflow executions + matching the workflow_id are counted. + + :type workflow_name: string + :param workflow_name: Name of the workflow type to filter on. + + :type workflow_version: string + :param workflow_version: Version of the workflow type to filter on. 
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('CountClosedWorkflowExecutions', {
+            'domain': domain,
+            'startTimeFilter': {'oldestDate': start_oldest_date,
+                                'latestDate': start_latest_date},
+            'closeTimeFilter': {'oldestDate': close_oldest_date,
+                                'latestDate': close_latest_date},
+            'closeStatusFilter': {'status': close_status},
+            'tagFilter': {'tag': tag},
+            'typeFilter': {'name': workflow_name,
+                           'version': workflow_version},
+            'executionFilter': {'workflowId': workflow_id}
+        })
+
+    def list_closed_workflow_executions(self, domain,
+                                        start_latest_date=None,
+                                        start_oldest_date=None,
+                                        close_latest_date=None,
+                                        close_oldest_date=None,
+                                        close_status=None,
+                                        tag=None,
+                                        workflow_id=None,
+                                        workflow_name=None,
+                                        workflow_version=None,
+                                        maximum_page_size=None,
+                                        next_page_token=None,
+                                        reverse_order=None):
+        """
+        Returns the list of closed workflow executions within the
+        given domain that meet the specified filtering criteria.
+
+        .. note::
+            close_status, workflow_id, workflow_name/workflow_version
+            and tag are mutually exclusive. You can specify at most
+            one of these in a request.
+
+        .. note::
+            start_latest_date/start_oldest_date and
+            close_latest_date/close_oldest_date are mutually
+            exclusive. You can specify at most one of these in a request.
+
+        :type domain: string
+        :param domain: The name of the domain containing the
+            workflow executions to list.
+
+        :type start_latest_date: timestamp
+        :param start_latest_date: If specified, only workflow executions
+            that meet the start time criteria of the filter are listed.
+
+        :type start_oldest_date: timestamp
+        :param start_oldest_date: If specified, only workflow executions
+            that meet the start time criteria of the filter are listed.
+
+        :type close_latest_date: timestamp
+        :param close_latest_date: If specified, only workflow executions
+            that meet the close time criteria of the filter are listed.
+
+        :type close_oldest_date: timestamp
+        :param close_oldest_date: If specified, only workflow executions
+            that meet the close time criteria of the filter are listed.
+
+        :type close_status: string
+        :param close_status: The close status that must match the close status
+            of an execution for it to meet the criteria of this filter.
+            Valid values are:
+
+            * COMPLETED
+            * FAILED
+            * CANCELED
+            * TERMINATED
+            * CONTINUED_AS_NEW
+            * TIMED_OUT
+
+        :type tag: string
+        :param tag: If specified, only executions that have a tag
+            that matches the filter are listed.
+
+        :type workflow_id: string
+        :param workflow_id: If specified, only workflow executions
+            matching the workflow_id are listed.
+
+        :type workflow_name: string
+        :param workflow_name: Name of the workflow type to filter on.
+
+        :type workflow_version: string
+        :param workflow_version: Version of the workflow type to filter on.
+
+        :type maximum_page_size: integer
+        :param maximum_page_size: The maximum number of results
+            returned in each page. The default is 100, but the caller can
+            override this value to a page size smaller than the
+            default. You cannot specify a page size greater than 100.
+
+        :type next_page_token: string
+        :param next_page_token: If on a previous call to this method a
+            NextPageToken was returned, the results are being
+            paginated. To get the next page of results, repeat the call
+            with the returned token and all other arguments unchanged.
+
+        :type reverse_order: boolean
+        :param reverse_order: When set to true, returns the results in
+            reverse order.
By default the results are returned in + descending order of the start or the close time of the + executions. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('ListClosedWorkflowExecutions', { + 'domain': domain, + 'startTimeFilter': {'oldestDate': start_oldest_date, + 'latestDate': start_latest_date}, + 'closeTimeFilter': {'oldestDate': close_oldest_date, + 'latestDate': close_latest_date}, + 'executionFilter': {'workflowId': workflow_id}, + 'closeStatusFilter': {'status': close_status}, + 'tagFilter': {'tag': tag}, + 'typeFilter': {'name': workflow_name, + 'version': workflow_version}, + 'maximumPageSize': maximum_page_size, + 'nextPageToken': next_page_token, + 'reverseOrder': reverse_order, + }) + +## Domain Visibility + + def list_domains(self, registration_status, + maximum_page_size=None, + next_page_token=None, reverse_order=None): + """ + Returns the list of domains registered in the account. The + results may be split into multiple pages. To retrieve + subsequent pages, make the call again using the nextPageToken + returned by the initial call. + + :type registration_status: string + :param registration_status: Specifies the registration status + of the domains to list. Valid Values: + + * REGISTERED + * DEPRECATED + + :type maximum_page_size: integer + :param maximum_page_size: The maximum number of results + returned in each page. The default is 100, but the caller can + override this value to a page size smaller than the + default. You cannot specify a page size greater than 100. + + :type next_page_token: string + :param next_page_token: If on a previous call to this method a + NextPageToken was returned, the result has more than one + page. To get the next page of results, repeat the call with + the returned token and all other arguments unchanged. + + :type reverse_order: boolean + :param reverse_order: When set to true, returns the results in + reverse order. By default the results are returned in + ascending alphabetical order of the name of the domains. + + :raises: SWFOperationNotPermittedError + """ + return self.json_request('ListDomains', { + 'registrationStatus': registration_status, + 'maximumPageSize': maximum_page_size, + 'nextPageToken': next_page_token, + 'reverseOrder': reverse_order, + }) + + def describe_domain(self, name): + """ + Returns information about the specified domain including + description and status. + + :type name: string + :param name: The name of the domain to describe. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('DescribeDomain', {'name': name}) + +## Task List Visibility + + def count_pending_decision_tasks(self, domain, task_list): + """ + Returns the estimated number of decision tasks in the + specified task list. The count returned is an approximation + and is not guaranteed to be exact. If you specify a task list + that no decision task was ever scheduled in then 0 will be + returned. + + :type domain: string + :param domain: The name of the domain that contains the task list. + + :type task_list: string + :param task_list: The name of the task list. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('CountPendingDecisionTasks', { + 'domain': domain, + 'taskList': {'name': task_list} + }) + + def count_pending_activity_tasks(self, domain, task_list): + """ + Returns the estimated number of activity tasks in the + specified task list. 
The count returned is an approximation
+        and is not guaranteed to be exact. If you specify a task list
+        that no activity task was ever scheduled in then 0 will be
+        returned.
+
+        :type domain: string
+        :param domain: The name of the domain that contains the task list.
+
+        :type task_list: string
+        :param task_list: The name of the task list.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('CountPendingActivityTasks', {
+            'domain': domain,
+            'taskList': {'name': task_list}
+        })
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/swf/layer1_decisions.py b/desktop/core/ext-py/boto-2.38.0/boto/swf/layer1_decisions.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f5f74af40252320c1fbafc1664235f36b1718a7
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/swf/layer1_decisions.py
@@ -0,0 +1,287 @@
+"""
+Helper class for creating decision responses.
+"""
+
+
+class Layer1Decisions(object):
+    """
+    Use this object to build a list of decisions for a decision response.
+    Each method call will append a new decision. Retrieve the list
+    of decisions from the _data attribute.
+
+    """
+    def __init__(self):
+        self._data = []
+
+    def schedule_activity_task(self,
+                               activity_id,
+                               activity_type_name,
+                               activity_type_version,
+                               task_list=None,
+                               control=None,
+                               heartbeat_timeout=None,
+                               schedule_to_close_timeout=None,
+                               schedule_to_start_timeout=None,
+                               start_to_close_timeout=None,
+                               input=None):
+        """
+        Schedules an activity task.
+
+        :type activity_id: string
+        :param activity_id: The activityId to assign to the activity
+            task being scheduled.
+
+        :type activity_type_name: string
+        :param activity_type_name: The name of the type of the activity
+            being scheduled.
+
+        :type activity_type_version: string
+        :param activity_type_version: The version of the type of the
+            activity being scheduled.
+
+        :type task_list: string
+        :param task_list: If set, specifies the name of the task list in
+            which to schedule the activity task. If not specified, the
+            defaultTaskList registered with the activity type will be used.
+            Note: a task list for this activity task must be specified either
+            as a default for the activity type or through this field. If
+            neither this field is set nor a default task list was specified
+            at registration time then a fault will be returned.
+        """
+        o = {}
+        o['decisionType'] = 'ScheduleActivityTask'
+        attrs = o['scheduleActivityTaskDecisionAttributes'] = {}
+        attrs['activityId'] = activity_id
+        attrs['activityType'] = {
+            'name': activity_type_name,
+            'version': activity_type_version,
+        }
+        if task_list is not None:
+            attrs['taskList'] = {'name': task_list}
+        if control is not None:
+            attrs['control'] = control
+        if heartbeat_timeout is not None:
+            attrs['heartbeatTimeout'] = heartbeat_timeout
+        if schedule_to_close_timeout is not None:
+            attrs['scheduleToCloseTimeout'] = schedule_to_close_timeout
+        if schedule_to_start_timeout is not None:
+            attrs['scheduleToStartTimeout'] = schedule_to_start_timeout
+        if start_to_close_timeout is not None:
+            attrs['startToCloseTimeout'] = start_to_close_timeout
+        if input is not None:
+            attrs['input'] = input
+        self._data.append(o)
+
+    def request_cancel_activity_task(self, activity_id):
+        """
+        Attempts to cancel a previously scheduled activity task. If
+        the activity task was scheduled but has not been assigned to a
+        worker, then it will be canceled.
+        If the activity task was
+        already assigned to a worker, then the worker will be informed
+        that cancellation has been requested in the response to
+        RecordActivityTaskHeartbeat.
+        """
+        o = {}
+        o['decisionType'] = 'RequestCancelActivityTask'
+        attrs = o['requestCancelActivityTaskDecisionAttributes'] = {}
+        attrs['activityId'] = activity_id
+        self._data.append(o)
+
+    def record_marker(self, marker_name, details=None):
+        """
+        Records a MarkerRecorded event in the history. Markers can be
+        used for adding custom information in the history, for instance
+        to let deciders know that they do not need to look at the
+        history beyond the marker event.
+        """
+        o = {}
+        o['decisionType'] = 'RecordMarker'
+        attrs = o['recordMarkerDecisionAttributes'] = {}
+        attrs['markerName'] = marker_name
+        if details is not None:
+            attrs['details'] = details
+        self._data.append(o)
+
+    def complete_workflow_execution(self, result=None):
+        """
+        Closes the workflow execution and records a WorkflowExecutionCompleted
+        event in the history.
+        """
+        o = {}
+        o['decisionType'] = 'CompleteWorkflowExecution'
+        attrs = o['completeWorkflowExecutionDecisionAttributes'] = {}
+        if result is not None:
+            attrs['result'] = result
+        self._data.append(o)
+
+    def fail_workflow_execution(self, reason=None, details=None):
+        """
+        Closes the workflow execution and records a
+        WorkflowExecutionFailed event in the history.
+        """
+        o = {}
+        o['decisionType'] = 'FailWorkflowExecution'
+        attrs = o['failWorkflowExecutionDecisionAttributes'] = {}
+        if reason is not None:
+            attrs['reason'] = reason
+        if details is not None:
+            attrs['details'] = details
+        self._data.append(o)
+
+    def cancel_workflow_executions(self, details=None):
+        """
+        Closes the workflow execution and records a WorkflowExecutionCanceled
+        event in the history.
+        """
+        o = {}
+        o['decisionType'] = 'CancelWorkflowExecution'
+        # The service expects the singular form of this attribute name.
+        attrs = o['cancelWorkflowExecutionDecisionAttributes'] = {}
+        if details is not None:
+            attrs['details'] = details
+        self._data.append(o)
+
+    def continue_as_new_workflow_execution(self,
+                                           child_policy=None,
+                                           execution_start_to_close_timeout=None,
+                                           input=None,
+                                           tag_list=None,
+                                           task_list=None,
+                                           start_to_close_timeout=None,
+                                           workflow_type_version=None):
+        """
+        Closes the workflow execution and starts a new workflow execution of
+        the same type using the same workflow id and a unique run Id. A
+        WorkflowExecutionContinuedAsNew event is recorded in the history.
+        """
+        o = {}
+        o['decisionType'] = 'ContinueAsNewWorkflowExecution'
+        attrs = o['continueAsNewWorkflowExecutionDecisionAttributes'] = {}
+        if child_policy is not None:
+            attrs['childPolicy'] = child_policy
+        if execution_start_to_close_timeout is not None:
+            attrs['executionStartToCloseTimeout'] = execution_start_to_close_timeout
+        if input is not None:
+            attrs['input'] = input
+        if tag_list is not None:
+            attrs['tagList'] = tag_list
+        if task_list is not None:
+            attrs['taskList'] = {'name': task_list}
+        if start_to_close_timeout is not None:
+            attrs['taskStartToCloseTimeout'] = start_to_close_timeout
+        if workflow_type_version is not None:
+            attrs['workflowTypeVersion'] = workflow_type_version
+        self._data.append(o)
+
+    def start_timer(self,
+                    start_to_fire_timeout,
+                    timer_id,
+                    control=None):
+        """
+        Starts a timer for this workflow execution and records a TimerStarted
+        event in the history. This timer will fire after the specified delay
+        and record a TimerFired event.
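+
+        Example (an illustrative sketch only; the timeout and timer id
+        values are made up; SWF timeouts are passed as strings of
+        seconds)::
+
+            d = Layer1Decisions()
+            d.start_timer('60', 'retry-timer')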
+ """ + o = {} + o['decisionType'] = 'StartTimer' + attrs = o['startTimerDecisionAttributes'] = {} + attrs['startToFireTimeout'] = start_to_fire_timeout + attrs['timerId'] = timer_id + if control is not None: + attrs['control'] = control + self._data.append(o) + + def cancel_timer(self, timer_id): + """ + Cancels a previously started timer and records a TimerCanceled + event in the history. + """ + o = {} + o['decisionType'] = 'CancelTimer' + attrs = o['cancelTimerDecisionAttributes'] = {} + attrs['timerId'] = timer_id + self._data.append(o) + + def signal_external_workflow_execution(self, + workflow_id, + signal_name, + run_id=None, + control=None, + input=None): + """ + Requests a signal to be delivered to the specified external workflow + execution and records a SignalExternalWorkflowExecutionInitiated + event in the history. + """ + o = {} + o['decisionType'] = 'SignalExternalWorkflowExecution' + attrs = o['signalExternalWorkflowExecutionDecisionAttributes'] = {} + attrs['workflowId'] = workflow_id + attrs['signalName'] = signal_name + if run_id is not None: + attrs['runId'] = run_id + if control is not None: + attrs['control'] = control + if input is not None: + attrs['input'] = input + self._data.append(o) + + def request_cancel_external_workflow_execution(self, + workflow_id, + control=None, + run_id=None): + """ + Requests that a request be made to cancel the specified + external workflow execution and records a + RequestCancelExternalWorkflowExecutionInitiated event in the + history. + """ + o = {} + o['decisionType'] = 'RequestCancelExternalWorkflowExecution' + attrs = o['requestCancelExternalWorkflowExecutionDecisionAttributes'] = {} + attrs['workflowId'] = workflow_id + if control is not None: + attrs['control'] = control + if run_id is not None: + attrs['runId'] = run_id + self._data.append(o) + + def start_child_workflow_execution(self, + workflow_type_name, + workflow_type_version, + workflow_id, + child_policy=None, + control=None, + execution_start_to_close_timeout=None, + input=None, + tag_list=None, + task_list=None, + task_start_to_close_timeout=None): + """ + Requests that a child workflow execution be started and + records a StartChildWorkflowExecutionInitiated event in the + history. The child workflow execution is a separate workflow + execution with its own history. 
+ """ + o = {} + o['decisionType'] = 'StartChildWorkflowExecution' + attrs = o['startChildWorkflowExecutionDecisionAttributes'] = {} + attrs['workflowType'] = { + 'name': workflow_type_name, + 'version': workflow_type_version, + } + attrs['workflowId'] = workflow_id + if child_policy is not None: + attrs['childPolicy'] = child_policy + if control is not None: + attrs['control'] = control + if execution_start_to_close_timeout is not None: + attrs['executionStartToCloseTimeout'] = execution_start_to_close_timeout + if input is not None: + attrs['input'] = input + if tag_list is not None: + attrs['tagList'] = tag_list + if task_list is not None: + attrs['taskList'] = {'name': task_list} + if task_start_to_close_timeout is not None: + attrs['taskStartToCloseTimeout'] = task_start_to_close_timeout + self._data.append(o) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/swf/layer2.py b/desktop/core/ext-py/boto-2.38.0/boto/swf/layer2.py new file mode 100644 index 0000000000000000000000000000000000000000..b829810b339a6532fa7475510e170b9b254bb6c7 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/swf/layer2.py @@ -0,0 +1,347 @@ +"""Object-oriented interface to SWF wrapping boto.swf.layer1.Layer1""" + +import time +from functools import wraps +from boto.swf.layer1 import Layer1 +from boto.swf.layer1_decisions import Layer1Decisions + +DEFAULT_CREDENTIALS = { + 'aws_access_key_id': None, + 'aws_secret_access_key': None +} + +def set_default_credentials(aws_access_key_id, aws_secret_access_key): + """Set default credentials.""" + DEFAULT_CREDENTIALS.update({ + 'aws_access_key_id': aws_access_key_id, + 'aws_secret_access_key': aws_secret_access_key, + }) + +class SWFBase(object): + + name = None + domain = None + aws_access_key_id = None + aws_secret_access_key = None + region = None + + def __init__(self, **kwargs): + # Set default credentials. + for credkey in ('aws_access_key_id', 'aws_secret_access_key'): + if DEFAULT_CREDENTIALS.get(credkey): + setattr(self, credkey, DEFAULT_CREDENTIALS[credkey]) + # Override attributes with keyword args. 
+        for kwarg in kwargs:
+            setattr(self, kwarg, kwargs[kwarg])
+
+        self._swf = Layer1(self.aws_access_key_id,
+                           self.aws_secret_access_key,
+                           region=self.region)
+
+    def __repr__(self):
+        rep_str = str(self.name)
+        if hasattr(self, 'version'):
+            rep_str += '-' + str(getattr(self, 'version'))
+        return '<%s %r at 0x%x>' % (self.__class__.__name__, rep_str, id(self))
+
+class Domain(SWFBase):
+
+    """Simple Workflow Domain."""
+
+    description = None
+    retention = 30
+
+    @wraps(Layer1.describe_domain)
+    def describe(self):
+        """DescribeDomain."""
+        return self._swf.describe_domain(self.name)
+
+    @wraps(Layer1.deprecate_domain)
+    def deprecate(self):
+        """DeprecateDomain."""
+        self._swf.deprecate_domain(self.name)
+
+    @wraps(Layer1.register_domain)
+    def register(self):
+        """RegisterDomain."""
+        self._swf.register_domain(self.name, str(self.retention),
+                                  self.description)
+
+    @wraps(Layer1.list_activity_types)
+    def activities(self, status='REGISTERED', **kwargs):
+        """ListActivityTypes."""
+        act_types = self._swf.list_activity_types(self.name, status, **kwargs)
+        act_objects = []
+        for act_args in act_types['typeInfos']:
+            act_ident = act_args['activityType']
+            del act_args['activityType']
+            act_args.update(act_ident)
+            act_args.update({
+                'aws_access_key_id': self.aws_access_key_id,
+                'aws_secret_access_key': self.aws_secret_access_key,
+                'domain': self.name,
+                'region': self.region,
+            })
+            act_objects.append(ActivityType(**act_args))
+        return act_objects
+
+    @wraps(Layer1.list_workflow_types)
+    def workflows(self, status='REGISTERED', **kwargs):
+        """ListWorkflowTypes."""
+        wf_types = self._swf.list_workflow_types(self.name, status, **kwargs)
+        wf_objects = []
+        for wf_args in wf_types['typeInfos']:
+            wf_ident = wf_args['workflowType']
+            del wf_args['workflowType']
+            wf_args.update(wf_ident)
+            wf_args.update({
+                'aws_access_key_id': self.aws_access_key_id,
+                'aws_secret_access_key': self.aws_secret_access_key,
+                'domain': self.name,
+                'region': self.region,
+            })
+
+            wf_objects.append(WorkflowType(**wf_args))
+        return wf_objects
+
+    def executions(self, closed=False, **kwargs):
+        """List open or closed executions.
+
+        For a full list of available parameters refer to
+        :py:func:`boto.swf.layer1.Layer1.list_closed_workflow_executions` and
+        :py:func:`boto.swf.layer1.Layer1.list_open_workflow_executions`
+        """
+        if closed:
+            executions = self._swf.list_closed_workflow_executions(self.name,
+                                                                   **kwargs)
+        else:
+            if 'oldest_date' not in kwargs:
+                # Last 24 hours.
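+                # time.time() is in seconds, so this defaults the
+                # oldest_date filter to 24 hours ago.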
+ kwargs['oldest_date'] = time.time() - (3600 * 24) + executions = self._swf.list_open_workflow_executions(self.name, + **kwargs) + exe_objects = [] + for exe_args in executions['executionInfos']: + for nested_key in ('execution', 'workflowType'): + nested_dict = exe_args[nested_key] + del exe_args[nested_key] + exe_args.update(nested_dict) + + exe_args.update({ + 'aws_access_key_id': self.aws_access_key_id, + 'aws_secret_access_key': self.aws_secret_access_key, + 'domain': self.name, + 'region': self.region, + }) + + exe_objects.append(WorkflowExecution(**exe_args)) + return exe_objects + + @wraps(Layer1.count_pending_activity_tasks) + def count_pending_activity_tasks(self, task_list): + """CountPendingActivityTasks.""" + return self._swf.count_pending_activity_tasks(self.name, task_list) + + @wraps(Layer1.count_pending_decision_tasks) + def count_pending_decision_tasks(self, task_list): + """CountPendingDecisionTasks.""" + return self._swf.count_pending_decision_tasks(self.name, task_list) + + +class Actor(SWFBase): + + task_list = None + last_tasktoken = None + domain = None + + def run(self): + """To be overloaded by subclasses.""" + raise NotImplementedError() + +class ActivityWorker(Actor): + + """Base class for SimpleWorkflow activity workers.""" + + @wraps(Layer1.respond_activity_task_canceled) + def cancel(self, task_token=None, details=None): + """RespondActivityTaskCanceled.""" + if task_token is None: + task_token = self.last_tasktoken + return self._swf.respond_activity_task_canceled(task_token, details) + + @wraps(Layer1.respond_activity_task_completed) + def complete(self, task_token=None, result=None): + """RespondActivityTaskCompleted.""" + if task_token is None: + task_token = self.last_tasktoken + return self._swf.respond_activity_task_completed(task_token, result) + + @wraps(Layer1.respond_activity_task_failed) + def fail(self, task_token=None, details=None, reason=None): + """RespondActivityTaskFailed.""" + if task_token is None: + task_token = self.last_tasktoken + return self._swf.respond_activity_task_failed(task_token, details, + reason) + + @wraps(Layer1.record_activity_task_heartbeat) + def heartbeat(self, task_token=None, details=None): + """RecordActivityTaskHeartbeat.""" + if task_token is None: + task_token = self.last_tasktoken + return self._swf.record_activity_task_heartbeat(task_token, details) + + @wraps(Layer1.poll_for_activity_task) + def poll(self, **kwargs): + """PollForActivityTask.""" + task_list = self.task_list + if 'task_list' in kwargs: + task_list = kwargs.get('task_list') + del kwargs['task_list'] + task = self._swf.poll_for_activity_task(self.domain, task_list, + **kwargs) + self.last_tasktoken = task.get('taskToken') + return task + +class Decider(Actor): + + """Base class for SimpleWorkflow deciders.""" + + @wraps(Layer1.respond_decision_task_completed) + def complete(self, task_token=None, decisions=None, **kwargs): + """RespondDecisionTaskCompleted.""" + if isinstance(decisions, Layer1Decisions): + # Extract decision list from a Layer1Decisions instance. 
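+            # (the helper accumulates plain decision dicts in its
+            # _data attribute, which is the raw list the service expects).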
+ decisions = decisions._data + if task_token is None: + task_token = self.last_tasktoken + return self._swf.respond_decision_task_completed(task_token, decisions, + **kwargs) + + @wraps(Layer1.poll_for_decision_task) + def poll(self, **kwargs): + """PollForDecisionTask.""" + task_list = self.task_list + if 'task_list' in kwargs: + task_list = kwargs.get('task_list') + del kwargs['task_list'] + decision_task = self._swf.poll_for_decision_task(self.domain, task_list, + **kwargs) + self.last_tasktoken = decision_task.get('taskToken') + return decision_task + +class WorkflowType(SWFBase): + + """A versioned workflow type.""" + + version = None + task_list = None + child_policy = 'TERMINATE' + + @wraps(Layer1.describe_workflow_type) + def describe(self): + """DescribeWorkflowType.""" + return self._swf.describe_workflow_type(self.domain, self.name, + self.version) + @wraps(Layer1.register_workflow_type) + def register(self, **kwargs): + """RegisterWorkflowType.""" + args = { + 'default_execution_start_to_close_timeout': '3600', + 'default_task_start_to_close_timeout': '300', + 'default_child_policy': 'TERMINATE', + } + args.update(kwargs) + self._swf.register_workflow_type(self.domain, self.name, self.version, + **args) + + @wraps(Layer1.deprecate_workflow_type) + def deprecate(self): + """DeprecateWorkflowType.""" + self._swf.deprecate_workflow_type(self.domain, self.name, self.version) + + @wraps(Layer1.start_workflow_execution) + def start(self, **kwargs): + """StartWorkflowExecution.""" + if 'workflow_id' in kwargs: + workflow_id = kwargs['workflow_id'] + del kwargs['workflow_id'] + else: + workflow_id = '%s-%s-%i' % (self.name, self.version, time.time()) + + for def_attr in ('task_list', 'child_policy'): + kwargs[def_attr] = kwargs.get(def_attr, getattr(self, def_attr)) + run_id = self._swf.start_workflow_execution(self.domain, workflow_id, + self.name, self.version, **kwargs)['runId'] + return WorkflowExecution(name=self.name, version=self.version, + runId=run_id, domain=self.domain, workflowId=workflow_id, + aws_access_key_id=self.aws_access_key_id, + aws_secret_access_key=self.aws_secret_access_key) + +class WorkflowExecution(SWFBase): + + """An instance of a workflow.""" + + workflowId = None + runId = None + + @wraps(Layer1.signal_workflow_execution) + def signal(self, signame, **kwargs): + """SignalWorkflowExecution.""" + self._swf.signal_workflow_execution(self.domain, signame, + self.workflowId, **kwargs) + + @wraps(Layer1.terminate_workflow_execution) + def terminate(self, **kwargs): + """TerminateWorkflowExecution (p. 
103).""" + return self._swf.terminate_workflow_execution(self.domain, + self.workflowId, **kwargs) + + @wraps(Layer1.get_workflow_execution_history) + def history(self, **kwargs): + """GetWorkflowExecutionHistory.""" + return self._swf.get_workflow_execution_history(self.domain, self.runId, + self.workflowId, **kwargs)['events'] + + @wraps(Layer1.describe_workflow_execution) + def describe(self): + """DescribeWorkflowExecution.""" + return self._swf.describe_workflow_execution(self.domain, self.runId, + self.workflowId) + + @wraps(Layer1.request_cancel_workflow_execution) + def request_cancel(self): + """RequestCancelWorkflowExecution.""" + return self._swf.request_cancel_workflow_execution(self.domain, + self.workflowId, self.runId) + + +class ActivityType(SWFBase): + + """A versioned activity type.""" + + version = None + + @wraps(Layer1.deprecate_activity_type) + def deprecate(self): + """DeprecateActivityType.""" + return self._swf.deprecate_activity_type(self.domain, self.name, + self.version) + + @wraps(Layer1.describe_activity_type) + def describe(self): + """DescribeActivityType.""" + return self._swf.describe_activity_type(self.domain, self.name, + self.version) + + @wraps(Layer1.register_activity_type) + def register(self, **kwargs): + """RegisterActivityType.""" + args = { + 'default_task_heartbeat_timeout': '600', + 'default_task_schedule_to_close_timeout': '3900', + 'default_task_schedule_to_start_timeout': '300', + 'default_task_start_to_close_timeout': '3600', + } + args.update(kwargs) + self._swf.register_activity_type(self.domain, self.name, self.version, + **args) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/utils.py b/desktop/core/ext-py/boto-2.38.0/boto/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0e7e3a79f37bb086094f1be810a97d23b57da018 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/utils.py @@ -0,0 +1,1051 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +# +# Parts of this code were copied or derived from sample code supplied by AWS. +# The following notice applies to that code. +# +# This software code is made available "AS IS" without warranties of any +# kind. 
You may copy, display, modify and redistribute the software +# code either by itself or as incorporated into your code; provided that +# you do not remove any proprietary notices. Your use of this software +# code is at your own risk and you waive any claim against Amazon +# Digital Services, Inc. or its affiliates with respect to your use of +# this software code. (c) 2006 Amazon Digital Services, Inc. or its +# affiliates. + +""" +Some handy utility functions used by several classes. +""" + +import subprocess +import time +import logging.handlers +import boto +import boto.provider +import tempfile +import random +import smtplib +import datetime +import re +import email.mime.multipart +import email.mime.base +import email.mime.text +import email.utils +import email.encoders +import gzip +import threading +import locale +from boto.compat import six, StringIO, urllib, encodebytes + +from contextlib import contextmanager + +from hashlib import md5, sha512 +_hashfn = sha512 + +from boto.compat import json + +try: + from boto.compat.json import JSONDecodeError +except ImportError: + JSONDecodeError = ValueError + +# List of Query String Arguments of Interest +qsa_of_interest = ['acl', 'cors', 'defaultObjectAcl', 'location', 'logging', + 'partNumber', 'policy', 'requestPayment', 'torrent', + 'versioning', 'versionId', 'versions', 'website', + 'uploads', 'uploadId', 'response-content-type', + 'response-content-language', 'response-expires', + 'response-cache-control', 'response-content-disposition', + 'response-content-encoding', 'delete', 'lifecycle', + 'tagging', 'restore', + # storageClass is a QSA for buckets in Google Cloud Storage. + # (StorageClass is associated to individual keys in S3, but + # having it listed here should cause no problems because + # GET bucket?storageClass is not part of the S3 API.) + 'storageClass', + # websiteConfig is a QSA for buckets in Google Cloud + # Storage. + 'websiteConfig', + # compose is a QSA for objects in Google Cloud Storage. + 'compose'] + + +_first_cap_regex = re.compile('(.)([A-Z][a-z]+)') +_number_cap_regex = re.compile('([a-z])([0-9]+)') +_end_cap_regex = re.compile('([a-z0-9])([A-Z])') + + +def unquote_v(nv): + if len(nv) == 1: + return nv + else: + return (nv[0], urllib.parse.unquote(nv[1])) + + +def canonical_string(method, path, headers, expires=None, + provider=None): + """ + Generates the aws canonical string for the given parameters + """ + if not provider: + provider = boto.provider.get_default() + interesting_headers = {} + for key in headers: + lk = key.lower() + if headers[key] is not None and \ + (lk in ['content-md5', 'content-type', 'date'] or + lk.startswith(provider.header_prefix)): + interesting_headers[lk] = str(headers[key]).strip() + + # these keys get empty strings if they don't exist + if 'content-type' not in interesting_headers: + interesting_headers['content-type'] = '' + if 'content-md5' not in interesting_headers: + interesting_headers['content-md5'] = '' + + # just in case someone used this. it's not necessary in this lib. 
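+    # (When the provider-specific date header, e.g. x-amz-date, is
+    # present, the service ignores the standard Date header for signing,
+    # so Date is canonicalized as an empty string here.)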
+ if provider.date_header in interesting_headers: + interesting_headers['date'] = '' + + # if you're using expires for query string auth, then it trumps date + # (and provider.date_header) + if expires: + interesting_headers['date'] = str(expires) + + sorted_header_keys = sorted(interesting_headers.keys()) + + buf = "%s\n" % method + for key in sorted_header_keys: + val = interesting_headers[key] + if key.startswith(provider.header_prefix): + buf += "%s:%s\n" % (key, val) + else: + buf += "%s\n" % val + + # don't include anything after the first ? in the resource... + # unless it is one of the QSA of interest, defined above + t = path.split('?') + buf += t[0] + + if len(t) > 1: + qsa = t[1].split('&') + qsa = [a.split('=', 1) for a in qsa] + qsa = [unquote_v(a) for a in qsa if a[0] in qsa_of_interest] + if len(qsa) > 0: + qsa.sort(key=lambda x: x[0]) + qsa = ['='.join(a) for a in qsa] + buf += '?' + buf += '&'.join(qsa) + + return buf + + +def merge_meta(headers, metadata, provider=None): + if not provider: + provider = boto.provider.get_default() + metadata_prefix = provider.metadata_prefix + final_headers = headers.copy() + for k in metadata.keys(): + if k.lower() in boto.s3.key.Key.base_user_settable_fields: + final_headers[k] = metadata[k] + else: + final_headers[metadata_prefix + k] = metadata[k] + + return final_headers + + +def get_aws_metadata(headers, provider=None): + if not provider: + provider = boto.provider.get_default() + metadata_prefix = provider.metadata_prefix + metadata = {} + for hkey in headers.keys(): + if hkey.lower().startswith(metadata_prefix): + val = urllib.parse.unquote(headers[hkey]) + if isinstance(val, bytes): + try: + val = val.decode('utf-8') + except UnicodeDecodeError: + # Just leave the value as-is + pass + metadata[hkey[len(metadata_prefix):]] = val + del headers[hkey] + return metadata + + +def retry_url(url, retry_on_404=True, num_retries=10, timeout=None): + """ + Retry a url. This is specifically used for accessing the metadata + service on an instance. Since this address should never be proxied + (for security reasons), we create a ProxyHandler with a NULL + dictionary to override any proxy settings in the environment. + """ + for i in range(0, num_retries): + try: + proxy_handler = urllib.request.ProxyHandler({}) + opener = urllib.request.build_opener(proxy_handler) + req = urllib.request.Request(url) + r = opener.open(req, timeout=timeout) + result = r.read() + + if(not isinstance(result, six.string_types) and + hasattr(result, 'decode')): + result = result.decode('utf-8') + + return result + except urllib.error.HTTPError as e: + code = e.getcode() + if code == 404 and not retry_on_404: + return '' + except Exception as e: + pass + boto.log.exception('Caught exception reading instance data') + # If not on the last iteration of the loop then sleep. 
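+        # (The back-off grows exponentially -- 1s, 2s, 4s, ... -- capped
+        # by the 'max_retry_delay' value in the [Boto] config section,
+        # which defaults to 60 seconds.)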
+ if i + 1 != num_retries: + time.sleep(min(2 ** i, + boto.config.get('Boto', 'max_retry_delay', 60))) + boto.log.error('Unable to read instance data, giving up') + return '' + + +def _get_instance_metadata(url, num_retries, timeout=None): + return LazyLoadMetadata(url, num_retries, timeout) + + +class LazyLoadMetadata(dict): + def __init__(self, url, num_retries, timeout=None): + self._url = url + self._num_retries = num_retries + self._leaves = {} + self._dicts = [] + self._timeout = timeout + data = boto.utils.retry_url(self._url, num_retries=self._num_retries, timeout=self._timeout) + if data: + fields = data.split('\n') + for field in fields: + if field.endswith('/'): + key = field[0:-1] + self._dicts.append(key) + else: + p = field.find('=') + if p > 0: + key = field[p + 1:] + resource = field[0:p] + '/openssh-key' + else: + key = resource = field + self._leaves[key] = resource + self[key] = None + + def _materialize(self): + for key in self: + self[key] + + def __getitem__(self, key): + if key not in self: + # allow dict to throw the KeyError + return super(LazyLoadMetadata, self).__getitem__(key) + + # already loaded + val = super(LazyLoadMetadata, self).__getitem__(key) + if val is not None: + return val + + if key in self._leaves: + resource = self._leaves[key] + last_exception = None + + for i in range(0, self._num_retries): + try: + val = boto.utils.retry_url( + self._url + urllib.parse.quote(resource, + safe="/:"), + num_retries=self._num_retries, + timeout=self._timeout) + if val and val[0] == '{': + val = json.loads(val) + break + else: + p = val.find('\n') + if p > 0: + val = val.split('\n') + break + + except JSONDecodeError as e: + boto.log.debug( + "encountered '%s' exception: %s" % ( + e.__class__.__name__, e)) + boto.log.debug( + 'corrupted JSON data found: %s' % val) + last_exception = e + + except Exception as e: + boto.log.debug("encountered unretryable" + + " '%s' exception, re-raising" % ( + e.__class__.__name__)) + last_exception = e + raise + + boto.log.error("Caught exception reading meta data" + + " for the '%s' try" % (i + 1)) + + if i + 1 != self._num_retries: + next_sleep = min( + random.random() * 2 ** i, + boto.config.get('Boto', 'max_retry_delay', 60)) + time.sleep(next_sleep) + else: + boto.log.error('Unable to read meta data, giving up') + boto.log.error( + "encountered '%s' exception: %s" % ( + last_exception.__class__.__name__, last_exception)) + raise last_exception + + self[key] = val + elif key in self._dicts: + self[key] = LazyLoadMetadata(self._url + key + '/', + self._num_retries) + + return super(LazyLoadMetadata, self).__getitem__(key) + + def get(self, key, default=None): + try: + return self[key] + except KeyError: + return default + + def values(self): + self._materialize() + return super(LazyLoadMetadata, self).values() + + def items(self): + self._materialize() + return super(LazyLoadMetadata, self).items() + + def __str__(self): + self._materialize() + return super(LazyLoadMetadata, self).__str__() + + def __repr__(self): + self._materialize() + return super(LazyLoadMetadata, self).__repr__() + + +def _build_instance_metadata_url(url, version, path): + """ + Builds an EC2 metadata URL for fetching information about an instance. + + Example: + + >>> _build_instance_metadata_url('http://169.254.169.254', 'latest', 'meta-data/') + http://169.254.169.254/latest/meta-data/ + + :type url: string + :param url: URL to metadata service, e.g. 'http://169.254.169.254' + + :type version: string + :param version: Version of the metadata to get, e.g. 
'latest' + + :type path: string + :param path: Path of the metadata to get, e.g. 'meta-data/'. If a trailing + slash is required it must be passed in with the path. + + :return: The full metadata URL + """ + return '%s/%s/%s' % (url, version, path) + + +def get_instance_metadata(version='latest', url='http://169.254.169.254', + data='meta-data/', timeout=None, num_retries=5): + """ + Returns the instance metadata as a nested Python dictionary. + Simple values (e.g. local_hostname, hostname, etc.) will be + stored as string values. Values such as ancestor-ami-ids will + be stored in the dict as a list of string values. More complex + fields such as public-keys and will be stored as nested dicts. + + If the timeout is specified, the connection to the specified url + will time out after the specified number of seconds. + + """ + try: + metadata_url = _build_instance_metadata_url(url, version, data) + return _get_instance_metadata(metadata_url, num_retries=num_retries, timeout=timeout) + except urllib.error.URLError: + return None + + +def get_instance_identity(version='latest', url='http://169.254.169.254', + timeout=None, num_retries=5): + """ + Returns the instance identity as a nested Python dictionary. + """ + iid = {} + base_url = _build_instance_metadata_url(url, version, + 'dynamic/instance-identity/') + try: + data = retry_url(base_url, num_retries=num_retries, timeout=timeout) + fields = data.split('\n') + for field in fields: + val = retry_url(base_url + '/' + field + '/', num_retries=num_retries, timeout=timeout) + if val[0] == '{': + val = json.loads(val) + if field: + iid[field] = val + return iid + except urllib.error.URLError: + return None + + +def get_instance_userdata(version='latest', sep=None, + url='http://169.254.169.254', timeout=None, num_retries=5): + ud_url = _build_instance_metadata_url(url, version, 'user-data') + user_data = retry_url(ud_url, retry_on_404=False, num_retries=num_retries, timeout=timeout) + if user_data: + if sep: + l = user_data.split(sep) + user_data = {} + for nvpair in l: + t = nvpair.split('=') + user_data[t[0].strip()] = t[1].strip() + return user_data + +ISO8601 = '%Y-%m-%dT%H:%M:%SZ' +ISO8601_MS = '%Y-%m-%dT%H:%M:%S.%fZ' +RFC1123 = '%a, %d %b %Y %H:%M:%S %Z' +LOCALE_LOCK = threading.Lock() + + +@contextmanager +def setlocale(name): + """ + A context manager to set the locale in a threadsafe manner. 
+ """ + with LOCALE_LOCK: + saved = locale.setlocale(locale.LC_ALL) + + try: + yield locale.setlocale(locale.LC_ALL, name) + finally: + locale.setlocale(locale.LC_ALL, saved) + + +def get_ts(ts=None): + if not ts: + ts = time.gmtime() + return time.strftime(ISO8601, ts) + + +def parse_ts(ts): + with setlocale('C'): + ts = ts.strip() + try: + dt = datetime.datetime.strptime(ts, ISO8601) + return dt + except ValueError: + try: + dt = datetime.datetime.strptime(ts, ISO8601_MS) + return dt + except ValueError: + dt = datetime.datetime.strptime(ts, RFC1123) + return dt + + +def find_class(module_name, class_name=None): + if class_name: + module_name = "%s.%s" % (module_name, class_name) + modules = module_name.split('.') + c = None + + try: + for m in modules[1:]: + if c: + c = getattr(c, m) + else: + c = getattr(__import__(".".join(modules[0:-1])), m) + return c + except: + return None + + +def update_dme(username, password, dme_id, ip_address): + """ + Update your Dynamic DNS record with DNSMadeEasy.com + """ + dme_url = 'https://www.dnsmadeeasy.com/servlet/updateip' + dme_url += '?username=%s&password=%s&id=%s&ip=%s' + s = urllib.request.urlopen(dme_url % (username, password, dme_id, ip_address)) + return s.read() + + +def fetch_file(uri, file=None, username=None, password=None): + """ + Fetch a file based on the URI provided. + If you do not pass in a file pointer a tempfile.NamedTemporaryFile, + or None if the file could not be retrieved is returned. + The URI can be either an HTTP url, or "s3://bucket_name/key_name" + """ + boto.log.info('Fetching %s' % uri) + if file is None: + file = tempfile.NamedTemporaryFile() + try: + if uri.startswith('s3://'): + bucket_name, key_name = uri[len('s3://'):].split('/', 1) + c = boto.connect_s3(aws_access_key_id=username, + aws_secret_access_key=password) + bucket = c.get_bucket(bucket_name) + key = bucket.get_key(key_name) + key.get_contents_to_file(file) + else: + if username and password: + passman = urllib.request.HTTPPasswordMgrWithDefaultRealm() + passman.add_password(None, uri, username, password) + authhandler = urllib.request.HTTPBasicAuthHandler(passman) + opener = urllib.request.build_opener(authhandler) + urllib.request.install_opener(opener) + s = urllib.request.urlopen(uri) + file.write(s.read()) + file.seek(0) + except: + raise + boto.log.exception('Problem Retrieving file: %s' % uri) + file = None + return file + + +class ShellCommand(object): + + def __init__(self, command, wait=True, fail_fast=False, cwd=None): + self.exit_code = 0 + self.command = command + self.log_fp = StringIO() + self.wait = wait + self.fail_fast = fail_fast + self.run(cwd=cwd) + + def run(self, cwd=None): + boto.log.info('running:%s' % self.command) + self.process = subprocess.Popen(self.command, shell=True, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + cwd=cwd) + if(self.wait): + while self.process.poll() is None: + time.sleep(1) + t = self.process.communicate() + self.log_fp.write(t[0]) + self.log_fp.write(t[1]) + boto.log.info(self.log_fp.getvalue()) + self.exit_code = self.process.returncode + + if self.fail_fast and self.exit_code != 0: + raise Exception("Command " + self.command + + " failed with status " + self.exit_code) + + return self.exit_code + + def setReadOnly(self, value): + raise AttributeError + + def getStatus(self): + return self.exit_code + + status = property(getStatus, setReadOnly, None, + 'The exit code for the command') + + def getOutput(self): + return self.log_fp.getvalue() + + output = 
property(getOutput, setReadOnly, None, + 'The STDIN and STDERR output of the command') + + +class AuthSMTPHandler(logging.handlers.SMTPHandler): + """ + This class extends the SMTPHandler in the standard Python logging module + to accept a username and password on the constructor and to then use those + credentials to authenticate with the SMTP server. To use this, you could + add something like this in your boto config file: + + [handler_hand07] + class=boto.utils.AuthSMTPHandler + level=WARN + formatter=form07 + args=('localhost', 'username', 'password', 'from@abc', ['user1@abc', 'user2@xyz'], 'Logger Subject') + """ + + def __init__(self, mailhost, username, password, + fromaddr, toaddrs, subject): + """ + Initialize the handler. + + We have extended the constructor to accept a username/password + for SMTP authentication. + """ + super(AuthSMTPHandler, self).__init__(mailhost, fromaddr, + toaddrs, subject) + self.username = username + self.password = password + + def emit(self, record): + """ + Emit a record. + + Format the record and send it to the specified addressees. + It would be really nice if I could add authorization to this class + without having to resort to cut and paste inheritance but, no. + """ + try: + port = self.mailport + if not port: + port = smtplib.SMTP_PORT + smtp = smtplib.SMTP(self.mailhost, port) + smtp.login(self.username, self.password) + msg = self.format(record) + msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % ( + self.fromaddr, + ','.join(self.toaddrs), + self.getSubject(record), + email.utils.formatdate(), msg) + smtp.sendmail(self.fromaddr, self.toaddrs, msg) + smtp.quit() + except (KeyboardInterrupt, SystemExit): + raise + except: + self.handleError(record) + + +class LRUCache(dict): + """A dictionary-like object that stores only a certain number of items, and + discards its least recently used item when full. + + >>> cache = LRUCache(3) + >>> cache['A'] = 0 + >>> cache['B'] = 1 + >>> cache['C'] = 2 + >>> len(cache) + 3 + + >>> cache['A'] + 0 + + Adding new items to the cache does not increase its size. Instead, the least + recently used item is dropped: + + >>> cache['D'] = 3 + >>> len(cache) + 3 + >>> 'B' in cache + False + + Iterating over the cache returns the keys, starting with the most recently + used: + + >>> for key in cache: + ... print key + D + A + C + + This code is based on the LRUCache class from Genshi which is based on + `Myghty `_'s LRUCache from ``myghtyutils.util``, + written by Mike Bayer and released under the MIT license (Genshi uses the + BSD License). 
+ """ + + class _Item(object): + def __init__(self, key, value): + self.previous = self.next = None + self.key = key + self.value = value + + def __repr__(self): + return repr(self.value) + + def __init__(self, capacity): + self._dict = dict() + self.capacity = capacity + self.head = None + self.tail = None + + def __contains__(self, key): + return key in self._dict + + def __iter__(self): + cur = self.head + while cur: + yield cur.key + cur = cur.next + + def __len__(self): + return len(self._dict) + + def __getitem__(self, key): + item = self._dict[key] + self._update_item(item) + return item.value + + def __setitem__(self, key, value): + item = self._dict.get(key) + if item is None: + item = self._Item(key, value) + self._dict[key] = item + self._insert_item(item) + else: + item.value = value + self._update_item(item) + self._manage_size() + + def __repr__(self): + return repr(self._dict) + + def _insert_item(self, item): + item.previous = None + item.next = self.head + if self.head is not None: + self.head.previous = item + else: + self.tail = item + self.head = item + self._manage_size() + + def _manage_size(self): + while len(self._dict) > self.capacity: + del self._dict[self.tail.key] + if self.tail != self.head: + self.tail = self.tail.previous + self.tail.next = None + else: + self.head = self.tail = None + + def _update_item(self, item): + if self.head == item: + return + + previous = item.previous + previous.next = item.next + if item.next is not None: + item.next.previous = previous + else: + self.tail = previous + + item.previous = None + item.next = self.head + self.head.previous = self.head = item + + +class Password(object): + """ + Password object that stores itself as hashed. + Hash defaults to SHA512 if available, MD5 otherwise. + """ + hashfunc = _hashfn + + def __init__(self, str=None, hashfunc=None): + """ + Load the string from an initial value, this should be the + raw hashed password. 
+ """ + self.str = str + if hashfunc: + self.hashfunc = hashfunc + + def set(self, value): + if not isinstance(value, bytes): + value = value.encode('utf-8') + self.str = self.hashfunc(value).hexdigest() + + def __str__(self): + return str(self.str) + + def __eq__(self, other): + if other is None: + return False + if not isinstance(other, bytes): + other = other.encode('utf-8') + return str(self.hashfunc(other).hexdigest()) == str(self.str) + + def __len__(self): + if self.str: + return len(self.str) + else: + return 0 + + +def notify(subject, body=None, html_body=None, to_string=None, + attachments=None, append_instance_id=True): + attachments = attachments or [] + if append_instance_id: + subject = "[%s] %s" % ( + boto.config.get_value("Instance", "instance-id"), subject) + if not to_string: + to_string = boto.config.get_value('Notification', 'smtp_to', None) + if to_string: + try: + from_string = boto.config.get_value('Notification', + 'smtp_from', 'boto') + msg = email.mime.multipart.MIMEMultipart() + msg['From'] = from_string + msg['Reply-To'] = from_string + msg['To'] = to_string + msg['Date'] = email.utils.formatdate(localtime=True) + msg['Subject'] = subject + + if body: + msg.attach(email.mime.text.MIMEText(body)) + + if html_body: + part = email.mime.base.MIMEBase('text', 'html') + part.set_payload(html_body) + email.encoders.encode_base64(part) + msg.attach(part) + + for part in attachments: + msg.attach(part) + + smtp_host = boto.config.get_value('Notification', + 'smtp_host', 'localhost') + + # Alternate port support + if boto.config.get_value("Notification", "smtp_port"): + server = smtplib.SMTP(smtp_host, int( + boto.config.get_value("Notification", "smtp_port"))) + else: + server = smtplib.SMTP(smtp_host) + + # TLS support + if boto.config.getbool("Notification", "smtp_tls"): + server.ehlo() + server.starttls() + server.ehlo() + smtp_user = boto.config.get_value('Notification', 'smtp_user', '') + smtp_pass = boto.config.get_value('Notification', 'smtp_pass', '') + if smtp_user: + server.login(smtp_user, smtp_pass) + server.sendmail(from_string, to_string, msg.as_string()) + server.quit() + except: + boto.log.exception('notify failed') + + +def get_utf8_value(value): + if not six.PY2 and isinstance(value, bytes): + return value + + if not isinstance(value, six.string_types): + value = six.text_type(value) + + if isinstance(value, six.text_type): + value = value.encode('utf-8') + + return value + + +def mklist(value): + if not isinstance(value, list): + if isinstance(value, tuple): + value = list(value) + else: + value = [value] + return value + + +def pythonize_name(name): + """Convert camel case to a "pythonic" name. + + Examples:: + + pythonize_name('CamelCase') -> 'camel_case' + pythonize_name('already_pythonized') -> 'already_pythonized' + pythonize_name('HTTPRequest') -> 'http_request' + pythonize_name('HTTPStatus200Ok') -> 'http_status_200_ok' + pythonize_name('UPPER') -> 'upper' + pythonize_name('') -> '' + + """ + s1 = _first_cap_regex.sub(r'\1_\2', name) + s2 = _number_cap_regex.sub(r'\1_\2', s1) + return _end_cap_regex.sub(r'\1_\2', s2).lower() + + +def write_mime_multipart(content, compress=False, deftype='text/plain', delimiter=':'): + """Description: + :param content: A list of tuples of name-content pairs. 
This is used + instead of a dict to ensure that scripts run in order + :type list of tuples: + + :param compress: Use gzip to compress the scripts, defaults to no compression + :type bool: + + :param deftype: The type that should be assumed if nothing else can be figured out + :type str: + + :param delimiter: mime delimiter + :type str: + + :return: Final mime multipart + :rtype: str: + """ + wrapper = email.mime.multipart.MIMEMultipart() + for name, con in content: + definite_type = guess_mime_type(con, deftype) + maintype, subtype = definite_type.split('/', 1) + if maintype == 'text': + mime_con = email.mime.text.MIMEText(con, _subtype=subtype) + else: + mime_con = email.mime.base.MIMEBase(maintype, subtype) + mime_con.set_payload(con) + # Encode the payload using Base64 + email.encoders.encode_base64(mime_con) + mime_con.add_header('Content-Disposition', 'attachment', filename=name) + wrapper.attach(mime_con) + rcontent = wrapper.as_string() + + if compress: + buf = StringIO() + gz = gzip.GzipFile(mode='wb', fileobj=buf) + try: + gz.write(rcontent) + finally: + gz.close() + rcontent = buf.getvalue() + + return rcontent + + +def guess_mime_type(content, deftype): + """Description: Guess the mime type of a block of text + :param content: content we're finding the type of + :type str: + + :param deftype: Default mime type + :type str: + + :rtype: : + :return: + """ + # Mappings recognized by cloudinit + starts_with_mappings = { + '#include': 'text/x-include-url', + '#!': 'text/x-shellscript', + '#cloud-config': 'text/cloud-config', + '#upstart-job': 'text/upstart-job', + '#part-handler': 'text/part-handler', + '#cloud-boothook': 'text/cloud-boothook' + } + rtype = deftype + for possible_type, mimetype in starts_with_mappings.items(): + if content.startswith(possible_type): + rtype = mimetype + break + return(rtype) + + +def compute_md5(fp, buf_size=8192, size=None): + """ + Compute MD5 hash on passed file and return results in a tuple of values. + + :type fp: file + :param fp: File pointer to the file to MD5 hash. The file pointer + will be reset to its current location before the + method returns. + + :type buf_size: integer + :param buf_size: Number of bytes per read request. + + :type size: int + :param size: (optional) The Maximum number of bytes to read from + the file pointer (fp). This is useful when uploading + a file in multiple parts where the file is being + split inplace into different parts. Less bytes may + be available. + + :rtype: tuple + :return: A tuple containing the hex digest version of the MD5 hash + as the first element, the base64 encoded version of the + plain digest as the second element and the data size as + the third element. + """ + return compute_hash(fp, buf_size, size, hash_algorithm=md5) + + +def compute_hash(fp, buf_size=8192, size=None, hash_algorithm=md5): + hash_obj = hash_algorithm() + spos = fp.tell() + if size and size < buf_size: + s = fp.read(size) + else: + s = fp.read(buf_size) + while s: + if not isinstance(s, bytes): + s = s.encode('utf-8') + hash_obj.update(s) + if size: + size -= len(s) + if size <= 0: + break + if size and size < buf_size: + s = fp.read(size) + else: + s = fp.read(buf_size) + hex_digest = hash_obj.hexdigest() + base64_digest = encodebytes(hash_obj.digest()).decode('utf-8') + if base64_digest[-1] == '\n': + base64_digest = base64_digest[0:-1] + # data_size based on bytes read. 
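+    # (fp.tell() - spos counts only the bytes actually consumed, which
+    # can be fewer than the requested `size` if EOF was reached first;
+    # the file position is then restored so the caller can re-read the
+    # same region.)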
+ data_size = fp.tell() - spos + fp.seek(spos) + return (hex_digest, base64_digest, data_size) + + +def find_matching_headers(name, headers): + """ + Takes a specific header name and a dict of headers {"name": "value"}. + Returns a list of matching header names, case-insensitive. + + """ + return [h for h in headers if h.lower() == name.lower()] + + +def merge_headers_by_name(name, headers): + """ + Takes a specific header name and a dict of headers {"name": "value"}. + Returns a string of all header values, comma-separated, that match the + input header name, case-insensitive. + + """ + matching_headers = find_matching_headers(name, headers) + return ','.join(str(headers[h]) for h in matching_headers + if headers[h] is not None) + + +class RequestHook(object): + """ + This can be extended and supplied to the connection object + to gain access to request and response object after the request completes. + One use for this would be to implement some specific request logging. + """ + def handle_request_data(self, request, response, error=False): + pass diff --git a/desktop/core/ext-py/boto-2.38.0/boto/vendored/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/vendored/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/boto/vendored/six.py b/desktop/core/ext-py/boto-2.38.0/boto/vendored/six.py new file mode 100644 index 0000000000000000000000000000000000000000..55f5c3bfe39cae68006a3304d910de040d89316e --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/vendored/six.py @@ -0,0 +1,756 @@ +"""Utilities for writing code that runs on Python 2 and 3""" + +# Copyright (c) 2010-2014 Benjamin Peterson +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import functools +import operator +import sys +import types + +__author__ = "Benjamin Peterson " +__version__ = "1.7.2" + + +# Useful for very coarse version differentiation. +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 + +if PY3: + string_types = str, + integer_types = int, + class_types = type, + text_type = str + binary_type = bytes + + MAXSIZE = sys.maxsize +else: + string_types = basestring, + integer_types = (int, long) + class_types = (type, types.ClassType) + text_type = unicode + binary_type = str + + if sys.platform.startswith("java"): + # Jython always uses 32 bits. + MAXSIZE = int((1 << 31) - 1) + else: + # It's possible to have sizeof(long) != sizeof(Py_ssize_t). 
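+        # (len() must return a Py_ssize_t, so the 1 << 31 probe below
+        # raises OverflowError on 32-bit builds; that distinguishes the
+        # two cases without relying on sys.maxsize.)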
+ class X(object): + def __len__(self): + return 1 << 31 + try: + len(X()) + except OverflowError: + # 32-bit + MAXSIZE = int((1 << 31) - 1) + else: + # 64-bit + MAXSIZE = int((1 << 63) - 1) + del X + + +def _add_doc(func, doc): + """Add documentation to a function.""" + func.__doc__ = doc + + +def _import_module(name): + """Import module, returning the module after the last dot.""" + __import__(name) + return sys.modules[name] + + +class _LazyDescr(object): + + def __init__(self, name): + self.name = name + + def __get__(self, obj, tp): + result = self._resolve() + setattr(obj, self.name, result) # Invokes __set__. + # This is a bit ugly, but it avoids running this again. + delattr(obj.__class__, self.name) + return result + + +class MovedModule(_LazyDescr): + + def __init__(self, name, old, new=None): + super(MovedModule, self).__init__(name) + if PY3: + if new is None: + new = name + self.mod = new + else: + self.mod = old + + def _resolve(self): + return _import_module(self.mod) + + def __getattr__(self, attr): + _module = self._resolve() + value = getattr(_module, attr) + setattr(self, attr, value) + return value + + +class _LazyModule(types.ModuleType): + + def __init__(self, name): + super(_LazyModule, self).__init__(name) + self.__doc__ = self.__class__.__doc__ + + def __dir__(self): + attrs = ["__doc__", "__name__"] + attrs += [attr.name for attr in self._moved_attributes] + return attrs + + # Subclasses should override this + _moved_attributes = [] + + +class MovedAttribute(_LazyDescr): + + def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): + super(MovedAttribute, self).__init__(name) + if PY3: + if new_mod is None: + new_mod = name + self.mod = new_mod + if new_attr is None: + if old_attr is None: + new_attr = name + else: + new_attr = old_attr + self.attr = new_attr + else: + self.mod = old_mod + if old_attr is None: + old_attr = name + self.attr = old_attr + + def _resolve(self): + module = _import_module(self.mod) + return getattr(module, self.attr) + + +class _SixMetaPathImporter(object): + """ + A meta path importer to import six.moves and its submodules. + + This class implements a PEP302 finder and loader. It should be compatible + with Python 2.5 and all existing versions of Python3 + """ + def __init__(self, six_module_name): + self.name = six_module_name + self.known_modules = {} + + def _add_module(self, mod, *fullnames): + for fullname in fullnames: + self.known_modules[self.name + "." + fullname] = mod + + def _get_module(self, fullname): + return self.known_modules[self.name + "." + fullname] + + def find_module(self, fullname, path=None): + if fullname in self.known_modules: + return self + return None + + def __get_module(self, fullname): + try: + return self.known_modules[fullname] + except KeyError: + raise ImportError("This loader does not know module " + fullname) + + def load_module(self, fullname): + try: + # in case of a reload + return sys.modules[fullname] + except KeyError: + pass + mod = self.__get_module(fullname) + if isinstance(mod, MovedModule): + mod = mod._resolve() + else: + mod.__loader__ = self + sys.modules[fullname] = mod + return mod + + def is_package(self, fullname): + """ + Return true, if the named module is a package. 
+ + We need this method to get correct spec objects with + Python 3.4 (see PEP451) + """ + return hasattr(self.__get_module(fullname), "__path__") + + def get_code(self, fullname): + """Return None + + Required, if is_package is implemented""" + self.__get_module(fullname) # eventually raises ImportError + return None + get_source = get_code # same as get_code + +_importer = _SixMetaPathImporter(__name__) + + +class _MovedItems(_LazyModule): + """Lazy loading of moved objects""" + __path__ = [] # mark as package + + +_moved_attributes = [ + MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), + MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), + MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), + MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), + MovedAttribute("map", "itertools", "builtins", "imap", "map"), + MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("reload_module", "__builtin__", "imp", "reload"), + MovedAttribute("reduce", "__builtin__", "functools"), + MovedAttribute("StringIO", "StringIO", "io"), + MovedAttribute("UserDict", "UserDict", "collections"), + MovedAttribute("UserList", "UserList", "collections"), + MovedAttribute("UserString", "UserString", "collections"), + MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), + MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), + + MovedModule("builtins", "__builtin__"), + MovedModule("configparser", "ConfigParser"), + MovedModule("copyreg", "copy_reg"), + MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), + MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), + MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), + MovedModule("http_cookies", "Cookie", "http.cookies"), + MovedModule("html_entities", "htmlentitydefs", "html.entities"), + MovedModule("html_parser", "HTMLParser", "html.parser"), + MovedModule("http_client", "httplib", "http.client"), + MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), + MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), + MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), + MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), + MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), + MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), + MovedModule("cPickle", "cPickle", "pickle"), + MovedModule("queue", "Queue"), + MovedModule("reprlib", "repr"), + MovedModule("socketserver", "SocketServer"), + MovedModule("_thread", "thread", "_thread"), + MovedModule("tkinter", "Tkinter"), + MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), + MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), + MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), + MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), + MovedModule("tkinter_tix", "Tix", "tkinter.tix"), + MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), + MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), + MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), + MovedModule("tkinter_colorchooser", "tkColorChooser", + "tkinter.colorchooser"), + MovedModule("tkinter_commondialog", "tkCommonDialog", + "tkinter.commondialog"), + MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), + 
MovedModule("tkinter_font", "tkFont", "tkinter.font"), + MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), + MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", + "tkinter.simpledialog"), + MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), + MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), + MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), + MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), + MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), + MovedModule("xmlrpc_server", "xmlrpclib", "xmlrpc.server"), + MovedModule("winreg", "_winreg"), +] +for attr in _moved_attributes: + setattr(_MovedItems, attr.name, attr) + if isinstance(attr, MovedModule): + _importer._add_module(attr, "moves." + attr.name) +del attr + +_MovedItems._moved_attributes = _moved_attributes + +moves = _MovedItems(__name__ + ".moves") +_importer._add_module(moves, "moves") + + +class Module_six_moves_urllib_parse(_LazyModule): + """Lazy loading of moved objects in six.moves.urllib_parse""" + + +_urllib_parse_moved_attributes = [ + MovedAttribute("ParseResult", "urlparse", "urllib.parse"), + MovedAttribute("SplitResult", "urlparse", "urllib.parse"), + MovedAttribute("parse_qs", "urlparse", "urllib.parse"), + MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), + MovedAttribute("urldefrag", "urlparse", "urllib.parse"), + MovedAttribute("urljoin", "urlparse", "urllib.parse"), + MovedAttribute("urlparse", "urlparse", "urllib.parse"), + MovedAttribute("urlsplit", "urlparse", "urllib.parse"), + MovedAttribute("urlunparse", "urlparse", "urllib.parse"), + MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), + MovedAttribute("quote", "urllib", "urllib.parse"), + MovedAttribute("quote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote", "urllib", "urllib.parse"), + MovedAttribute("unquote_plus", "urllib", "urllib.parse"), + MovedAttribute("urlencode", "urllib", "urllib.parse"), + MovedAttribute("splitquery", "urllib", "urllib.parse"), +] +for attr in _urllib_parse_moved_attributes: + setattr(Module_six_moves_urllib_parse, attr.name, attr) +del attr + +Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes + +_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), + "moves.urllib_parse", "moves.urllib.parse") + + +class Module_six_moves_urllib_error(_LazyModule): + """Lazy loading of moved objects in six.moves.urllib_error""" + + +_urllib_error_moved_attributes = [ + MovedAttribute("URLError", "urllib2", "urllib.error"), + MovedAttribute("HTTPError", "urllib2", "urllib.error"), + MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), +] +for attr in _urllib_error_moved_attributes: + setattr(Module_six_moves_urllib_error, attr.name, attr) +del attr + +Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes + +_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), + "moves.urllib_error", "moves.urllib.error") + + +class Module_six_moves_urllib_request(_LazyModule): + """Lazy loading of moved objects in six.moves.urllib_request""" + + +_urllib_request_moved_attributes = [ + MovedAttribute("urlopen", "urllib2", "urllib.request"), + MovedAttribute("install_opener", "urllib2", "urllib.request"), + MovedAttribute("build_opener", "urllib2", "urllib.request"), + MovedAttribute("pathname2url", "urllib", "urllib.request"), + MovedAttribute("url2pathname", 
"urllib", "urllib.request"), + MovedAttribute("getproxies", "urllib", "urllib.request"), + MovedAttribute("Request", "urllib2", "urllib.request"), + MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), + MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), + MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), + MovedAttribute("BaseHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), + MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), + MovedAttribute("FileHandler", "urllib2", "urllib.request"), + MovedAttribute("FTPHandler", "urllib2", "urllib.request"), + MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), + MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), + MovedAttribute("urlretrieve", "urllib", "urllib.request"), + MovedAttribute("urlcleanup", "urllib", "urllib.request"), + MovedAttribute("URLopener", "urllib", "urllib.request"), + MovedAttribute("FancyURLopener", "urllib", "urllib.request"), + MovedAttribute("proxy_bypass", "urllib", "urllib.request"), +] +for attr in _urllib_request_moved_attributes: + setattr(Module_six_moves_urllib_request, attr.name, attr) +del attr + +Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes + +_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), + "moves.urllib_request", "moves.urllib.request") + + +class Module_six_moves_urllib_response(_LazyModule): + """Lazy loading of moved objects in six.moves.urllib_response""" + + +_urllib_response_moved_attributes = [ + MovedAttribute("addbase", "urllib", "urllib.response"), + MovedAttribute("addclosehook", "urllib", "urllib.response"), + MovedAttribute("addinfo", "urllib", "urllib.response"), + MovedAttribute("addinfourl", "urllib", "urllib.response"), +] +for attr in _urllib_response_moved_attributes: + setattr(Module_six_moves_urllib_response, attr.name, attr) +del attr + +Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes + +_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), + "moves.urllib_response", "moves.urllib.response") + + +class Module_six_moves_urllib_robotparser(_LazyModule): + """Lazy loading of moved objects in six.moves.urllib_robotparser""" + + +_urllib_robotparser_moved_attributes = [ + MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), +] +for attr in _urllib_robotparser_moved_attributes: + setattr(Module_six_moves_urllib_robotparser, attr.name, attr) +del attr + +Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes + 
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), + "moves.urllib_robotparser", "moves.urllib.robotparser") + + +class Module_six_moves_urllib(types.ModuleType): + """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" + __path__ = [] # mark as package + parse = _importer._get_module("moves.urllib_parse") + error = _importer._get_module("moves.urllib_error") + request = _importer._get_module("moves.urllib_request") + response = _importer._get_module("moves.urllib_response") + robotparser = _importer._get_module("moves.urllib_robotparser") + + def __dir__(self): + return ['parse', 'error', 'request', 'response', 'robotparser'] + +_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), + "moves.urllib") + + +def add_move(move): + """Add an item to six.moves.""" + setattr(_MovedItems, move.name, move) + + +def remove_move(name): + """Remove item from six.moves.""" + try: + delattr(_MovedItems, name) + except AttributeError: + try: + del moves.__dict__[name] + except KeyError: + raise AttributeError("no such move, %r" % (name,)) + + +if PY3: + _meth_func = "__func__" + _meth_self = "__self__" + + _func_closure = "__closure__" + _func_code = "__code__" + _func_defaults = "__defaults__" + _func_globals = "__globals__" +else: + _meth_func = "im_func" + _meth_self = "im_self" + + _func_closure = "func_closure" + _func_code = "func_code" + _func_defaults = "func_defaults" + _func_globals = "func_globals" + + +try: + advance_iterator = next +except NameError: + def advance_iterator(it): + return it.next() +next = advance_iterator + + +try: + callable = callable +except NameError: + def callable(obj): + return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) + + +if PY3: + def get_unbound_function(unbound): + return unbound + + create_bound_method = types.MethodType + + Iterator = object +else: + def get_unbound_function(unbound): + return unbound.im_func + + def create_bound_method(func, obj): + return types.MethodType(func, obj, obj.__class__) + + class Iterator(object): + + def next(self): + return type(self).__next__(self) + + callable = callable +_add_doc(get_unbound_function, + """Get the function out of a possibly unbound function""") + + +get_method_function = operator.attrgetter(_meth_func) +get_method_self = operator.attrgetter(_meth_self) +get_function_closure = operator.attrgetter(_func_closure) +get_function_code = operator.attrgetter(_func_code) +get_function_defaults = operator.attrgetter(_func_defaults) +get_function_globals = operator.attrgetter(_func_globals) + + +if PY3: + def iterkeys(d, **kw): + return iter(d.keys(**kw)) + + def itervalues(d, **kw): + return iter(d.values(**kw)) + + def iteritems(d, **kw): + return iter(d.items(**kw)) + + def iterlists(d, **kw): + return iter(d.lists(**kw)) +else: + def iterkeys(d, **kw): + return iter(d.iterkeys(**kw)) + + def itervalues(d, **kw): + return iter(d.itervalues(**kw)) + + def iteritems(d, **kw): + return iter(d.iteritems(**kw)) + + def iterlists(d, **kw): + return iter(d.iterlists(**kw)) + +_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") +_add_doc(itervalues, "Return an iterator over the values of a dictionary.") +_add_doc(iteritems, + "Return an iterator over the (key, value) pairs of a dictionary.") +_add_doc(iterlists, + "Return an iterator over the (key, [values]) pairs of a dictionary.") + + +if PY3: + def b(s): + return s.encode("latin-1") + def u(s): + return s + unichr = chr + if 
sys.version_info[1] <= 1: + def int2byte(i): + return bytes((i,)) + else: + # This is about 2x faster than the implementation above on 3.2+ + int2byte = operator.methodcaller("to_bytes", 1, "big") + byte2int = operator.itemgetter(0) + indexbytes = operator.getitem + iterbytes = iter + import io + StringIO = io.StringIO + BytesIO = io.BytesIO +else: + def b(s): + return s + # Workaround for standalone backslash + def u(s): + return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") + unichr = unichr + int2byte = chr + def byte2int(bs): + return ord(bs[0]) + def indexbytes(buf, i): + return ord(buf[i]) + def iterbytes(buf): + return (ord(byte) for byte in buf) + import StringIO + StringIO = BytesIO = StringIO.StringIO +_add_doc(b, """Byte literal""") +_add_doc(u, """Text literal""") + + +if PY3: + exec_ = getattr(moves.builtins, "exec") + + + def reraise(tp, value, tb=None): + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + +else: + def exec_(_code_, _globs_=None, _locs_=None): + """Execute code in a namespace.""" + if _globs_ is None: + frame = sys._getframe(1) + _globs_ = frame.f_globals + if _locs_ is None: + _locs_ = frame.f_locals + del frame + elif _locs_ is None: + _locs_ = _globs_ + exec("""exec _code_ in _globs_, _locs_""") + + + exec_("""def reraise(tp, value, tb=None): + raise tp, value, tb +""") + + +print_ = getattr(moves.builtins, "print", None) +if print_ is None: + def print_(*args, **kwargs): + """The new-style print function for Python 2.4 and 2.5.""" + fp = kwargs.pop("file", sys.stdout) + if fp is None: + return + def write(data): + if not isinstance(data, basestring): + data = str(data) + # If the file has an encoding, encode unicode with it. + if (isinstance(fp, file) and + isinstance(data, unicode) and + fp.encoding is not None): + errors = getattr(fp, "errors", None) + if errors is None: + errors = "strict" + data = data.encode(fp.encoding, errors) + fp.write(data) + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: + if isinstance(sep, unicode): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", None) + if end is not None: + if isinstance(end, unicode): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") + if kwargs: + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: + if isinstance(arg, unicode): + want_unicode = True + break + if want_unicode: + newline = unicode("\n") + space = unicode(" ") + else: + newline = "\n" + space = " " + if sep is None: + sep = space + if end is None: + end = newline + for i, arg in enumerate(args): + if i: + write(sep) + write(arg) + write(end) + +_add_doc(reraise, """Reraise an exception.""") + +if sys.version_info[0:2] < (3, 4): + def wraps(wrapped): + def wrapper(f): + f = functools.wraps(wrapped)(f) + f.__wrapped__ = wrapped + return f + return wrapper +else: + wraps = functools.wraps + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass.""" + # This requires a bit of explanation: the basic idea is to make a + # dummy metaclass for one level of class instantiation that replaces + # itself with the actual metaclass. Because of internal type checks + # we also need to make sure that we downgrade the custom metaclass + # for one level to something closer to type (that's why __call__ and + # __init__ comes back from type etc.). 
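+    # Example (sketch): the class below ends up with `Meta` as its
+    # metaclass on Python 2 and 3 alike, because the temporary class
+    # created here replaces itself at class-creation time:
+    #
+    #     class Meta(type):
+    #         pass
+    #
+    #     class Base(with_metaclass(Meta, object)):
+    #         pass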
+ class metaclass(meta): + __call__ = type.__call__ + __init__ = type.__init__ + def __new__(cls, name, this_bases, d): + if this_bases is None: + return type.__new__(cls, name, (), d) + return meta(name, bases, d) + return metaclass('temporary_class', None, {}) + + +def add_metaclass(metaclass): + """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): + orig_vars = cls.__dict__.copy() + orig_vars.pop('__dict__', None) + orig_vars.pop('__weakref__', None) + slots = orig_vars.get('__slots__') + if slots is not None: + if isinstance(slots, str): + slots = [slots] + for slots_var in slots: + orig_vars.pop(slots_var) + return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper + +# Complete the moves implementation. +# This code is at the end of this module to speed up module loading. +# Turn this module into a package. +__path__ = [] # required for PEP 302 and PEP 451 +__package__ = __name__ # see PEP 366 @ReservedAssignment +try: + __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable +except NameError: + pass +# Remove other six meta path importers, since they cause problems. This can +# happen if six is removed from sys.modules and then reloaded. (Setuptools does +# this for some reason.) +if sys.meta_path: + for i, importer in enumerate(sys.meta_path): + # Here's some real nastiness: Another "instance" of the six module might + # be floating around. Therefore, we can't use isinstance() to check for + # the six meta path importer, since the other six instance will have + # inserted an importer with different class. + if (type(importer).__name__ == "_SixMetaPathImporter" and + importer.name == __name__): + del sys.meta_path[i] + break + del i, importer +# Finally, add the importer to the meta path import hook. +sys.meta_path.append(_importer) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/vpc/__init__.py b/desktop/core/ext-py/boto-2.38.0/boto/vpc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2c87adfc9ad2620c2477356e9fe04cbc7606cb6f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/vpc/__init__.py @@ -0,0 +1,1828 @@ +# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents a connection to the EC2 service. 
+""" + +from boto.ec2.connection import EC2Connection +from boto.resultset import ResultSet +from boto.vpc.vpc import VPC +from boto.vpc.customergateway import CustomerGateway +from boto.vpc.networkacl import NetworkAcl +from boto.vpc.routetable import RouteTable +from boto.vpc.internetgateway import InternetGateway +from boto.vpc.vpngateway import VpnGateway, Attachment +from boto.vpc.dhcpoptions import DhcpOptions +from boto.vpc.subnet import Subnet +from boto.vpc.vpnconnection import VpnConnection +from boto.vpc.vpc_peering_connection import VpcPeeringConnection +from boto.ec2 import RegionData +from boto.regioninfo import RegionInfo, get_regions + + +def regions(**kw_params): + """ + Get all available regions for the EC2 service. + You may pass any of the arguments accepted by the VPCConnection + object's constructor as keyword arguments and they will be + passed along to the VPCConnection object. + + :rtype: list + :return: A list of :class:`boto.ec2.regioninfo.RegionInfo` + """ + return get_regions('ec2', connection_cls=VPCConnection) + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.vpc.VPCConnection`. + Any additional parameters after the region_name are passed on to + the connect method of the region object. + + :type: str + :param region_name: The name of the region to connect to. + + :rtype: :class:`boto.vpc.VPCConnection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + for region in regions(**kw_params): + if region.name == region_name: + return region.connect(**kw_params) + return None + + +class VPCConnection(EC2Connection): + + # VPC methods + + def get_all_vpcs(self, vpc_ids=None, filters=None, dry_run=False): + """ + Retrieve information about your VPCs. You can filter results to + return information only about those VPCs that match your search + parameters. Otherwise, all VPCs associated with your account + are returned. + + :type vpc_ids: list + :param vpc_ids: A list of strings with the desired VPC ID's + + :type filters: list of tuples or dict + :param filters: A list of tuples or dict containing filters. Each tuple + or dict item consists of a filter key and a filter value. + Possible filter keys are: + + * *state* - a list of states of the VPC (pending or available) + * *cidrBlock* - a list CIDR blocks of the VPC + * *dhcpOptionsId* - a list of IDs of a set of DHCP options + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.vpc.vpc.VPC` + """ + params = {} + if vpc_ids: + self.build_list_params(params, vpc_ids, 'VpcId') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeVpcs', params, [('item', VPC)]) + + def create_vpc(self, cidr_block, instance_tenancy=None, dry_run=False): + """ + Create a new Virtual Private Cloud. + + :type cidr_block: str + :param cidr_block: A valid CIDR block + + :type instance_tenancy: str + :param instance_tenancy: The supported tenancy options for instances + launched into the VPC. Valid values are 'default' and 'dedicated'. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. 
+ + :rtype: The newly created VPC + :return: A :class:`boto.vpc.vpc.VPC` object + """ + params = {'CidrBlock': cidr_block} + if instance_tenancy: + params['InstanceTenancy'] = instance_tenancy + if dry_run: + params['DryRun'] = 'true' + return self.get_object('CreateVpc', params, VPC) + + def delete_vpc(self, vpc_id, dry_run=False): + """ + Delete a Virtual Private Cloud. + + :type vpc_id: str + :param vpc_id: The ID of the vpc to be deleted. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {'VpcId': vpc_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteVpc', params) + + def modify_vpc_attribute(self, vpc_id, + enable_dns_support=None, + enable_dns_hostnames=None, dry_run=False): + """ + Modifies the specified attribute of the specified VPC. + You can only modify one attribute at a time. + + :type vpc_id: str + :param vpc_id: The ID of the vpc to be modified. + + :type enable_dns_support: bool + :param enable_dns_support: Specifies whether the DNS server + provided by Amazon is enabled for the VPC. + + :type enable_dns_hostnames: bool + :param enable_dns_hostnames: Specifies whether DNS hostnames are + provided for the instances launched in this VPC. You can only + set this attribute to ``true`` if EnableDnsSupport + is also ``true``. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + params = {'VpcId': vpc_id} + if enable_dns_support is not None: + if enable_dns_support: + params['EnableDnsSupport.Value'] = 'true' + else: + params['EnableDnsSupport.Value'] = 'false' + if enable_dns_hostnames is not None: + if enable_dns_hostnames: + params['EnableDnsHostnames.Value'] = 'true' + else: + params['EnableDnsHostnames.Value'] = 'false' + if dry_run: + params['DryRun'] = 'true' + return self.get_status('ModifyVpcAttribute', params) + + # Route Tables + + def get_all_route_tables(self, route_table_ids=None, filters=None, + dry_run=False): + """ + Retrieve information about your routing tables. You can filter results + to return information only about those route tables that match your + search parameters. Otherwise, all route tables associated with your + account are returned. + + :type route_table_ids: list + :param route_table_ids: A list of strings with the desired route table + IDs. + + :type filters: list of tuples or dict + :param filters: A list of tuples or dict containing filters. Each tuple + or dict item consists of a filter key and a filter value. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.vpc.routetable.RouteTable` + """ + params = {} + if route_table_ids: + self.build_list_params(params, route_table_ids, "RouteTableId") + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeRouteTables', params, + [('item', RouteTable)]) + + def associate_route_table(self, route_table_id, subnet_id, dry_run=False): + """ + Associates a route table with a specific subnet. + + :type route_table_id: str + :param route_table_id: The ID of the route table to associate. + + :type subnet_id: str + :param subnet_id: The ID of the subnet to associate with. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run.
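A short sketch of the attribute and route-table calls described here (conn as in the earlier sketch; all IDs hypothetical). Note that modify_vpc_attribute changes one attribute per call:

    conn.modify_vpc_attribute('vpc-12345678', enable_dns_support=True)
    conn.modify_vpc_attribute('vpc-12345678', enable_dns_hostnames=True)
    tables = conn.get_all_route_tables(filters=[('vpc-id', 'vpc-12345678')])
    assoc_id = conn.associate_route_table(tables[0].id, 'subnet-12345678')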
+ + :rtype: str + :return: The ID of the association created + """ + params = { + 'RouteTableId': route_table_id, + 'SubnetId': subnet_id + } + if dry_run: + params['DryRun'] = 'true' + result = self.get_object('AssociateRouteTable', params, ResultSet) + return result.associationId + + def disassociate_route_table(self, association_id, dry_run=False): + """ + Removes an association from a route table. This will cause all subnets + that would've used this association to now use the main routing + association instead. + + :type association_id: str + :param association_id: The ID of the association to disassociate. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {'AssociationId': association_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DisassociateRouteTable', params) + + def create_route_table(self, vpc_id, dry_run=False): + """ + Creates a new route table. + + :type vpc_id: str + :param vpc_id: The VPC ID to associate this route table with. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: The newly created route table + :return: A :class:`boto.vpc.routetable.RouteTable` object + """ + params = {'VpcId': vpc_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_object('CreateRouteTable', params, RouteTable) + + def delete_route_table(self, route_table_id, dry_run=False): + """ + Delete a route table. + + :type route_table_id: str + :param route_table_id: The ID of the route table to delete. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {'RouteTableId': route_table_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteRouteTable', params) + + def _replace_route_table_association(self, association_id, + route_table_id, dry_run=False): + """ + Helper function for replace_route_table_association and + replace_route_table_association_with_assoc. Should not be used directly. + + :type association_id: str + :param association_id: The ID of the existing association to replace. + + :type route_table_id: str + :param route_table_id: The ID of the route table to be used in the + association. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: ResultSet + :return: ResultSet of the Amazon response + """ + params = { + 'AssociationId': association_id, + 'RouteTableId': route_table_id + } + if dry_run: + params['DryRun'] = 'true' + return self.get_object('ReplaceRouteTableAssociation', params, + ResultSet) + + def replace_route_table_assocation(self, association_id, + route_table_id, dry_run=False): + """ + Replaces a route association with a new route table. This can be + used to replace the 'main' route table by using the main route + table association instead of the more common subnet type + association. + + NOTE: It may be better to use replace_route_table_association_with_assoc + instead of this function; this function does not return the new + association ID. This function is retained for backwards compatibility. + + + :type association_id: str + :param association_id: The ID of the existing association to replace. + + :type route_table_id: str + :param route_table_id: The ID of the route table to be used in the + association.
+ + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + return self._replace_route_table_association( + association_id, route_table_id, dry_run=dry_run).status + + def replace_route_table_association_with_assoc(self, association_id, + route_table_id, + dry_run=False): + """ + Replaces a route association with a new route table. This can be + used to replace the 'main' route table by using the main route + table association instead of the more common subnet type + association. Returns the new association ID. + + :type association_id: str + :param association_id: The ID of the existing association to replace. + + :type route_table_id: str + :param route_table_id: The ID of the route table to be used in the + association. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: str + :return: New association ID + """ + return self._replace_route_table_association( + association_id, route_table_id, dry_run=dry_run).newAssociationId + + def create_route(self, route_table_id, destination_cidr_block, + gateway_id=None, instance_id=None, interface_id=None, + vpc_peering_connection_id=None, + dry_run=False): + """ + Creates a new route in the route table within a VPC. The route's target + can be either a gateway attached to the VPC or a NAT instance in the + VPC. + + :type route_table_id: str + :param route_table_id: The ID of the route table for the route. + + :type destination_cidr_block: str + :param destination_cidr_block: The CIDR address block used for the + destination match. + + :type gateway_id: str + :param gateway_id: The ID of the gateway attached to your VPC. + + :type instance_id: str + :param instance_id: The ID of a NAT instance in your VPC. + + :type interface_id: str + :param interface_id: Allows routing to network interface attachments. + + :type vpc_peering_connection_id: str + :param vpc_peering_connection_id: Allows routing to VPC peering + connection. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = { + 'RouteTableId': route_table_id, + 'DestinationCidrBlock': destination_cidr_block + } + + if gateway_id is not None: + params['GatewayId'] = gateway_id + elif instance_id is not None: + params['InstanceId'] = instance_id + elif interface_id is not None: + params['NetworkInterfaceId'] = interface_id + elif vpc_peering_connection_id is not None: + params['VpcPeeringConnectionId'] = vpc_peering_connection_id + if dry_run: + params['DryRun'] = 'true' + + return self.get_status('CreateRoute', params) + + def replace_route(self, route_table_id, destination_cidr_block, + gateway_id=None, instance_id=None, interface_id=None, + vpc_peering_connection_id=None, + dry_run=False): + """ + Replaces an existing route within a route table in a VPC. + + :type route_table_id: str + :param route_table_id: The ID of the route table for the route. + + :type destination_cidr_block: str + :param destination_cidr_block: The CIDR address block used for the + destination match. + + :type gateway_id: str + :param gateway_id: The ID of the gateway attached to your VPC. + + :type instance_id: str + :param instance_id: The ID of a NAT instance in your VPC. + + :type interface_id: str + :param interface_id: Allows routing to network interface attachments.
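The four route targets are mutually exclusive: the parameter handling above takes the first non-None of gateway_id, instance_id, interface_id and vpc_peering_connection_id. A sketch with hypothetical IDs:

    # Send all outbound traffic through an internet gateway...
    conn.create_route('rtb-12345678', '0.0.0.0/0', gateway_id='igw-12345678')
    # ...and later repoint the same destination at a NAT instance.
    conn.replace_route('rtb-12345678', '0.0.0.0/0', instance_id='i-12345678')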
+ + :type vpc_peering_connection_id: str + :param vpc_peering_connection_id: Allows routing to VPC peering + connection. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = { + 'RouteTableId': route_table_id, + 'DestinationCidrBlock': destination_cidr_block + } + + if gateway_id is not None: + params['GatewayId'] = gateway_id + elif instance_id is not None: + params['InstanceId'] = instance_id + elif interface_id is not None: + params['NetworkInterfaceId'] = interface_id + elif vpc_peering_connection_id is not None: + params['VpcPeeringConnectionId'] = vpc_peering_connection_id + if dry_run: + params['DryRun'] = 'true' + + return self.get_status('ReplaceRoute', params) + + def delete_route(self, route_table_id, destination_cidr_block, + dry_run=False): + """ + Deletes a route from a route table within a VPC. + + :type route_table_id: str + :param route_table_id: The ID of the route table with the route. + + :type destination_cidr_block: str + :param destination_cidr_block: The CIDR address block used for + destination match. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = { + 'RouteTableId': route_table_id, + 'DestinationCidrBlock': destination_cidr_block + } + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteRoute', params) + + # Network ACLs + + def get_all_network_acls(self, network_acl_ids=None, filters=None): + """ + Retrieve information about your network acls. You can filter results + to return information only about those network acls that match your + search parameters. Otherwise, all network acls associated with your + account are returned. + + :type network_acl_ids: list + :param network_acl_ids: A list of strings with the desired network ACL + IDs. + + :type filters: list of tuples or dict + :param filters: A list of tuples or dict containing filters. Each tuple + or dict item consists of a filter key and a filter value. + + :rtype: list + :return: A list of :class:`boto.vpc.networkacl.NetworkAcl` + """ + params = {} + if network_acl_ids: + self.build_list_params(params, network_acl_ids, "NetworkAclId") + if filters: + self.build_filter_params(params, filters) + return self.get_list('DescribeNetworkAcls', params, + [('item', NetworkAcl)]) + + def associate_network_acl(self, network_acl_id, subnet_id): + """ + Associates a network acl with a specific subnet. + + :type network_acl_id: str + :param network_acl_id: The ID of the network ACL to associate. + + :type subnet_id: str + :param subnet_id: The ID of the subnet to associate with. + + :rtype: str + :return: The ID of the association created + """ + + acl = self.get_all_network_acls(filters=[('association.subnet-id', subnet_id)])[0] + association = [association for association in acl.associations if association.subnet_id == subnet_id][0] + + params = { + 'AssociationId': association.id, + 'NetworkAclId': network_acl_id + } + + result = self.get_object('ReplaceNetworkAclAssociation', params, ResultSet) + return result.newAssociationId + + def disassociate_network_acl(self, subnet_id, vpc_id=None): + """ + Figures out what the default ACL is for the VPC, and associates + the subnet with that default ACL instead of its current one. + + :type subnet_id: str + :param subnet_id: The ID of the subnet to which the ACL belongs. + + :type vpc_id: str + :param vpc_id: The ID of the VPC to which the ACL/subnet belongs.
Queries EC2 if omitted. + + :rtype: str + :return: The ID of the association created + """ + if not vpc_id: + vpc_id = self.get_all_subnets([subnet_id])[0].vpc_id + acls = self.get_all_network_acls(filters=[('vpc-id', vpc_id), ('default', 'true')]) + default_acl_id = acls[0].id + + return self.associate_network_acl(default_acl_id, subnet_id) + + def create_network_acl(self, vpc_id): + """ + Creates a new network ACL. + + :type vpc_id: str + :param vpc_id: The VPC ID to associate this network ACL with. + + :rtype: The newly created network ACL + :return: A :class:`boto.vpc.networkacl.NetworkAcl` object + """ + params = {'VpcId': vpc_id} + return self.get_object('CreateNetworkAcl', params, NetworkAcl) + + def delete_network_acl(self, network_acl_id): + """ + Delete a network ACL + + :type network_acl_id: str + :param network_acl_id: The ID of the network_acl to delete. + + :rtype: bool + :return: True if successful + """ + params = {'NetworkAclId': network_acl_id} + return self.get_status('DeleteNetworkAcl', params) + + def create_network_acl_entry(self, network_acl_id, rule_number, protocol, rule_action, + cidr_block, egress=None, icmp_code=None, icmp_type=None, + port_range_from=None, port_range_to=None): + """ + Creates a new network ACL entry in a network ACL within a VPC. + + :type network_acl_id: str + :param network_acl_id: The ID of the network ACL for this network ACL entry. + + :type rule_number: int + :param rule_number: The rule number to assign to the entry (for example, 100). + + :type protocol: int + :param protocol: Valid values: -1 or a protocol number + (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) + + :type rule_action: str + :param rule_action: Indicates whether to allow or deny traffic that matches the rule. + + :type cidr_block: str + :param cidr_block: The CIDR range to allow or deny, in CIDR notation (for example, + 172.16.0.0/24). + + :type egress: bool + :param egress: Indicates whether this rule applies to egress traffic from the subnet (true) + or ingress traffic to the subnet (false). + + :type icmp_type: int + :param icmp_type: For the ICMP protocol, the ICMP type. You can use -1 to specify + all ICMP types. + + :type icmp_code: int + :param icmp_code: For the ICMP protocol, the ICMP code. You can use -1 to specify + all ICMP codes for the given ICMP type. + + :type port_range_from: int + :param port_range_from: The first port in the range. + + :type port_range_to: int + :param port_range_to: The last port in the range. + + + :rtype: bool + :return: True if successful + """ + params = { + 'NetworkAclId': network_acl_id, + 'RuleNumber': rule_number, + 'Protocol': protocol, + 'RuleAction': rule_action, + 'CidrBlock': cidr_block + } + + if egress is not None: + if isinstance(egress, bool): + egress = str(egress).lower() + params['Egress'] = egress + if icmp_code is not None: + params['Icmp.Code'] = icmp_code + if icmp_type is not None: + params['Icmp.Type'] = icmp_type + if port_range_from is not None: + params['PortRange.From'] = port_range_from + if port_range_to is not None: + params['PortRange.To'] = port_range_to + + return self.get_status('CreateNetworkAclEntry', params) + + def replace_network_acl_entry(self, network_acl_id, rule_number, protocol, rule_action, + cidr_block, egress=None, icmp_code=None, icmp_type=None, + port_range_from=None, port_range_to=None): + """ + Replaces an existing network ACL entry in a network ACL within a VPC.
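As a worked example of the entry parameters described here (ACL ID hypothetical), an ingress rule at number 100 allowing TCP port 80 from anywhere looks like:

    conn.create_network_acl_entry(
        'acl-12345678', rule_number=100, protocol=6,      # protocol 6 = TCP
        rule_action='allow', cidr_block='0.0.0.0/0',
        egress=False, port_range_from=80, port_range_to=80)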
+ + :type network_acl_id: str + :param network_acl_id: The ID of the network ACL containing the entry you want to replace + + :type rule_number: int + :param rule_number: The rule number of the entry that you want to replace (for example, 100). + + :type protocol: int + :param protocol: Valid values: -1 or a protocol number + (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) + + :type rule_action: str + :param rule_action: Indicates whether to allow or deny traffic that matches the rule. + + :type cidr_block: str + :param cidr_block: The CIDR range to allow or deny, in CIDR notation (for example, + 172.16.0.0/24). + + :type egress: bool + :param egress: Indicates whether this rule applies to egress traffic from the subnet (true) + or ingress traffic to the subnet (false). + + :type icmp_type: int + :param icmp_type: For the ICMP protocol, the ICMP type. You can use -1 to specify + all ICMP types. + + :type icmp_code: int + :param icmp_code: For the ICMP protocol, the ICMP code. You can use -1 to specify + all ICMP codes for the given ICMP type. + + :type port_range_from: int + :param port_range_from: The first port in the range. + + :type port_range_to: int + :param port_range_to: The last port in the range. + + + :rtype: bool + :return: True if successful + """ + params = { + 'NetworkAclId': network_acl_id, + 'RuleNumber': rule_number, + 'Protocol': protocol, + 'RuleAction': rule_action, + 'CidrBlock': cidr_block + } + + if egress is not None: + if isinstance(egress, bool): + egress = str(egress).lower() + params['Egress'] = egress + if icmp_code is not None: + params['Icmp.Code'] = icmp_code + if icmp_type is not None: + params['Icmp.Type'] = icmp_type + if port_range_from is not None: + params['PortRange.From'] = port_range_from + if port_range_to is not None: + params['PortRange.To'] = port_range_to + + return self.get_status('ReplaceNetworkAclEntry', params) + + def delete_network_acl_entry(self, network_acl_id, rule_number, egress=None): + """ + Deletes a network ACL entry from a network ACL within a VPC. + + :type network_acl_id: str + :param network_acl_id: The ID of the network ACL with the network ACL entry. + + :type rule_number: int + :param rule_number: The rule number for the entry to delete. + + :type egress: bool + :param egress: Specifies whether the rule to delete is an egress rule (true) + or ingress rule (false). + + :rtype: bool + :return: True if successful + """ + params = { + 'NetworkAclId': network_acl_id, + 'RuleNumber': rule_number + } + + if egress is not None: + if isinstance(egress, bool): + egress = str(egress).lower() + params['Egress'] = egress + + return self.get_status('DeleteNetworkAclEntry', params) + + # Internet Gateways + + def get_all_internet_gateways(self, internet_gateway_ids=None, + filters=None, dry_run=False): + """ + Get a list of internet gateways. You can filter results to return information + about only those gateways that you're interested in. + + :type internet_gateway_ids: list + :param internet_gateway_ids: A list of strings with the desired gateway IDs. + + :type filters: list of tuples or dict + :param filters: A list of tuples or dict containing filters. Each tuple + or dict item consists of a filter key and a filter value. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run.
+ + """ + params = {} + + if internet_gateway_ids: + self.build_list_params(params, internet_gateway_ids, + 'InternetGatewayId') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeInternetGateways', params, + [('item', InternetGateway)]) + + def create_internet_gateway(self, dry_run=False): + """ + Creates an internet gateway for VPC. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: Newly created internet gateway. + :return: `boto.vpc.internetgateway.InternetGateway` + """ + params = {} + if dry_run: + params['DryRun'] = 'true' + return self.get_object('CreateInternetGateway', params, InternetGateway) + + def delete_internet_gateway(self, internet_gateway_id, dry_run=False): + """ + Deletes an internet gateway from the VPC. + + :type internet_gateway_id: str + :param internet_gateway_id: The ID of the internet gateway to delete. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: Bool + :return: True if successful + """ + params = {'InternetGatewayId': internet_gateway_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteInternetGateway', params) + + def attach_internet_gateway(self, internet_gateway_id, vpc_id, + dry_run=False): + """ + Attach an internet gateway to a specific VPC. + + :type internet_gateway_id: str + :param internet_gateway_id: The ID of the internet gateway to attach. + + :type vpc_id: str + :param vpc_id: The ID of the VPC to attach to. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: Bool + :return: True if successful + """ + params = { + 'InternetGatewayId': internet_gateway_id, + 'VpcId': vpc_id + } + if dry_run: + params['DryRun'] = 'true' + return self.get_status('AttachInternetGateway', params) + + def detach_internet_gateway(self, internet_gateway_id, vpc_id, + dry_run=False): + """ + Detach an internet gateway from a specific VPC. + + :type internet_gateway_id: str + :param internet_gateway_id: The ID of the internet gateway to detach. + + :type vpc_id: str + :param vpc_id: The ID of the VPC to attach to. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: Bool + :return: True if successful + """ + params = { + 'InternetGatewayId': internet_gateway_id, + 'VpcId': vpc_id + } + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DetachInternetGateway', params) + + # Customer Gateways + + def get_all_customer_gateways(self, customer_gateway_ids=None, + filters=None, dry_run=False): + """ + Retrieve information about your CustomerGateways. You can filter + results to return information only about those CustomerGateways that + match your search parameters. Otherwise, all CustomerGateways + associated with your account are returned. + + :type customer_gateway_ids: list + :param customer_gateway_ids: A list of strings with the desired + CustomerGateway ID's. + + :type filters: list of tuples or dict + :param filters: A list of tuples or dict containing filters. Each tuple + or dict item consists of a filter key and a filter value. 
+ Possible filter keys are: + + - *state*, the state of the CustomerGateway + (pending,available,deleting,deleted) + - *type*, the type of customer gateway (ipsec.1) + - *ipAddress*, the IP address of the customer gateway's + internet-routable external interface + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.vpc.customergateway.CustomerGateway` + """ + params = {} + if customer_gateway_ids: + self.build_list_params(params, customer_gateway_ids, + 'CustomerGatewayId') + if filters: + self.build_filter_params(params, filters) + + if dry_run: + params['DryRun'] = 'true' + + return self.get_list('DescribeCustomerGateways', params, + [('item', CustomerGateway)]) + + def create_customer_gateway(self, type, ip_address, bgp_asn, dry_run=False): + """ + Create a new Customer Gateway + + :type type: str + :param type: Type of VPN Connection. Only valid value currently is 'ipsec.1' + + :type ip_address: str + :param ip_address: Internet-routable IP address for customer's gateway. + Must be a static address. + + :type bgp_asn: int + :param bgp_asn: Customer gateway's Border Gateway Protocol (BGP) + Autonomous System Number (ASN) + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: The newly created CustomerGateway + :return: A :class:`boto.vpc.customergateway.CustomerGateway` object + """ + params = {'Type': type, + 'IpAddress': ip_address, + 'BgpAsn': bgp_asn} + if dry_run: + params['DryRun'] = 'true' + return self.get_object('CreateCustomerGateway', params, CustomerGateway) + + def delete_customer_gateway(self, customer_gateway_id, dry_run=False): + """ + Delete a Customer Gateway. + + :type customer_gateway_id: str + :param customer_gateway_id: The ID of the customer_gateway to be deleted. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {'CustomerGatewayId': customer_gateway_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteCustomerGateway', params) + + # VPN Gateways + + def get_all_vpn_gateways(self, vpn_gateway_ids=None, filters=None, + dry_run=False): + """ + Retrieve information about your VpnGateways. You can filter results to + return information only about those VpnGateways that match your search + parameters. Otherwise, all VpnGateways associated with your account + are returned. + + :type vpn_gateway_ids: list + :param vpn_gateway_ids: A list of strings with the desired VpnGateway ID's + + :type filters: list of tuples or dict + :param filters: A list of tuples or dict containing filters. Each tuple + or dict item consists of a filter key and a filter value. + Possible filter keys are: + + - *state*, a list of states of the VpnGateway + (pending,available,deleting,deleted) + - *type*, a list of types of VPN gateway (ipsec.1) + - *availabilityZone*, a list of Availability zones the + VPN gateway is in. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run.
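A sketch of create_customer_gateway with the parameters described above; the address (from the TEST-NET-3 documentation range) and the private-range ASN are placeholders:

    cgw = conn.create_customer_gateway('ipsec.1',
                                       ip_address='203.0.113.12',
                                       bgp_asn=65000)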
+ + :rtype: list + :return: A list of :class:`boto.vpc.vpngateway.VpnGateway` + """ + params = {} + if vpn_gateway_ids: + self.build_list_params(params, vpn_gateway_ids, 'VpnGatewayId') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeVpnGateways', params, + [('item', VpnGateway)]) + + def create_vpn_gateway(self, type, availability_zone=None, dry_run=False): + """ + Create a new Vpn Gateway + + :type type: str + :param type: Type of VPN Connection. Only valid value currently is 'ipsec.1' + + :type availability_zone: str + :param availability_zone: The Availability Zone where you want the VPN gateway. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: The newly created VpnGateway + :return: A :class:`boto.vpc.vpngateway.VpnGateway` object + """ + params = {'Type': type} + if availability_zone: + params['AvailabilityZone'] = availability_zone + if dry_run: + params['DryRun'] = 'true' + return self.get_object('CreateVpnGateway', params, VpnGateway) + + def delete_vpn_gateway(self, vpn_gateway_id, dry_run=False): + """ + Delete a Vpn Gateway. + + :type vpn_gateway_id: str + :param vpn_gateway_id: The ID of the vpn_gateway to be deleted. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {'VpnGatewayId': vpn_gateway_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteVpnGateway', params) + + def attach_vpn_gateway(self, vpn_gateway_id, vpc_id, dry_run=False): + """ + Attaches a VPN gateway to a VPC. + + :type vpn_gateway_id: str + :param vpn_gateway_id: The ID of the vpn_gateway to attach + + :type vpc_id: str + :param vpc_id: The ID of the VPC you want to attach the gateway to. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: An attachment + :return: a :class:`boto.vpc.vpngateway.Attachment` + """ + params = {'VpnGatewayId': vpn_gateway_id, + 'VpcId': vpc_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_object('AttachVpnGateway', params, Attachment) + + def detach_vpn_gateway(self, vpn_gateway_id, vpc_id, dry_run=False): + """ + Detaches a VPN gateway from a VPC. + + :type vpn_gateway_id: str + :param vpn_gateway_id: The ID of the vpn_gateway to detach + + :type vpc_id: str + :param vpc_id: The ID of the VPC you want to detach the gateway from. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {'VpnGatewayId': vpn_gateway_id, + 'VpcId': vpc_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DetachVpnGateway', params) + + # Subnets + + def get_all_subnets(self, subnet_ids=None, filters=None, dry_run=False): + """ + Retrieve information about your Subnets. You can filter results to + return information only about those Subnets that match your search + parameters. Otherwise, all Subnets associated with your account + are returned. + + :type subnet_ids: list + :param subnet_ids: A list of strings with the desired Subnet ID's + + :type filters: list of tuples or dict + :param filters: A list of tuples or dict containing filters. Each tuple + or dict item consists of a filter key and a filter value.
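The VPN gateway calls pair with attach_vpn_gateway, which returns an Attachment whose state can be inspected; a sketch (VPC ID hypothetical):

    vgw = conn.create_vpn_gateway('ipsec.1')
    attachment = conn.attach_vpn_gateway(vgw.id, 'vpc-12345678')
    print(attachment.state)   # e.g. 'attaching'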
+ Possible filter keys are: + + - *state*, a list of states of the Subnet + (pending,available) + - *vpcId*, a list of IDs of the VPC that the subnet is in. + - *cidrBlock*, a list of CIDR blocks of the subnet + - *availabilityZone*, list of the Availability Zones + the subnet is in. + + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.vpc.subnet.Subnet` + """ + params = {} + if subnet_ids: + self.build_list_params(params, subnet_ids, 'SubnetId') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeSubnets', params, [('item', Subnet)]) + + def create_subnet(self, vpc_id, cidr_block, availability_zone=None, + dry_run=False): + """ + Create a new Subnet + + :type vpc_id: str + :param vpc_id: The ID of the VPC where you want to create the subnet. + + :type cidr_block: str + :param cidr_block: The CIDR block you want the subnet to cover. + + :type availability_zone: str + :param availability_zone: The AZ you want the subnet in + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: The newly created Subnet + :return: A :class:`boto.vpc.subnet.Subnet` object + """ + params = {'VpcId': vpc_id, + 'CidrBlock': cidr_block} + if availability_zone: + params['AvailabilityZone'] = availability_zone + if dry_run: + params['DryRun'] = 'true' + return self.get_object('CreateSubnet', params, Subnet) + + def delete_subnet(self, subnet_id, dry_run=False): + """ + Delete a subnet. + + :type subnet_id: str + :param subnet_id: The ID of the subnet to be deleted. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {'SubnetId': subnet_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteSubnet', params) + + # DHCP Options + + def get_all_dhcp_options(self, dhcp_options_ids=None, filters=None, dry_run=False): + """ + Retrieve information about your DhcpOptions. + + :type dhcp_options_ids: list + :param dhcp_options_ids: A list of strings with the desired DhcpOptions ID's + + :type filters: list of tuples or dict + :param filters: A list of tuples or dict containing filters. Each tuple + or dict item consists of a filter key and a filter value. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.vpc.dhcpoptions.DhcpOptions` + """ + params = {} + if dhcp_options_ids: + self.build_list_params(params, dhcp_options_ids, 'DhcpOptionsId') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeDhcpOptions', params, + [('item', DhcpOptions)]) + + def create_dhcp_options(self, domain_name=None, domain_name_servers=None, + ntp_servers=None, netbios_name_servers=None, + netbios_node_type=None, dry_run=False): + """ + Create a new DhcpOptions set + + This corresponds to + http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-CreateDhcpOptions.html + + :type domain_name: str + :param domain_name: A domain name of your choice (for example, + example.com) + + :type domain_name_servers: list of strings + :param domain_name_servers: The IP address of a domain name server. You + can specify up to four addresses.
+ + :type ntp_servers: list of strings + :param ntp_servers: The IP address of a Network Time Protocol (NTP) + server. You can specify up to four addresses. + + :type netbios_name_servers: list of strings + :param netbios_name_servers: The IP address of a NetBIOS name server. + You can specify up to four addresses. + + :type netbios_node_type: str + :param netbios_node_type: The NetBIOS node type (1, 2, 4, or 8). For + more information about the values, see RFC 2132. We recommend you + only use 2 at this time (broadcast and multicast are currently not + supported). + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: The newly created DhcpOptions + :return: A :class:`boto.vpc.dhcpoptions.DhcpOptions` object + """ + + key_counter = 1 + params = {} + + def insert_option(params, name, value): + params['DhcpConfiguration.%d.Key' % (key_counter,)] = name + if isinstance(value, (list, tuple)): + for idx, value in enumerate(value, 1): + key_name = 'DhcpConfiguration.%d.Value.%d' % ( + key_counter, idx) + params[key_name] = value + else: + key_name = 'DhcpConfiguration.%d.Value.1' % (key_counter,) + params[key_name] = value + + return key_counter + 1 + + if domain_name: + key_counter = insert_option(params, + 'domain-name', domain_name) + if domain_name_servers: + key_counter = insert_option(params, + 'domain-name-servers', domain_name_servers) + if ntp_servers: + key_counter = insert_option(params, + 'ntp-servers', ntp_servers) + if netbios_name_servers: + key_counter = insert_option(params, + 'netbios-name-servers', netbios_name_servers) + if netbios_node_type: + key_counter = insert_option(params, + 'netbios-node-type', netbios_node_type) + if dry_run: + params['DryRun'] = 'true' + + return self.get_object('CreateDhcpOptions', params, DhcpOptions) + + def delete_dhcp_options(self, dhcp_options_id, dry_run=False): + """ + Delete a set of DHCP Options + + :type dhcp_options_id: str + :param dhcp_options_id: The ID of the DHCP Options to be deleted. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {'DhcpOptionsId': dhcp_options_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteDhcpOptions', params) + + def associate_dhcp_options(self, dhcp_options_id, vpc_id, dry_run=False): + """ + Associate a set of Dhcp Options with a VPC. + + :type dhcp_options_id: str + :param dhcp_options_id: The ID of the Dhcp Options + + :type vpc_id: str + :param vpc_id: The ID of the VPC. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {'DhcpOptionsId': dhcp_options_id, + 'VpcId': vpc_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('AssociateDhcpOptions', params) + + # VPN Connection + + def get_all_vpn_connections(self, vpn_connection_ids=None, filters=None, + dry_run=False): + """ + Retrieve information about your VPN connections. You can filter results to + return information only about those VPN connections that match your search + parameters. Otherwise, all VPN connections associated with your account + are returned. + + :type vpn_connection_ids: list + :param vpn_connection_ids: A list of strings with the desired VPN connection ID's + + :type filters: list of tuples or dict + :param filters: A list of tuples or dict containing filters.
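Internally, the insert_option helper above flattens each option into indexed DhcpConfiguration.N.Key / DhcpConfiguration.N.Value.M query parameters. A usage sketch (domain and server addresses hypothetical):

    opts = conn.create_dhcp_options(
        domain_name='example.com',
        domain_name_servers=['10.2.5.1', '10.2.5.2'])
    conn.associate_dhcp_options(opts.id, 'vpc-12345678')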
Each tuple + or dict item consists of a filter key and a filter value. + Possible filter keys are: + + - *state*, a list of states of the VPN connection + (pending,available,deleting,deleted) + - *type*, a list of types of connection, currently 'ipsec.1' + - *customerGatewayId*, a list of IDs of the customer gateway + associated with the VPN + - *vpnGatewayId*, a list of IDs of the VPN gateway associated + with the VPN connection + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.vpc.vpnconnection.VpnConnection` + """ + params = {} + if vpn_connection_ids: + self.build_list_params(params, vpn_connection_ids, + 'VpnConnectionId') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeVpnConnections', params, + [('item', VpnConnection)]) + + def create_vpn_connection(self, type, customer_gateway_id, vpn_gateway_id, + static_routes_only=None, dry_run=False): + """ + Create a new VPN Connection. + + :type type: str + :param type: The type of VPN Connection. Currently only 'ipsec.1' + is supported + + :type customer_gateway_id: str + :param customer_gateway_id: The ID of the customer gateway. + + :type vpn_gateway_id: str + :param vpn_gateway_id: The ID of the VPN gateway. + + :type static_routes_only: bool + :param static_routes_only: Indicates whether the VPN connection + requires static routes. If you are creating a VPN connection + for a device that does not support BGP, you must specify true. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: The newly created VpnConnection + :return: A :class:`boto.vpc.vpnconnection.VpnConnection` object + """ + params = {'Type': type, + 'CustomerGatewayId': customer_gateway_id, + 'VpnGatewayId': vpn_gateway_id} + if static_routes_only is not None: + if isinstance(static_routes_only, bool): + static_routes_only = str(static_routes_only).lower() + params['Options.StaticRoutesOnly'] = static_routes_only + if dry_run: + params['DryRun'] = 'true' + return self.get_object('CreateVpnConnection', params, VpnConnection) + + def delete_vpn_connection(self, vpn_connection_id, dry_run=False): + """ + Delete a VPN Connection. + + :type vpn_connection_id: str + :param vpn_connection_id: The ID of the vpn_connection to be deleted. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {'VpnConnectionId': vpn_connection_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteVpnConnection', params) + + def disable_vgw_route_propagation(self, route_table_id, gateway_id, + dry_run=False): + """ + Disables a virtual private gateway (VGW) from propagating routes to the + routing tables of an Amazon VPC. + + :type route_table_id: str + :param route_table_id: The ID of the routing table. + + :type gateway_id: str + :param gateway_id: The ID of the virtual private gateway. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run.
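Tying the gateway pieces together, a sketch of a static-routes-only VPN connection, using the static-route helper described further below (cgw and vgw as created in the earlier sketches; the CIDR is hypothetical):

    vpn = conn.create_vpn_connection('ipsec.1', cgw.id, vgw.id,
                                     static_routes_only=True)  # device lacks BGP
    conn.create_vpn_connection_route('192.168.0.0/24', vpn.id)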
+ + :rtype: bool + :return: True if successful + """ + params = { + 'RouteTableId': route_table_id, + 'GatewayId': gateway_id, + } + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DisableVgwRoutePropagation', params) + + def enable_vgw_route_propagation(self, route_table_id, gateway_id, + dry_run=False): + """ + Enables a virtual private gateway (VGW) to propagate routes to the + routing tables of an Amazon VPC. + + :type route_table_id: str + :param route_table_id: The ID of the routing table. + + :type gateway_id: str + :param gateway_id: The ID of the virtual private gateway. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = { + 'RouteTableId': route_table_id, + 'GatewayId': gateway_id, + } + if dry_run: + params['DryRun'] = 'true' + return self.get_status('EnableVgwRoutePropagation', params) + + def create_vpn_connection_route(self, destination_cidr_block, + vpn_connection_id, dry_run=False): + """ + Creates a new static route associated with a VPN connection between an + existing virtual private gateway and a VPN customer gateway. The static + route allows traffic to be routed from the virtual private gateway to + the VPN customer gateway. + + :type destination_cidr_block: str + :param destination_cidr_block: The CIDR block associated with the local + subnet of the customer data center. + + :type vpn_connection_id: str + :param vpn_connection_id: The ID of the VPN connection. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = { + 'DestinationCidrBlock': destination_cidr_block, + 'VpnConnectionId': vpn_connection_id, + } + if dry_run: + params['DryRun'] = 'true' + return self.get_status('CreateVpnConnectionRoute', params) + + def delete_vpn_connection_route(self, destination_cidr_block, + vpn_connection_id, dry_run=False): + """ + Deletes a static route associated with a VPN connection between an + existing virtual private gateway and a VPN customer gateway. The static + route allows traffic to be routed from the virtual private gateway to + the VPN customer gateway. + + :type destination_cidr_block: str + :param destination_cidr_block: The CIDR block associated with the local + subnet of the customer data center. + + :type vpn_connection_id: str + :param vpn_connection_id: The ID of the VPN connection. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = { + 'DestinationCidrBlock': destination_cidr_block, + 'VpnConnectionId': vpn_connection_id, + } + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteVpnConnectionRoute', params) + + def get_all_vpc_peering_connections(self, vpc_peering_connection_ids=None, + filters=None, dry_run=False): + """ + Retrieve information about your VPC peering connections. You + can filter results to return information only about those VPC + peering connections that match your search parameters. + Otherwise, all VPC peering connections associated with your + account are returned. + + :type vpc_peering_connection_ids: list + :param vpc_peering_connection_ids: A list of strings with the desired VPC + peering connection ID's + + :type filters: list of tuples + :param filters: A list of tuples containing filters. Each tuple + consists of a filter key and a filter value. 
+ Possible filter keys are: + + * *accepter-vpc-info.cidr-block* - The CIDR block of the peer VPC. + * *accepter-vpc-info.owner-id* - The AWS account ID of the owner + of the peer VPC. + * *accepter-vpc-info.vpc-id* - The ID of the peer VPC. + * *expiration-time* - The expiration date and time for the VPC + peering connection. + * *requester-vpc-info.cidr-block* - The CIDR block of the + requester's VPC. + * *requester-vpc-info.owner-id* - The AWS account ID of the + owner of the requester VPC. + * *requester-vpc-info.vpc-id* - The ID of the requester VPC. + * *status-code* - The status of the VPC peering connection. + * *status-message* - A message that provides more information + about the status of the VPC peering connection, if applicable. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.vpc.vpc_peering_connection.VpcPeeringConnection` + """ + params = {} + if vpc_peering_connection_ids: + self.build_list_params(params, vpc_peering_connection_ids, 'VpcPeeringConnectionId') + if filters: + self.build_filter_params(params, dict(filters)) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeVpcPeeringConnections', params, [('item', VpcPeeringConnection)]) + + def create_vpc_peering_connection(self, vpc_id, peer_vpc_id, + peer_owner_id=None, dry_run=False): + """ + Create a new VPC peering connection. + + :type vpc_id: str + :param vpc_id: The ID of the requester VPC. + + :type peer_vpc_id: str + :param peer_vpc_id: The ID of the VPC with which you are creating the peering connection. + + :type peer_owner_id: str + :param peer_owner_id: The AWS account ID of the owner of the peer VPC. + + :rtype: The newly created VpcPeeringConnection + :return: A :class:`boto.vpc.vpc_peering_connection.VpcPeeringConnection` object + """ + params = {'VpcId': vpc_id, + 'PeerVpcId': peer_vpc_id} + if peer_owner_id is not None: + params['PeerOwnerId'] = peer_owner_id + if dry_run: + params['DryRun'] = 'true' + + return self.get_object('CreateVpcPeeringConnection', params, + VpcPeeringConnection) + + def delete_vpc_peering_connection(self, vpc_peering_connection_id, dry_run=False): + """ + Deletes a VPC peering connection. Either the owner of the requester + VPC or the owner of the peer VPC can delete the VPC peering connection + if it's in the active state. The owner of the requester VPC can delete + a VPC peering connection in the pending-acceptance state. + + :type vpc_peering_connection_id: str + :param vpc_peering_connection_id: The ID of the VPC peering connection. + + :rtype: bool + :return: True if successful + """ + params = { + 'VpcPeeringConnectionId': vpc_peering_connection_id + } + + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteVpcPeeringConnection', params) + + def reject_vpc_peering_connection(self, vpc_peering_connection_id, dry_run=False): + """ + Rejects a VPC peering connection request. The VPC peering connection + must be in the pending-acceptance state. + + :type vpc_peering_connection_id: str + :param vpc_peering_connection_id: The ID of the VPC peering connection. + + :rtype: bool + :return: True if successful + """ + params = { + 'VpcPeeringConnectionId': vpc_peering_connection_id + } + + if dry_run: + params['DryRun'] = 'true' + return self.get_status('RejectVpcPeeringConnection', params) + + def accept_vpc_peering_connection(self, vpc_peering_connection_id, dry_run=False): + """ + Accepts a VPC peering connection request.
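The peering workflow spans two calls: the requester creates the connection, and the owner of the peer VPC accepts (or rejects) it while it is pending-acceptance. A sketch with hypothetical, same-account VPC IDs:

    pcx = conn.create_vpc_peering_connection('vpc-11111111', 'vpc-22222222')
    conn.accept_vpc_peering_connection(pcx.id)   # or reject_vpc_peering_connection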
The VPC peering connection + must be in the pending-acceptance state. + + :type vpc_peering_connection_id: str + :param vpc_peering_connection_id: The ID of the VPC peering connection. + + :rtype: Accepted VpcPeeringConnection + :return: A :class:`boto.vpc.vpc_peering_connection.VpcPeeringConnection` object + """ + params = { + 'VpcPeeringConnectionId': vpc_peering_connection_id + } + + if dry_run: + params['DryRun'] = 'true' + + return self.get_object('AcceptVpcPeeringConnection', params, + VpcPeeringConnection) + + def get_all_classic_link_vpcs(self, vpc_ids=None, filters=None, + dry_run=False): + """ + Describes the ClassicLink status of one or more VPCs. + + :type vpc_ids: list + :param vpc_ids: A list of strings with the desired VPC ID's + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :type filters: list of tuples or dict + :param filters: A list of tuples or dict containing filters. Each tuple + or dict item consists of a filter key and a filter value. + + :rtype: list + :return: A list of :class:`boto.vpc.vpc.VPC` + """ + params = {} + if vpc_ids: + self.build_list_params(params, vpc_ids, 'VpcId') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeVpcClassicLink', params, [('item', VPC)], + verb='POST') + + def attach_classic_link_vpc(self, vpc_id, instance_id, groups, + dry_run=False): + """ + Links an EC2-Classic instance to a ClassicLink-enabled VPC through one + or more of the VPC's security groups. You cannot link an EC2-Classic + instance to more than one VPC at a time. You can only link an instance + that's in the running state. An instance is automatically unlinked from + a VPC when it's stopped. You can link it to the VPC again when you + restart it. + + After you've linked an instance, you cannot change the VPC security + groups that are associated with it. To change the security groups, you + must first unlink the instance, and then link it again. + + Linking your instance to a VPC is sometimes referred to as attaching + your instance. + + :type vpc_id: str + :param vpc_id: The ID of a ClassicLink-enabled VPC. + + :type instance_id: str + :param instance_id: The ID of the EC2-Classic instance to link to the VPC. + + :type groups: list + :param groups: The ID of one or more of the VPC's security groups. + You cannot specify security groups from a different VPC. The + members of the list can be + :class:`boto.ec2.securitygroup.SecurityGroup` objects or + strings of the IDs of the security groups. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {'VpcId': vpc_id, 'InstanceId': instance_id} + if dry_run: + params['DryRun'] = 'true' + l = [] + for group in groups: + if hasattr(group, 'id'): + l.append(group.id) + else: + l.append(group) + self.build_list_params(params, l, 'SecurityGroupId') + return self.get_status('AttachClassicLinkVpc', params) + + def detach_classic_link_vpc(self, vpc_id, instance_id, dry_run=False): + """ + Unlinks a linked EC2-Classic instance from a VPC. After the instance + has been unlinked, the VPC security groups are no longer associated + with it. An instance is automatically unlinked from a VPC when + it's stopped. + + :type vpc_id: str + :param vpc_id: The ID of the VPC to which the instance is linked. + + :type instance_id: str + :param instance_id: The ID of the instance to unlink from the VPC.
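A sketch of the ClassicLink round trip just described (IDs hypothetical; the instance must be running and the security groups must belong to the VPC):

    conn.attach_classic_link_vpc('vpc-12345678', 'i-12345678',
                                 groups=['sg-12345678'])
    conn.detach_classic_link_vpc('vpc-12345678', 'i-12345678')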
+ + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {'VpcId': vpc_id, 'InstanceId': instance_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DetachClassicLinkVpc', params) + + def disable_vpc_classic_link(self, vpc_id, dry_run=False): + """ + Disables ClassicLink for a VPC. You cannot disable ClassicLink for a + VPC that has EC2-Classic instances linked to it. + + :type vpc_id: str + :param vpc_id: The ID of the VPC. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {'VpcId': vpc_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DisableVpcClassicLink', params) + + def enable_vpc_classic_link(self, vpc_id, dry_run=False): + """ + Enables a VPC for ClassicLink. You can then link EC2-Classic instances + to your ClassicLink-enabled VPC to allow communication over private IP + addresses. You cannot enable your VPC for ClassicLink if any of your + VPC's route tables have existing routes for address ranges within the + 10.0.0.0/8 IP address range, excluding local routes for VPCs in the + 10.0.0.0/16 and 10.1.0.0/16 IP address ranges. + + :type vpc_id: str + :param vpc_id: The ID of the VPC. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {'VpcId': vpc_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('EnableVpcClassicLink', params) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/vpc/customergateway.py b/desktop/core/ext-py/boto-2.38.0/boto/vpc/customergateway.py new file mode 100644 index 0000000000000000000000000000000000000000..8f19a81a569ac5bd248476fdc75e947c5f0325b2 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/vpc/customergateway.py @@ -0,0 +1,54 @@ +# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Represents a Customer Gateway +""" + +from boto.ec2.ec2object import TaggedEC2Object + + +class CustomerGateway(TaggedEC2Object): + + def __init__(self, connection=None): + super(CustomerGateway, self).__init__(connection) + self.id = None + self.type = None + self.state = None + self.ip_address = None + self.bgp_asn = None + + def __repr__(self): + return 'CustomerGateway:%s' % self.id + + def endElement(self, name, value, connection): + if name == 'customerGatewayId': + self.id = value + elif name == 'ipAddress': + self.ip_address = value + elif name == 'type': + self.type = value + elif name == 'state': + self.state = value + elif name == 'bgpAsn': + self.bgp_asn = int(value) + else: + setattr(self, name, value) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/vpc/dhcpoptions.py b/desktop/core/ext-py/boto-2.38.0/boto/vpc/dhcpoptions.py new file mode 100644 index 0000000000000000000000000000000000000000..758d452cebe001c4f96ed6cbefbf99b99964c7a3 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/vpc/dhcpoptions.py @@ -0,0 +1,72 @@ +# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Represents a DHCP Options set +""" + +from boto.ec2.ec2object import TaggedEC2Object + +class DhcpValueSet(list): + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'value': + self.append(value) + +class DhcpConfigSet(dict): + + def startElement(self, name, attrs, connection): + if name == 'valueSet': + if self._name not in self: + self[self._name] = DhcpValueSet() + return self[self._name] + + def endElement(self, name, value, connection): + if name == 'key': + self._name = value + +class DhcpOptions(TaggedEC2Object): + + def __init__(self, connection=None): + super(DhcpOptions, self).__init__(connection) + self.id = None + self.options = None + + def __repr__(self): + return 'DhcpOptions:%s' % self.id + + def startElement(self, name, attrs, connection): + retval = super(DhcpOptions, self).startElement(name, attrs, connection) + if retval is not None: + return retval + if name == 'dhcpConfigurationSet': + self.options = DhcpConfigSet() + return self.options + + def endElement(self, name, value, connection): + if name == 'dhcpOptionsId': + self.id = value + else: + setattr(self, name, value) + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/vpc/internetgateway.py b/desktop/core/ext-py/boto-2.38.0/boto/vpc/internetgateway.py new file mode 100644 index 0000000000000000000000000000000000000000..09f1fe04e78c0c749837525dd334c49c9f0b9d8e --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/vpc/internetgateway.py @@ -0,0 +1,72 @@ +# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Represents an Internet Gateway +""" + +from boto.ec2.ec2object import TaggedEC2Object +from boto.resultset import ResultSet + +class InternetGateway(TaggedEC2Object): + def __init__(self, connection=None): + super(InternetGateway, self).__init__(connection) + self.id = None + self.attachments = [] + + def __repr__(self): + return 'InternetGateway:%s' % self.id + + def startElement(self, name, attrs, connection): + result = super(InternetGateway, self).startElement(name, attrs, connection) + + if result is not None: + # Parent found an interested element, just return it + return result + + if name == 'attachmentSet': + self.attachments = ResultSet([('item', InternetGatewayAttachment)]) + return self.attachments + else: + return None + + def endElement(self, name, value, connection): + if name == 'internetGatewayId': + self.id = value + else: + setattr(self, name, value) + +class InternetGatewayAttachment(object): + def __init__(self, connection=None): + self.vpc_id = None + self.state = None + + def __repr__(self): + return 'InternetGatewayAttachment:%s' % self.vpc_id + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'vpcId': + self.vpc_id = value + elif name == 'state': + self.state = value diff --git a/desktop/core/ext-py/boto-2.38.0/boto/vpc/networkacl.py b/desktop/core/ext-py/boto-2.38.0/boto/vpc/networkacl.py new file mode 100644 index 0000000000000000000000000000000000000000..9b8b1cddcfbec92cf672be5f8be052089e79682c --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/vpc/networkacl.py @@ -0,0 +1,164 @@ +# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents a Network ACL +""" + +from boto.ec2.ec2object import TaggedEC2Object +from boto.resultset import ResultSet + + +class Icmp(object): + """ + Defines the ICMP code and type. 
+ """ + def __init__(self, connection=None): + self.code = None + self.type = None + + def __repr__(self): + return 'Icmp::code:%s, type:%s)' % ( self.code, self.type) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + + if name == 'code': + self.code = value + elif name == 'type': + self.type = value + +class NetworkAcl(TaggedEC2Object): + + def __init__(self, connection=None): + super(NetworkAcl, self).__init__(connection) + self.id = None + self.vpc_id = None + self.network_acl_entries = [] + self.associations = [] + + def __repr__(self): + return 'NetworkAcl:%s' % self.id + + def startElement(self, name, attrs, connection): + result = super(NetworkAcl, self).startElement(name, attrs, connection) + + if result is not None: + # Parent found an interested element, just return it + return result + + if name == 'entrySet': + self.network_acl_entries = ResultSet([('item', NetworkAclEntry)]) + return self.network_acl_entries + elif name == 'associationSet': + self.associations = ResultSet([('item', NetworkAclAssociation)]) + return self.associations + else: + return None + + def endElement(self, name, value, connection): + if name == 'networkAclId': + self.id = value + elif name == 'vpcId': + self.vpc_id = value + else: + setattr(self, name, value) + +class NetworkAclEntry(object): + def __init__(self, connection=None): + self.rule_number = None + self.protocol = None + self.rule_action = None + self.egress = None + self.cidr_block = None + self.port_range = PortRange() + self.icmp = Icmp() + + def __repr__(self): + return 'Acl:%s' % self.rule_number + + def startElement(self, name, attrs, connection): + + if name == 'portRange': + return self.port_range + elif name == 'icmpTypeCode': + return self.icmp + else: + return None + + def endElement(self, name, value, connection): + if name == 'cidrBlock': + self.cidr_block = value + elif name == 'egress': + self.egress = value + elif name == 'protocol': + self.protocol = value + elif name == 'ruleAction': + self.rule_action = value + elif name == 'ruleNumber': + self.rule_number = value + + +class NetworkAclAssociation(object): + def __init__(self, connection=None): + self.id = None + self.subnet_id = None + self.network_acl_id = None + + def __repr__(self): + return 'NetworkAclAssociation:%s' % self.id + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'networkAclAssociationId': + self.id = value + elif name == 'networkAclId': + self.network_acl_id = value + elif name == 'subnetId': + self.subnet_id = value + +class PortRange(object): + """ + Define the port range for the ACL entry if it is tcp / udp + """ + + def __init__(self, connection=None): + self.from_port = None + self.to_port = None + + def __repr__(self): + return 'PortRange:(%s-%s)' % ( self.from_port, self.to_port) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + + if name == 'from': + self.from_port = value + elif name == 'to': + self.to_port = value + + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/vpc/routetable.py b/desktop/core/ext-py/boto-2.38.0/boto/vpc/routetable.py new file mode 100644 index 0000000000000000000000000000000000000000..21060ee945f31dae7febc1d5fb667b3b68db5f07 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/vpc/routetable.py @@ -0,0 +1,115 @@ +# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of 
charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents a Route Table +""" + +from boto.ec2.ec2object import TaggedEC2Object +from boto.resultset import ResultSet + +class RouteTable(TaggedEC2Object): + + def __init__(self, connection=None): + super(RouteTable, self).__init__(connection) + self.id = None + self.vpc_id = None + self.routes = [] + self.associations = [] + + def __repr__(self): + return 'RouteTable:%s' % self.id + + def startElement(self, name, attrs, connection): + result = super(RouteTable, self).startElement(name, attrs, connection) + + if result is not None: + # Parent found an interested element, just return it + return result + + if name == 'routeSet': + self.routes = ResultSet([('item', Route)]) + return self.routes + elif name == 'associationSet': + self.associations = ResultSet([('item', RouteAssociation)]) + return self.associations + else: + return None + + def endElement(self, name, value, connection): + if name == 'routeTableId': + self.id = value + elif name == 'vpcId': + self.vpc_id = value + else: + setattr(self, name, value) + +class Route(object): + def __init__(self, connection=None): + self.destination_cidr_block = None + self.gateway_id = None + self.instance_id = None + self.interface_id = None + self.vpc_peering_connection_id = None + self.state = None + + def __repr__(self): + return 'Route:%s' % self.destination_cidr_block + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'destinationCidrBlock': + self.destination_cidr_block = value + elif name == 'gatewayId': + self.gateway_id = value + elif name == 'instanceId': + self.instance_id = value + elif name == 'networkInterfaceId': + self.interface_id = value + elif name == 'vpcPeeringConnectionId': + self.vpc_peering_connection_id = value + elif name == 'state': + self.state = value + +class RouteAssociation(object): + def __init__(self, connection=None): + self.id = None + self.route_table_id = None + self.subnet_id = None + self.main = False + + def __repr__(self): + return 'RouteAssociation:%s' % self.id + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'routeTableAssociationId': + self.id = value + elif name == 'routeTableId': + self.route_table_id = value + elif name == 'subnetId': + self.subnet_id = value + elif name == 'main': + self.main = value == 'true' diff --git a/desktop/core/ext-py/boto-2.38.0/boto/vpc/subnet.py b/desktop/core/ext-py/boto-2.38.0/boto/vpc/subnet.py new 
file mode 100644 index 0000000000000000000000000000000000000000..4d6f902539447ebd4ad246a0ebc2822624cc218b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/vpc/subnet.py @@ -0,0 +1,57 @@ +# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents a Subnet +""" + +from boto.ec2.ec2object import TaggedEC2Object + +class Subnet(TaggedEC2Object): + + def __init__(self, connection=None): + super(Subnet, self).__init__(connection) + self.id = None + self.vpc_id = None + self.state = None + self.cidr_block = None + self.available_ip_address_count = 0 + self.availability_zone = None + + def __repr__(self): + return 'Subnet:%s' % self.id + + def endElement(self, name, value, connection): + if name == 'subnetId': + self.id = value + elif name == 'vpcId': + self.vpc_id = value + elif name == 'state': + self.state = value + elif name == 'cidrBlock': + self.cidr_block = value + elif name == 'availableIpAddressCount': + self.available_ip_address_count = int(value) + elif name == 'availabilityZone': + self.availability_zone = value + else: + setattr(self, name, value) + diff --git a/desktop/core/ext-py/boto-2.38.0/boto/vpc/vpc.py b/desktop/core/ext-py/boto-2.38.0/boto/vpc/vpc.py new file mode 100644 index 0000000000000000000000000000000000000000..219a0b590b205acda62a64769237241b295b7005 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/vpc/vpc.py @@ -0,0 +1,204 @@ +# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents a Virtual Private Cloud. +""" + +from boto.ec2.ec2object import TaggedEC2Object + +class VPC(TaggedEC2Object): + + def __init__(self, connection=None): + """ + Represents a VPC. + + :ivar id: The unique ID of the VPC. + :ivar dhcp_options_id: The ID of the set of DHCP options you've associated with the VPC + (or default if the default options are associated with the VPC). + :ivar state: The current state of the VPC. + :ivar cidr_block: The CIDR block for the VPC. + :ivar is_default: Indicates whether the VPC is the default VPC. + :ivar instance_tenancy: The allowed tenancy of instances launched into the VPC. + :ivar classic_link_enabled: Indicates whether ClassicLink is enabled. + """ + super(VPC, self).__init__(connection) + self.id = None + self.dhcp_options_id = None + self.state = None + self.cidr_block = None + self.is_default = None + self.instance_tenancy = None + self.classic_link_enabled = None + + def __repr__(self): + return 'VPC:%s' % self.id + + def endElement(self, name, value, connection): + if name == 'vpcId': + self.id = value + elif name == 'dhcpOptionsId': + self.dhcp_options_id = value + elif name == 'state': + self.state = value + elif name == 'cidrBlock': + self.cidr_block = value + elif name == 'isDefault': + self.is_default = True if value == 'true' else False + elif name == 'instanceTenancy': + self.instance_tenancy = value + elif name == 'classicLinkEnabled': + self.classic_link_enabled = value + else: + setattr(self, name, value) + + def delete(self): + return self.connection.delete_vpc(self.id) + + def _update(self, updated): + self.__dict__.update(updated.__dict__) + + def _get_status_then_update_vpc(self, get_status_method, validate=False, + dry_run=False): + vpc_list = get_status_method( + [self.id], + dry_run=dry_run + ) + if len(vpc_list): + updated_vpc = vpc_list[0] + self._update(updated_vpc) + elif validate: + raise ValueError('%s is not a valid VPC ID' % (self.id,)) + + def update(self, validate=False, dry_run=False): + self._get_status_then_update_vpc( + self.connection.get_all_vpcs, + validate=validate, + dry_run=dry_run + ) + return self.state + + def update_classic_link_enabled(self, validate=False, dry_run=False): + """ + Updates instance's classic_link_enabled attribute + + :rtype: bool + :return: self.classic_link_enabled after update has occurred. + """ + self._get_status_then_update_vpc( + self.connection.get_all_classic_link_vpcs, + validate=validate, + dry_run=dry_run + ) + return self.classic_link_enabled + + def disable_classic_link(self, dry_run=False): + """ + Disables ClassicLink for a VPC. You cannot disable ClassicLink for a + VPC that has EC2-Classic instances linked to it. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + return self.connection.disable_vpc_classic_link(self.id, + dry_run=dry_run) + + def enable_classic_link(self, dry_run=False): + """ + Enables a VPC for ClassicLink. You can then link EC2-Classic instances + to your ClassicLink-enabled VPC to allow communication over private IP + addresses. 
You cannot enable your VPC for ClassicLink if any of your
+        VPC's route tables have existing routes for address ranges within the
+        10.0.0.0/8 IP address range, excluding local routes for VPCs in the
+        10.0.0.0/16 and 10.1.0.0/16 IP address ranges.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        return self.connection.enable_vpc_classic_link(self.id,
+                                                       dry_run=dry_run)
+
+    def attach_classic_instance(self, instance_id, groups, dry_run=False):
+        """
+        Links an EC2-Classic instance to a ClassicLink-enabled VPC through one
+        or more of the VPC's security groups. You cannot link an EC2-Classic
+        instance to more than one VPC at a time. You can only link an instance
+        that's in the running state. An instance is automatically unlinked from
+        a VPC when it's stopped. You can link it to the VPC again when you
+        restart it.
+
+        After you've linked an instance, you cannot change the VPC security
+        groups that are associated with it. To change the security groups, you
+        must first unlink the instance, and then link it again.
+
+        Linking your instance to a VPC is sometimes referred to as attaching
+        your instance.
+
+        :type instance_id: str
+        :param instance_id: The ID of the EC2-Classic instance to link.
+
+        :type groups: list
+        :param groups: The ID of one or more of the VPC's security groups.
+            You cannot specify security groups from a different VPC. The
+            members of the list can be
+            :class:`boto.ec2.securitygroup.SecurityGroup` objects or
+            strings of the IDs of the security groups.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        return self.connection.attach_classic_link_vpc(
+            vpc_id=self.id,
+            instance_id=instance_id,
+            groups=groups,
+            dry_run=dry_run
+        )
+
+    def detach_classic_instance(self, instance_id, dry_run=False):
+        """
+        Unlinks a linked EC2-Classic instance from a VPC. After the instance
+        has been unlinked, the VPC security groups are no longer associated
+        with it. An instance is automatically unlinked from a VPC when
+        it's stopped.
+
+        :type instance_id: str
+        :param instance_id: The ID of the instance to unlink from the VPC.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        return self.connection.detach_classic_link_vpc(
+            vpc_id=self.id,
+            instance_id=instance_id,
+            dry_run=dry_run
+        )
diff --git a/desktop/core/ext-py/boto-2.38.0/boto/vpc/vpc_peering_connection.py b/desktop/core/ext-py/boto-2.38.0/boto/vpc/vpc_peering_connection.py
new file mode 100644
index 0000000000000000000000000000000000000000..cdb9af8daed348daf773c46ab373e7287e0e5bd5
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/boto/vpc/vpc_peering_connection.py
@@ -0,0 +1,163 @@
+# Copyright (c) 2014 Skytap http://skytap.com/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents a VPC Peering Connection.
+"""
+
+from boto.ec2.ec2object import TaggedEC2Object
+
+class VpcInfo(object):
+    def __init__(self):
+        """
+        Information on a peer VPC.
+
+        :ivar vpc_id: The unique ID of the peer VPC.
+        :ivar owner_id: The owner of the peer VPC.
+        :ivar cidr_block: The CIDR block of the peer VPC.
+        """
+
+        self.vpc_id = None
+        self.owner_id = None
+        self.cidr_block = None
+
+    def __repr__(self):
+        return 'VpcInfo:%s' % self.vpc_id
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name == 'vpcId':
+            self.vpc_id = value
+        elif name == 'ownerId':
+            self.owner_id = value
+        elif name == 'cidrBlock':
+            self.cidr_block = value
+        else:
+            setattr(self, name, value)
+
+class VpcPeeringConnectionStatus(object):
+    """
+    The status of a VPC peering connection.
+
+    :ivar code: The status of the VPC peering connection. Valid values are:
+
+        * pending-acceptance
+        * failed
+        * expired
+        * provisioning
+        * active
+        * deleted
+        * rejected
+
+    :ivar message: A message that provides more information about the status
+        of the VPC peering connection, if applicable.
+    """
+    def __init__(self, code=0, message=None):
+        self.code = code
+        self.message = message
+
+    def __repr__(self):
+        return '%s(%s)' % (self.code, self.message)
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name == 'code':
+            self.code = value
+        elif name == 'message':
+            self.message = value
+        else:
+            setattr(self, name, value)
+
+
+class VpcPeeringConnection(TaggedEC2Object):
+
+    def __init__(self, connection=None):
+        """
+        Represents a VPC peering connection.
+
+        :ivar id: The unique ID of the VPC peering connection.
+        :ivar accepter_vpc_info: Information on the accepter VPC.
+        :ivar requester_vpc_info: Information on the requester VPC.
+        :ivar expiration_time: The expiration date and time for the VPC peering connection.
+        :ivar status_code: The status of the VPC peering connection.
+        :ivar status_message: A message that provides more information about the status of the VPC peering connection, if applicable.
+ """ + super(VpcPeeringConnection, self).__init__(connection) + self.id = None + self.accepter_vpc_info = VpcInfo() + self.requester_vpc_info = VpcInfo() + self.expiration_time = None + self._status = VpcPeeringConnectionStatus() + + @property + def status_code(self): + return self._status.code + + @property + def status_message(self): + return self._status.message + + def __repr__(self): + return 'VpcPeeringConnection:%s' % self.id + + def startElement(self, name, attrs, connection): + retval = super(VpcPeeringConnection, self).startElement(name, attrs, connection) + if retval is not None: + return retval + + if name == 'requesterVpcInfo': + return self.requester_vpc_info + elif name == 'accepterVpcInfo': + return self.accepter_vpc_info + elif name == 'status': + return self._status + + return None + + def endElement(self, name, value, connection): + if name == 'vpcPeeringConnectionId': + self.id = value + elif name == 'expirationTime': + self.expiration_time = value + else: + setattr(self, name, value) + + def delete(self): + return self.connection.delete_vpc_peering_connection(self.id) + + def _update(self, updated): + self.__dict__.update(updated.__dict__) + + def update(self, validate=False, dry_run=False): + vpc_peering_connection_list = self.connection.get_all_vpc_peering_connections( + [self.id], + dry_run=dry_run + ) + if len(vpc_peering_connection_list): + updated_vpc_peering_connection = vpc_peering_connection_list[0] + self._update(updated_vpc_peering_connection) + elif validate: + raise ValueError('%s is not a valid VpcPeeringConnection ID' % (self.id,)) + return self.status_code diff --git a/desktop/core/ext-py/boto-2.38.0/boto/vpc/vpnconnection.py b/desktop/core/ext-py/boto-2.38.0/boto/vpc/vpnconnection.py new file mode 100644 index 0000000000000000000000000000000000000000..cd8b11a62b7e775aeda5b410c996b9ada674c947 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/vpc/vpnconnection.py @@ -0,0 +1,204 @@ +# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import boto +from datetime import datetime +from boto.resultset import ResultSet + +""" +Represents a VPN Connectionn +""" + +from boto.ec2.ec2object import TaggedEC2Object + +class VpnConnectionOptions(object): + """ + Represents VPN connection options + + :ivar static_routes_only: Indicates whether the VPN connection uses static + routes only. Static routes must be used for devices that don't support + BGP. 
+ + """ + def __init__(self, static_routes_only=None): + self.static_routes_only = static_routes_only + + def __repr__(self): + return 'VpnConnectionOptions' + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'staticRoutesOnly': + self.static_routes_only = True if value == 'true' else False + else: + setattr(self, name, value) + +class VpnStaticRoute(object): + """ + Represents a static route for a VPN connection. + + :ivar destination_cidr_block: The CIDR block associated with the local + subnet of the customer data center. + :ivar source: Indicates how the routes were provided. + :ivar state: The current state of the static route. + """ + def __init__(self, destination_cidr_block=None, source=None, state=None): + self.destination_cidr_block = destination_cidr_block + self.source = source + self.available = state + + def __repr__(self): + return 'VpnStaticRoute: %s' % self.destination_cidr_block + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'destinationCidrBlock': + self.destination_cidr_block = value + elif name == 'source': + self.source = value + elif name == 'state': + self.state = value + else: + setattr(self, name, value) + +class VpnTunnel(object): + """ + Represents telemetry for a VPN tunnel + + :ivar outside_ip_address: The Internet-routable IP address of the + virtual private gateway's outside interface. + :ivar status: The status of the VPN tunnel. Valid values: UP | DOWN + :ivar last_status_change: The date and time of the last change in status. + :ivar status_message: If an error occurs, a description of the error. + :ivar accepted_route_count: The number of accepted routes. + """ + def __init__(self, outside_ip_address=None, status=None, last_status_change=None, + status_message=None, accepted_route_count=None): + self.outside_ip_address = outside_ip_address + self.status = status + self.last_status_change = last_status_change + self.status_message = status_message + self.accepted_route_count = accepted_route_count + + def __repr__(self): + return 'VpnTunnel: %s' % self.outside_ip_address + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'outsideIpAddress': + self.outside_ip_address = value + elif name == 'status': + self.status = value + elif name == 'lastStatusChange': + self.last_status_change = datetime.strptime(value, + '%Y-%m-%dT%H:%M:%S.%fZ') + elif name == 'statusMessage': + self.status_message = value + elif name == 'acceptedRouteCount': + try: + value = int(value) + except ValueError: + boto.log.warning('Error converting code (%s) to int' % value) + self.accepted_route_count = value + else: + setattr(self, name, value) + +class VpnConnection(TaggedEC2Object): + """ + Represents a VPN Connection + + :ivar id: The ID of the VPN connection. + :ivar state: The current state of the VPN connection. + Valid values: pending | available | deleting | deleted + :ivar customer_gateway_configuration: The configuration information for the + VPN connection's customer gateway (in the native XML format). This + element is always present in the + :class:`boto.vpc.VPCConnection.create_vpn_connection` response; + however, it's present in the + :class:`boto.vpc.VPCConnection.get_all_vpn_connections` response only + if the VPN connection is in the pending or available state. + :ivar type: The type of VPN connection (ipsec.1). 
+ :ivar customer_gateway_id: The ID of the customer gateway at your end of + the VPN connection. + :ivar vpn_gateway_id: The ID of the virtual private gateway + at the AWS side of the VPN connection. + :ivar tunnels: A list of the vpn tunnels (always 2) + :ivar options: The option set describing the VPN connection. + :ivar static_routes: A list of static routes associated with a VPN + connection. + + """ + def __init__(self, connection=None): + super(VpnConnection, self).__init__(connection) + self.id = None + self.state = None + self.customer_gateway_configuration = None + self.type = None + self.customer_gateway_id = None + self.vpn_gateway_id = None + self.tunnels = [] + self.options = None + self.static_routes = [] + + def __repr__(self): + return 'VpnConnection:%s' % self.id + + def startElement(self, name, attrs, connection): + retval = super(VpnConnection, self).startElement(name, attrs, connection) + if retval is not None: + return retval + if name == 'vgwTelemetry': + self.tunnels = ResultSet([('item', VpnTunnel)]) + return self.tunnels + elif name == 'routes': + self.static_routes = ResultSet([('item', VpnStaticRoute)]) + return self.static_routes + elif name == 'options': + self.options = VpnConnectionOptions() + return self.options + return None + + def endElement(self, name, value, connection): + if name == 'vpnConnectionId': + self.id = value + elif name == 'state': + self.state = value + elif name == 'customerGatewayConfiguration': + self.customer_gateway_configuration = value + elif name == 'type': + self.type = value + elif name == 'customerGatewayId': + self.customer_gateway_id = value + elif name == 'vpnGatewayId': + self.vpn_gateway_id = value + else: + setattr(self, name, value) + + def delete(self, dry_run=False): + return self.connection.delete_vpn_connection( + self.id, + dry_run=dry_run + ) diff --git a/desktop/core/ext-py/boto-2.38.0/boto/vpc/vpngateway.py b/desktop/core/ext-py/boto-2.38.0/boto/vpc/vpngateway.py new file mode 100644 index 0000000000000000000000000000000000000000..80598109c32c9ae9c36e0a50da4877aa791f96a9 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/boto/vpc/vpngateway.py @@ -0,0 +1,87 @@ +# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
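A minimal sketch of reading the connection and per-tunnel telemetry parsed by the VpnConnection and VpnTunnel classes above (credentials assumed configured; output formatting is illustrative):

import boto

conn = boto.connect_vpc()
for vpn in conn.get_all_vpn_connections():
    print('%s %s type=%s' % (vpn.id, vpn.state, vpn.type))
    for tunnel in vpn.tunnels:  # parsed from the vgwTelemetry element
        print('  %s status=%s routes=%s' % (tunnel.outside_ip_address,
                                            tunnel.status,
                                            tunnel.accepted_route_count))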
+ +""" +Represents a Vpn Gateway +""" + +from boto.ec2.ec2object import TaggedEC2Object + +class Attachment(object): + + def __init__(self, connection=None): + self.vpc_id = None + self.state = None + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'vpcId': + self.vpc_id = value + elif name == 'state': + self.state = value + else: + setattr(self, name, value) + +class VpnGateway(TaggedEC2Object): + + def __init__(self, connection=None): + super(VpnGateway, self).__init__(connection) + self.id = None + self.type = None + self.state = None + self.availability_zone = None + self.attachments = [] + + def __repr__(self): + return 'VpnGateway:%s' % self.id + + def startElement(self, name, attrs, connection): + retval = super(VpnGateway, self).startElement(name, attrs, connection) + if retval is not None: + return retval + if name == 'item': + att = Attachment() + self.attachments.append(att) + return att + + def endElement(self, name, value, connection): + if name == 'vpnGatewayId': + self.id = value + elif name == 'type': + self.type = value + elif name == 'state': + self.state = value + elif name == 'availabilityZone': + self.availability_zone = value + elif name == 'attachments': + pass + else: + setattr(self, name, value) + + def attach(self, vpc_id, dry_run=False): + return self.connection.attach_vpn_gateway( + self.id, + vpc_id, + dry_run=dry_run + ) + diff --git a/desktop/core/ext-py/boto-2.38.0/docs/BotoCheatSheet.pdf b/desktop/core/ext-py/boto-2.38.0/docs/BotoCheatSheet.pdf new file mode 100644 index 0000000000000000000000000000000000000000..474dd65062243105b774b2b6bd4c21db7fa68254 GIT binary patch literal 48109 zcmYhiV{~RsxGfsnwr$%^$F}pvM#r{o+qP{x9osg$Z@#_nz32RSM%Ajds+MYwS@W4i zt|%r!&&frz&VpKFT2f+M9CxD#`5%W*0G7O^}z|_)6#NM4q zhw0}47aKDJ2Nw}LH#-9xn;s0KGQip1)yV|lOvLiPr;FO#x%^!1OvLtIT|fW^U}yT@ z82?rO-?1btZCn6OM2r$PMlJv`fQh~7&(N{}J98HcB4#dT7UrL;oL!s%Mz%2S+1EZZ ziHAJS$A1Mv^o0N*L_!X?P-E_|F_=vb?rH>c@RBRC-mI{FfA_Lwi+21_H{X2bXi}OVq8y^}O(P!*^W;agcEnqx6 zKX5)@bdQ`UGIunNGf5nqC|eWE&SU6vAHrOc(eHH-FxavA=33sp#5`2=t1}2bC*Ozk z?8a$C$v~i>>uOL2YTt#REX{HfYr%=~B6&JK~9|r5X@Rv<)HY_P?3M^+U`4?>7*2D-44d8q3{Ap5R{P;^#|09?% zdFayeq`^Bw0f6Is@=l5M z=w7HUB$aejQ1BX+q(|8zRgNpQqkpJ0gvB%$#s^*9;j{AX`_jX&?xdY8d`7{f*f>@k z2Ker3yyn9a(Dcy3;y?FBtFdJfG3oBTc`t}?V zZ7yH6QG%#x+*4)h>lmqxuz=zuHj;4j6X2j=iT898=`nIVeN1H#WRpEmspjd^z5dRf z{K;(ZjBv8-S@MzqDV%f#o{9&c#AaI7BUUW)_p|{AA))+pr?x`Bgz3rK`^e{f@i+$y3xCP#i(8VkwP!#y3RrsCVP`2GAV~)>jH+JLPtu!E~)M z7&{E~Ff|`*N+9W0-zAWlM@dKs@KDIhcsP1Y?iFYzukf0Ge>;|}4yC_lZy4{y2OhKU zbX5B6OUysRfQt|uK2yO%mT=0;LeOH$`!Hl0zUw@3*HbbCR#z1&I%Qmi-!r(VLEXSF z-I&%MGuj714BKR2*N>Ge&c6oC9?XMk-A+`aT*>0}?38 zoAY0XvG&sG!&;I_c=N1tmU2%?5~%d`{w_kY^vzhaXVTi~DSpl}-Z9vA{HBYed!4s` zD%n|OJj%JmZxMY?__#-}AIJCC4Y3(52U`3P2ls)RjBXGSQJPhUqn1rG=-!776nKW* z|M(Ofn%)cZQH3W_ zQj8zKV566oG?-j_0OOSpma7OHxdp^ogbP2U7)y*8YN6f0UeFloicz+xQ80K5qAK*- z>~dqFM`{l7{i_lGJH^<-Z~}!R5J{{t)ZP7*$jGmt^r0eE8lTDFF+vX^Y-&PFMkP+# ze<&`E<3*3&A&Y^7>O0gbT6|00>HQseEiJ6W!squHC~-9+81bY42bL4B1NK^|C}*T) z@E7>v)~qLHy94KfI_11X%%XU4uVOjKbG02Y2S~v+PBB^&JcwC1KOsI4kV(}1VGq2w zphc>mIj?#9_r3j?aO}Dq>@iK57lEePq+Qw!Q(q{9zjB&k10C>E&XRR7S2cbTW|Xi_ z!aqb(f9#BQ&Qso7nSzU(uc$^vn9)I98)gM*t#a;5Gv7~T=F0VD?(5z;VlR$LPJ(iKO+7u2(02( z30%rYrp(^MqwM2cwBxI3$VQeh;@BZbQ{X_zmm>6#DGSXmpn@k4iVaHla4&NFfOsH* zU+hr!few=APau~5umOqDJ92y|5GyLr*;Jx1>Ip4@OqWV%u)5<3@x6e<%f826_VI6j zLj3;UY!(y;MWkP-;NT-lckcPO{fSUqqf2~`ioTcY`B{$n&)E?|X0s=-i#g!cLHWVt@t1D-!Cwnoin! 
z_r=>pL4;nW}4VDnX$@Sdh-xhgG4r8BQRtZoJQ?<;0 z?X|Zcqr2%Dzj%yJ@Sn|(MUW_}7&8gJ5So+pDEtVm)C@KbbOin@X50QZ+gK9IE|v$y z5eN&_O4`55Gemkrtg$GH0SiJjaUzhe#aT^{|`fRbP>m!1983h5YmwC|PC|Y+62o zJ|}PkY|_o~kSNRuij{4CO{A^)#BBu_CcLln}o*!do-!A!3BwQ?$$@u*?oGuOMO zkjvo!sCpNDRLICyHfn<3w#r`xT5}`q+k*7R|(d~!L_-Vg|1#(TSENYYR}L%krMNU`-Fr~Qjr z9O0xg$%!T?9hHSH@AW|v_XrF993g_}ztVuEo#Ca7L7PE+H4v!PAh^P(AL&6&RXJY2^)uqv+1-5bm;^gp?^gI3X@X9tNo@#>9-*=% z-v*Rv5YEXw_?2y$c>w9wWFSLKiucxiE?h*Bd?C1AXcD;I4W{Z& zpl{M4f0PX=2gJBm2l_LH1|msQX+6hDx8b;{p0nET@@}|yko@>E&4M_|EA${{-?Ufw zWY`dy(L6MRmBu}5>UuVCwZAC}oSDvj`5W+A*^ zB7%Da2_|R#JKLd6(@zmgs1W#i%+pO2yB8V2d&-d?z~b)iR(C*IjwZPDX%<@ zU@PK{1BnS4B2)J~smCQ}Kcr%e%H=?cumj&c@H358hMBaQQMHz(Ij0L*`={Vi)!|d+ z(`8dTFd5OtJIc$=(^s$qqKGaYs-3kqM>Yi0(ik63t_HkH117l*4t3Q%wRE@;i9?Fu zfchemV|CGr(UuXB?2zsrkoL>VL#1)w)3P|dw3_1INL(3T(-3eb*)H>y)8b&UPd7f2 zQbXo7J5X=2HR)>cT;k4%M1EI`aHtZ~B0+6GV?*jAD$Qy;w5M%(MX}XCoM=^J>Q{b0 zyf>mZ>duZfDRH`kxleO zk80~E4W6{zQ)Wun%{>>EBs?8FHA8s`ewMCRr0BFvzj;d1wpq<53d`iU4CRvtrNU`j zp=+}cH`w23V~|1Z=Um__*P0I`GkF;*zNN1RIAehkqRz~$LA6YO;jpv^_hmDCXF@f5 zgg#6Ep1iCOxYY-8-W%?7#$GInm2!d)oiu1Cwc}E?rc7W_Nj(!;1?$As z=4#@&z3NWv>!G1zHMhg)dZpUjYh?Dz64ShfJuOTZ(Tem+fx@7)N9&5$83Nr7wp&ez zkUTgf{CW!EBl5sfOl!DENG5Y<14I!rjLeV(5~bwQOv_g3|d$$%1g=G26-&SmT`j535l>A)a96>l4|4Pr`Jxc672#U zT<^B)S}ZAKhg?On&AC>}R!q(FTDUN`QKj{0rY%t6!HM@XODQrYts9&AIBN@CGgW3D zb23j`I%|xXblH6CrPe!F4YQ0K%>u6%!8Y4|{bO43!Z}ujFEqx3&bssl}a0LYXA zXL!k6;?e%=wLqR)?rJe&xE3H^)^#KNqm42A(IZ@aqR;xe(X?fq6%jgiJJ%4W6Ppz2 z#U|#?>$iYHJlEd35%0HxH;(@b+B(y3#4<*^yH8c*WesVBdw1)8tx)P^TXt?oTHxJPo(m~ zDj19$~D=7n`KX#R|OCIB6#W z`<%zjmVj=y*#-g}6(pvx8#ad(bNo;yfrQsnMlvK(H(;WA~Xl zJ&|Z~1~<|lja{Yp{<0WtsWBN)$v-65@e<6$pDWDVGzs0Donk2S;taHR_Pv!V_DNU_ z2Iqvh*f5ueroWU~q_5b7SJdS1Y6=3T2s|Q7 z%IZI`sq5YegWV<&#gN}u0f!1mP9oqBSsqMYD|&({W*87AslI^1R(=0eJkE`p!sm20 zoTewc=RA3_c}^^TyQhA0qT8QVw}-68V7m74B?hiWCtEcc@^RuEBfEsNzT9< z#3639Jw=fCc-3l>GVFw2j3M{7fwxYuH!hpe#dO<@cTLLX__Ui2ZCk=UJG)cV7V40H zFE%JkOqYGkX5b4v4RiT$K8nc}ugMlhv!LdD@BjBLu2q7y=KmT<@kA-t=lj5J+y*)7 z%>NY#dU3WXNK_-Ooq-R0*~BuSi)2!&qfv%_e!5T8lxV7#V3M;l2Qk|hu7PV`!!mql z2lBSTBzeUKVAdjw=bB0)?SR+7o$p~M#$8gIABB|?{c8=pI>o8Tu_|uWjPb38jF_Lj zyw~^?`h0H$hzCylc3eb}gY#`lu!P;&C4sCqzQ>dUnc3Y^P#aQb?LwTVXYJL^NIIjY zXZ3G^m73`&`-j(ZMbjTOu?M2N`rXS(1INqhgl!!}6WSF%G@SjG(V8~&2lypSFjb3?q@+{Wb_|ntu^n-xH!qIV2HC_Hn zzb2#GG3_^)@HZL76~EOP*S7;>_wepk`e8fbB69j!-8VIi7>K6#LKuioYRG!9?QA54 z^aK`y_d+dF7No@{Cpp625ck-_<`hF1RxgLcqsf_zTnx~hwEK}92pM8wvN+7~@rTLf z^k+S{N8}WYf`mkfJ0NBWP)5kVxQ7HBltzK!TZqP@Lg^+QJ1uf7(*m_p+(J5Ow0F1{ zq!Lc&=c(xAqvcwMM|DME<_lZ2>R1WBH~BVPB4dkH%ap2$tcmvvk>f=F!^f5l2)SnK zc)s3H7gEt3TljN{QA|?l#NVn-JNcVt{7&`0RD%Bb+Zzsc6de%XlZek`Eo$$x)J%h? z=@74CKpR|LMQ$h`;MTdi}NC}vZOHDKX+0Z&?=02ZI!Qc#LB*@wIusekzuokU>C&r zWJUolScSbY`XOA1l>th{A-ek{+TI2}56t6^VNR|dx zIkGzZ#Su-N9Dx@yR{O-Ha5s_W{0Ag-Knutd zEB8h!-gZ+x6aF5q^Cpf_m?S&(bkT=K;~-xpq<_gw(ME#gvyrkDo}SJxFUceq&4^E~ z3)z^AWT*keTv7TRE~pm)^SPTQVrwlIJf*r&-#yP_Y_M6$9|3rIl z%s73UCUf(C`KSS7>{%>EC$kgj5`X-80{)8*%wev@QMs|J%m! 
zbSOK-NFtouT=H*aQ9E}B8nPiEYvMFT*hY70@-b1T`H;OvU-U80mF>VkPb5oG(cl^S zrk)As;-#3^0n|p;E7HrL37`GNMNm_TZFrtTd0Ik9<5FqQ65j)vQf%3yY zE4f)wh=l3_>{uc$Zt?+r3-0EFxi*peiIty9_dMcBN5IPj^^x8f3otP!uFT6-l*IEO zcWfj_YPT*#H~KP6`6JZ|QHyW*aN|!lOHFa5=7J3Fj{iT+L7XnLn5sMr$;gygS2(#p z#L%&s9_F*F7b(YY+a?vnA=>1BfP(u|*uuk7KuA#{am&8dflR$()853K66Yuwf_&E- zAckO!1BIB(jtfM&2W%1k=u_%I{R1IKh;6XXolDw#auhY?;KaH$)gNq|O9&tTk-w8jW^Vp;=fN}-58rlV|K!Jqn#KY%Iqy(O#Ncc1Mj1$@Xg~Ng8Hh|EO@vas4 zd$~ZMRlvhIK#``14?`wjZ&zE;8Lk@O5pp4+S6ZMgR`pwjy`StCynrZaBuxO@vEeFE zM%)1JpHGrsxsd%`&0yoQAdLD!Zig^Caq!F`M$YCGcodF1OSyy>r?D?y)@3=0e8|W{Zp)%(vRXX4G zqkA4@0aZ-udWGGeARMxtW>E-bSDhPb^LP=QfC3d-w3ELHH-81WdNdjO>WXOOVHee?TAYa0g1RdFrAqeAu?NiDK;q`|!#A zss_1tUWa6~5!9OW@be8Tc1Q%?Ea4wIX-gy2w~UM^y=P+({qGM(HBnfWhIPxRoEs4r zeqGERpuxwEkgsbAq}m(CUid-lpFcvprPcoqp=Tl%I+QybT`jv2fXXQq6Oc@gH&nDO(R^NL|OgiLI6eC&3uVUPjC6>HZ0rEWNk~rBi z{$;wa9he~X`JcyGN zIi8ujMIsnx+F6#irLL!H^oTk^PB<#CA@Dd6-Jag?lM^_vRmRaitc=9CplsFg2DoYZIb_&u!R5S?>3bZ?oBODKM@S z+)cZOX#|6Yf*pk0&F|i}V$OCxxI}8Ea3fJ@5BvJIL+p!KKG zeM&*?bKAr@Obv26ud!?Sl=N{w7wqD1N2v!}ZmnUaz9q1GDb(_WaW-YsS8aiY)?YLfjHp;;blCA>pIp8wx~! zGtpUAl`CW6Ja7^%7P)>Bce@Vte#W)}=7(WBi68IEn=GX=-X3Pvw=5#EK7Y4(?y z+J-Uctv{S4^Fo9e$+SW7nk;c+M4I1313#={=(MSI4?4;6u?0l!d`9+O=nW~Vj4nE( zvn;#OrG$5_BgUUv!Yo4Pl#o6o)rr4GikQ;3^mSyP><>7XyZBy!Qd1-ekgWl-g{%33 zG)6vo6nT1i=Kx2R6q)&Z#$JR{Mg>Ul1CZ6qo{<3TbR4y(p1O-%0-|Y!OX4}N&g4Xa z>TN7t$F|oTLU;N`MU4%}J1~gH519b*pgc#gMw3>4TagwU1)xEzpOEqN1JEX*0^--G zPz;X87!NkdMifWFQJGQv&{+>gSTYrqd`rDaqCf>c2u&oua9B~+(i0MxN!|O`j3tV* zezNQHTu)gfo}$N7)6vW8>C)n=%=O|0o<&8$dc4AItB~-3RO@fN* z1ysN1nK@zKW|1vg8chOJckUBn;G9GgBK~<_RZSj=35pmr`;f zU&8q7yS;8d8FD@%IvcAq-?znFrKTa+_8OtRT4Tg%CgzyUR$O0>XnXV$UsM}i;IoCc z`Un%!H=CcnA%xn5QK#RF4}@L@M)cCh7t^u0Xbvzg@~9w@*jHADQ0r|S#A*noLWKEd zm3nh71KRMP)5YZ6O-gb~gu&U>{OK4YSVV1=Vj@1&H+~i9r>0>%3#?ufj!vcWNq5N^ zsH^29_F{Qb$292It?@-9yir@Q>5z&!FLNzyTmOlha>vqDA+xM}IH`0ZgoZ3eoOlj_ z$2#++-m17m6VeGu1aPh9BN1v@(o4Q_U3)DluO1X zGHz>$D~XPWe z^KB&d2{5UeURW4i<}(9yH?`v13G{ZTk2+EB-?(S|4T75;b(Fo^;$I>fm)3JuJNv*S zDiG5i<|OB@G;z44CTC3*ze5E$86%=qd{(T;LT8D!i?2^(vsk+i+^1DY{L>*Wl^gctvoJ)54A|eB!06IUa1q#<26wf z#1LFMgQ#nFD9siDYnRi-Z?;JXclBZl5|3uNKa4`CxqhL`Rrj!Y)Y41ViWPA<7y8bT zI--GDboO_GZQ6wmm@Gb1$_CFr2WmFYx`_K2DdJ7>mt`+K7}!WF_{Z}}QJ?yeW!Vcr z87_B<1ZdU^+d;&rqJcrYUb9=UM@b%eXYeLd{ zhrOEeZ+oxXKY5mFP+ciu_|#^l?tWo+8!(l9%2 zFIteK^g%16-LC4tQ3<*53x z6X5t1m|*=;QAc%C^cW}41Fnz}I?v$!otw~#P*1RHma?I`0FFZC#8^@9I4FMfpL4|K zw9(s8Sj>lpPh5%3>3^IfmFxO5V74gK$tj&5%w3v)yUWiY)F%k+dVKVzhqK##&H) zg1>6@(y|-leZivLLZpX^#0xUR5`#@b&Mf88zS^!+x;lO=7%~<%ct^PbDEWA3uF!%0 zvFO0?=)mYEw7<%&xbHmos!i{HB>himX?))oIchd_($PXTKYk%~(i5o2RPGlZ^Xn1n7PrIQf&nSk3N5u z@;E0`s7dN}NHAMl;0lgU>gIa*90_`i_C`Ufz>4Cs_acX%dZ@qw`x#anUI@_Pi)0Xo5Z0I#K2@bCo-JZ{UxcV(x*r>4X`X=7mw z-)v=;ggqMizGH(Jgtx-2|C|Dz2v`&@hg3edfPpbM|1cR$z=ME$FaIK0v%s0rtgHq^ z!vM27@FwAyP3S3ulR?V~{~L5N@`nsl9m9-69n8D5zfcC!&3mHinxwz7{z=*HwW4T5 z?>V{#+emLDvUv^U!v}H13VDxKMr+>~szKId8cr5b$CdKoNZQ4cdeOZ~SKb@9%(7cq zRU%w5sX_iOPhqi=HJh1NWSd-*nkCj}JNqRmq>BAS79X1ixbs{r6U^doGr%;Qv?VUZ zT}F=-``WSWgA-x#yldoCYPT5g1qB!_CUdtfo@Lj zluJGOV&L#F?$l1{_{@cXsiK273#13?0zc0#)W$8eRtOyZmMi%`57}$Ln>{*uWkAcb zOFP4%nyXkT*_{@_KUbZid`72w$fx$1&qi@v4^}&5CS)L7el|Y zM7|T|_&_jiQDh2CZDa!U^%TB_(Z6tpEsMZ^OQ1CV$iS`9O+U#ZmRW?Llg#MLjbF{t ztV8Lx(i|;hGf*7Sf0#~uBjz+yl&0{jCs}4?J=l&Hk|htvV;oH1dW+S$3gh?Tv0c!U zKFoZ6XFN`mEJr|_)R7s^=>@T|OOZ0mV47oRY5(>@QRS6cCRN9Skv;72|0|y#w@CCn z!%i8~@A=O2da)Q1$F{`;5w;S#(P!U4tCIlDLc$@xALv_gpY=3I9_uf|p2; zEE^~PX^gy*3frm6XF0s#{b|s0lsqYtI45kV8M-xe7Dm^q5J_*m8G4z72vJPb>!uJF z2=_PCX>%P1A~2j{dwm=pEH1?cw`6tbb-?ad`%_?b!m>9=#=-Kho(Of$r2FhddR$c& 
zjNxhLRr|8&xA!jZM}~o&7jtT%jm2^#7Hj^lzc1(q{Fy!C8K>Y>xRi{Ib;7S@sfNBD zF0yPeLB}X5fUQ<~pcL1B84`Ycq<%;P<)OcIEuXpu^)!(H+^8~0d&I-`qPa<1XpPH8 z8h==%%fae6%j@kj%ETs&OfZPvjrWNJ$g$RG<1bi9-n2Bt<0ME-YMOzJ?&j`M57Tym z*`VtKD$ss=5`3-ypbw2U#TCDu5`;(RgfNNYfpLL|aU4li-!WbfL|s_vObaq)XR|%t zv9?C=Tm}ON7qY`#D9%A~o;qC6ruuKa;-#5|M`)y1gbP6ELM?F#@Ipb$Qxps`bb(iBf@^5;sgjb9Dd_1TaUCyO-!U zwRX~cwoOL@0|AeOvl2I5wEwc`OLn_oYx?7ioxw!4 zG4BX=<0%?BuP0hZYjQsG#PgQdNK2NPOn?l7-~vcXl3O{)Np0Q`zzDB3f9SHnqoOQy zNOcRNE4t_JOtKk&>Tr%ZN_Uk& z$)<#~0X))D0sDXj$&boTNU4oTpt(IrLW2tr7R#g#noN$*Cy5Rop-3q>PtbBlCY zY*vCR`yFoD+)x1Y5u5Flwca0z)R$a?y3OJiEkVs%PSb7NW-%_rh( z_1|liRc>8_uS?idoYcL;ClsIKhWn3=DSP8cM(F-=35@pn+#_?N^*ylEd>lpf^_`Q`lJ4JOZ2qi&pX6Z_X(z?MV z>(G~uiPyj%z9r%&B8EVF>@SCOvrC}VTv8ER(o}ef2O79>bDjryxl-nYP%i=NIDS}3;BneB_4ZF^=rjKKHjRcA);Jm>3 z%EQS%{S)DlMn%bk_7S4^FtXG=m{~w~eVXyz)0}DB0z{~&$(zR*Z z-DG9ZY% zEApDJx^K$2n-+jhPch0YWjVY?aW8E550D6Ha%~=moeQyuBuD+W+E7DFRgZ)dTj+3?B_)?i7ttNgk{35-Hr}U- z?5&HQP2~Bh?Z`z{m4n_zJ~5&eQ&A(>0R%S0=yt#lniA!tC`T49!lxd>wXBA&AxC!D ze_8N*X)eZVq9y zA_NBVbx2u3amYBwc~&J|_{le)?cW%1(ihUka$IGv@ld&1*$9r+ye@Ok=JMoW-2-dH zW-4@$O}U8M9zh6jJWOZ#eh7WS*}4vlSPD@|n>6yZF~A&5gZ)sTq;SzpKZS4^=9C`e zlbCSos5mx=C27sO*396doJC+_+de%x&|`n^ZTW((IZu9Fa{i2LisYL#Mui ziqLhqBP}F^sfnzyJV~gsffx76WM;X=q2X|Q4g*j5Gk05%-~flZQP@c=ss~<<2;yf^ zLidPsfvigbeywINbwI3DjGHpw(L9G(-X~coo{nNLt|Ob^{NmmI2q#;=`sjS{w0y8E z$cn0AX8j}Z$7Ep9KTdaDAhp-V3~c?lWgaT(I{k@Fq2Hb{w8xP3q%Y-Iwl zz(py1Ysy&Jkh5YX6WlaYz0bPqkHd+HX{a9#)EXU>_5V1WDk6{BNb@J%jb8fm@0c4v zPnn?NDF=3YN5_GmY3-pUQt#4OLlsbwg%zIF8$I+Do;}=@wO*8;il2BJh9~I8n4IDv zCP?tDN)pMR@bPhy52>)A@Ux>caac}DqTglTjpj{n%{Az5(8nCRg^?R%ku z`Pjm1DXfO3;mTT@T%Ug9Iqxk#HnN#%AinkT$r-rmj)p`@kDBN~4`HeghLRy4wd_tr zKleD}+ZQkCHm4@F2qPq12p`q|-v%c#emW6daAY1C-w#?_sZ!vS&mbS`mw9wzgW?CU z2!08t;xr(J4p`A5@}%)D_Y?$|Hhrabn7br5u!&0hVHwbI(vD*Nhweec7~w4nQaK5Z z0VG78!kVbvL!dOa(e??v+<;;A;Cr5lU*Je_2(oJ<+VLFB?L2QcYdH9*_6c7?CO-dp zn2NO&22ah%w17R!?z&a-cX&s($PZoRGM`^P?{&(!SA2IWzICg-E73!uMiI4{b=I_1 zS?Ok{{q-_|ldkZdIQ)rv{^vL*|fD!HL*v;qsGH_YB*;&5Nx^M-14%`8KUEe z_@j8fcFIMaS<|!jwd_J2l{=zBrIbz?wMlHJCR&J1S$`Z$37tQVB`6Dos4SDx2gx+b zZm;yph7Tdi+%R4fgo`sOR)#rx)L)q48gbz~Qg0rGo1ogbbo!n=PZ6&@C#OyxnSzn9 zo07%Xw#36mJ9=YR2aE>ZCQ$e2N}5oP8pOl4)3#$j5w0E`h8_I%Kl4)j<$vZSirFyC zw*SmaHakD&rM;5>H7^w)?YEEA^2yxxIQ!D%&yskfE2hEtgY`E#K&8?6&oLhu-&;|_ zxk6Jf>)#9MwzPu0Q^o%cp#8>cSv;lH-X*@?O_pY*Xm`Vn16=f;>>jNPTw z?a>}6`7%yj@;T2((!r=b%bBELk+!pwOL}mtiPDk>-8w6n1fYod;a7h2+^HY~a3&1& zd88op@)`Dkfy%u$lh1b53T>5nvzO&Bi)RbK&ljK;mw!^O5ZwC6Baa~avL$bkzZ%3% zO-aF+vpr*?ZA|>>llue5TCr3g(Vof>aWzeIL3D)sngTiCo>^A@%ya3Oaa3%}UkHdi zBE<(8`IK1zTuk(7y5GJ+coS)=pz$iHt3o2~;^8ujZjzs?xK^ufYF90khl|6Rvu6$; zKEuVBgB+ZLkgkYk&^#mG;-%q!j8X#47H9O3P*TjtcO)n=JmBP*@E2n-L3j{%(@L<& z7X>_xqm$lDpwm`nIQl2bv_8YCmKsTLRAxAZ0Y>MhAXd@`j)Q6^A6b*;iZEQ~3rRuN zihq;|nE}`6QhFc}VDFWrre8O&`}oxLy0L{qK;-%I0lCUkcS9uy4OFD_3b{2DuDaTD z9u+U?uJmu6WSE4ki@FtZ9$E=SXPk-cyzvbeJOc9drv*#tFMN*bRYj*0IhS2!A2-I+ z7R;+o@oxXQfkys(dH>;2CG(Q>4W!8|wqn1_;Ji`Ra>1&;9sCC}xY1!rPO3H!4^}N% zE_(W)OGc|a*R80g#xQF|#Hja?Nwn3R>@29#aY+tHP1Vxg1qTqeG|&EZT#RjRK_BV4 zW*20P%-WI!PWptZ>ZR7_`!I6Dw>~=9oWx>d=`Xt4ZMpIk7SO?MJeSbKRaO1^PDt=t zMM8|BawU8FEf?(LjzAygGsb;2^p%K%qI(juvjw}2WVJb4#pfU{{x4BSTL}Z)Ovx~G z$)qCUje+JY0RV!l$X8-JE20kIClXsa|c zX;XaLnlP<=%L4?MVi7ul<1~Lm)gPhJX)zN`KWr4Gwn*C8FDBU7*p<|(gQ%3zJsF7_ z3RtyKb<})3fb}pop55nR6c|k5s`I#4y9nV4r+vE7U?Bwqz`GFcJ+;ejVO~>ll{2h~ilvL18~Tl@~ln2FO_u zu-=zoON9ETdsm@5VpC+s+Oym~Y+D6<{;0ksb%XR{T?v&EcPlw-{%pb~fumqC&^V}U zdH09-X_HOE|2Fj>Rn|_i9wmE^eS&wlFf`a=x-K!?iUW!j5|C?MFP(xT;&v0Cx(CcrpvHAYIef6K{D!$o;xzrHrNK+} z`!lkwjUy}nGM9y@gpTIesu4b7an9bx;w{3pJ{!~H&7QN0vmZgy5G2N}b^#mNYsHY? 
z+enl6L(%I_PtL98X{RZ9(h$8 zdLULN)TI|$C!Riqp;Qc9J+?(E$9BNy*z#Jh&qk`rM&_&$hJ6^_5Jk(7^tNQ?@hq#k zY-BnbbErwu#&xk>2ci?9a2`f^o5wf#X89iutG^V!ZMS@V1dCT9(S*XspLd!&Cy_r~ z<$tEj;uQ|I;hk4QCs}H|4L#MgV7Tt6(Y&qOzP(Y=8AZXb1~G==dQr!^GwwT*4xGz` znjmzhb!Tr=Nv!YM(*i`)VZ1o}qns9!=IM17lc`C=+};AXlK@`({68o88RuXa(WZER zGh}O*#D4a?DNwq{xV%GC<_Of?G8{y1yC^}A_$tGU9wKdv* zc=78uyPh4B-pzatr}cKVYtw36|NbiyOK%cU8a&-Cb&uHDjhP4G{sAuiEl2nu-K{O% zE&XD0T=it;`YkVx<&t-ZqIB54O@InXgbrqic)_qTrl1}=27W>QX-j>1U4U>iz&7e-`>+EK&+{j zz5!<01<57Ug-iz=E@>mY8QmzNT8+xkOnAyokTK4v^4zAeNeD$A+*O4~Ac`Kjik|sj z|7zkoilv^W}30Pckv#u}PAaR#jb>)yRa zcmRTowXSl0WePd#r@W{;4jJAfiKBht$`K>#Yi%OP$oeNSwPJ{<985PZkKzzUHxsf>f#s>C}xhNXfe3#8dRN2}*{=47z zoA_rYCMM=!PM-bb!9LiLnQO1L^1iQR7Oe((Dy1>feQ8Ted2cA&R$a?`zd_&c zNQ{;pqI3x+_~L`wVEiFfx-I#)V5=V?-(#&&f}yXLN`(}ZPF$md^#D#X2~!k-1U&7x z(8&suz6%%^1q9F2$7X>_;zglFL_c_%8Z_`$Qn~QypLT(2#!%~2>2=EZm_~^y-bD32 zD6<#uagWD#iYWhIeR-uQb~!ReSIZ3cXte6)kqogZRW{3Z%Rb6=|GP#cMo00oowObhd$Y5TAMdgYCP^10D=C5zhrM%OJO*;8QuFe3s~lMe374T z3eIR)bVpNZB|yR1^tCQEG>hWVj5Qv-W@1xc?fjfO{-`^<)2%!5nb0n%$E#kfYtwqH zE;C#o#2I?pizgv{>EIO%jEwLaYZ9Ggy@#_QS7SeypPZ(gRHjhi8&PwASw-?|E7ZX^ z@{HXUk-L}vo;nxb!AM}A!Q_?F71B7t+{H>*fl$sE=}H{2F?Vy+e5t9aI||eoYI|(z z(p8(X5k-5z1U<8i4g8l`y?D(*Si|OVr~Jao9aOQ3G%1t{%~n4O9Ayc$Ej*)ULS@qYsGuoEM}@RujQ+~x>{`$yTfFjB4u#zeXfUI6)nP_GA({r9VHJ`G{*As*fSl^Q$G(`WIn^n+v^qx=kfd#rEo#`e(!iVXaVew3!r7jBcnE>m`p_W{N4PVI ziJ94KnYBHES7hT`xEUt>@xnh0mo;mOtN*>f&r-8zF2b-w;IzC2w9j@~XBrDwi*y#a zpmINMK22=T2LUdYxTjx+xC%|LWv@-_X8hW?1`bTM@Uh!HxPfdGJzl~P(#*tsxahX^ zuUrAPlo6X3J~kT+yv%VDR!!Bnf=y2?*8Gd3YNY%GV&xJ0T~e z4cd%Og<<#;-f>I0-SOZ z36c}o@%3zjsl2NE!g@Ns!Z!DtqW?)YWGUPXbkHgqO+Ia9^GbHgfK}7I7coT{Pjrk` z>K1L|EHDYVALbt7=wAy6H(C1RjyQ7{U1dxopC#j(qF`yA;!zsGqB8@c49y;c!jHwx z2R2Galwt}A)U>e~aa{P-MYRfsqKxSqSp@3H%|G@YQo`n=+zreTk=gSvpzd!H;~Ef` zzbHJ0FGqLWY7(OzVXf`W0&07#IQRSTzZ{m9Jj80mvvpc3#k2jiA+?I+aTkD}_7WNo zab^3sweMyI)MELCz{6e8hs2X`QH(J1Er_ozEH_Ksc`L&aJWg4E`xEHH7BwSJ_W;fy zqMv_Jz`$~Q>5NCNK(@X0AI~B@XTv3GVOU z<^#tM#8fYZZ}ruW{}W&4d!eFi=6xW2gy-wv8`MGgrsW?7=YNeX1_4_Q_{x(#gzH@(JY-CSS^};hK^NAXYuUZsgDX zn?-tj-Iu{HEd9@3Kjg)!5W>^HLBACUF&|zZeS-x0`t0Z1^>?fNa2_|xe|__OP62+I z&)cqlZqk}C^>^%I%G5ohZt2gEbp`;u6ULT`Apl=aoTf~D z;h(2aZx^CB8-G-)_#({RlV?FNmHw(0HZXTY+jD>R7Y%|tBso89vHxrn3qan1wln2Q z#YJ@CTk-JZ|MbNPb8Z~E5c9tSt&hKJb~g9Q)cQ>($Hz}LcH!hhzdQwzhfIR-$B=7u zFXbk{`Zb=&8+ZH6{zm z1RpBheBiJerx+Q_b?d`@^X5z^s%8KkO!eH?eDFW$9q!z+|Dt!~JgMGr&C?hG=pBI- z32ZmPB4q+wor_zMijfaF$WVFsdj;Ze|JKG?*GpS>GozZY-28NOunj_LAsjCHjp zMX5;nrAdQ4EeM#TtWx4t$4Jk}e6gcH%Koi-R z@?BMqfXU7ZrxWIka*hlkhJb+s%cgcrP*`YxjnS@p^toUM4W!ofJsS+D8Ep|?ONtE*L5mi>0`Lh zVhx2H;KrHfqC>}Q<@-g3#oBw7K5C<{&m^7Unq{B?nHD>6=V+x%@ygm^99SAnLG@qdGveen5 z5Qsqf4I!iF%60RF)Tlkt@IcqVh)qWYbCx9M@&}l+|2CB^Jh8BXtT=Mg>aPD)$Pso% zRmVi@b{ILCus?!jH8@6nxp9Bz2HuSNJ5Zh5eBA3;h3ZnLJ7NI*d2l|>6kGU5nWF$&SdnIY7S%rRkg?L+xvk`Z8-C_mrb;nC`{U#=SkI5C@$~?I!p-P=!G&4 z&;Lg6NHu)Wdm>y$MiX6BQKjfn@S60%>Z1q!9BM&iS^JNM)ue42Zryab9Aof7Su16% z`cFO7VaRwU2IG&97nn8x7^;Al)3B05%`2j+Gc;YBdgeIYj6pXbN zmU(J{mQdX8bVtrzK?(aDa&b3QuWgm%j}G}~{hAFO;^yRT1{hIqH3cRa61|&LKot<>F-H#eF^l#QtW?qAJ)5eC(E0QqbYUz#$kT zTcX)_qV~o&A}YN$C_Jf=N2ELo&i>zme@NxcLbPq>& ze1S~M>EEM{l{RUn!sw{N8g)vr9RdxU^Tx8zFDxz~#HoZS4ij_NxK@H(q4z;`lmw?-t!;|unI~sF1F{9C3#TZFBp$oL4@E!lmM zzA%FR0BPvjyqXj+xQUzjA86Wypf~$#*B9(N&l7qx55^VO?Fpapz(8WL&IPj-Wdt~d z!^x!R?n+YqG~Zo@H>yD)oFQ{PV2Q_LWScc2GiT`OxxP5^9l#alBnZ2Zjss;UZIc>d zS$tHkUzB7@^f)KX5yr|6B4FKsS_Zoz3z%WSzi@y7)q!6TVkh)QztTjQsLlgNMf0km zq8_|=z^=R<0{d3gFEnYnKXhC~8x7>*sVZVJds%xdHabr*(6h(%As`}0t9!p;5fPn_ zKX?Eso7K%3i_|i>G8F9nVxQA)39qDqoJC|@I?5i1T?5dMrj}r+mK-1Ke=`^Y9H_iD zj1Q3lezKfcfX~aBZKp4I>GRRJSLe_>R_8$2E^@&_S5MTI 
zdHjHC56~^}>xnL2QWKV%=|iT$CPLQCHU>on-=N0UpL z=iSAgyshDj^J?IkI_|PPtt)a~DS_TFMbLRY3JA=?`;WTHcbA#m#)8F(!N%mC(LK7od~XUQB*5szFwv z-)SC0s4{c66H9u;9p|=%U(v3vOc<&>7VMZyTKrY05CsgV2pF}NKpMf3@dmi98{XJ{ zY!N8ULJWZ$rIZ~z9iHftUfM*Ga#)0OeuH&dI05KKJqXQQVJzhy`wikTc04OpV}F_-z|lb3i!wW{>;^REE+AnA z3zO$egnU^UcOcsXaaAjl^dtYh)%UTTCoKlS1NtC?Vs{QiyeUQJXZU-FXhhe#w$>nm^dgb^aDFBXUt=NeWCA}^5vJQ7W*&?T9|sY<~KZN zdh>uYhIKmte-xw$MpZ4dxp#>J|06GucvnK#lK4#QX>fU>aXFd3XFa;a!Q9iqWN zIVLP;xR0Ed9-QGKl=4U4WDHQ4$!a71C3OBF4M#*9B2rC;W7;aYz#(>Di%LOPmZ4Y7 zV@(E*SO#$=#sH09m5mUO`hBM7ugYK5Fsa~;c39E_j8{Rh(i0Y{9Ls}+h+oLtxj;k0 zVYvP2T3<(ELcEZSrMiaPi0t0-ih|6WH!JO%yIJvJeb>tDtagJ9aaduR9AZ3;WB%e5 zb4gXL39^esI<*$v3=(~78guCxnJAx*rR}Ylygg-Nc74i)^~Mz$4(biW?VCDsUqCZM z6f_#`9Wo_hk}pRtoMwSWU+@6YOdO;cx_KcjZ!R6vDuZRtQL36k^ICH4y#1q6)U+bU zih3Lj*m1a07L>LBzF$P!AR_(zuphzO`afDQJhC6kXsQmt&~}_K@t}&q{@DKbO(OP$ z-Yq_;<*($zGY^1gbev27 zpJ!B${n)!o1e}Ui)L76DrSRNTr7|kL(=qqT&;}pf5$kvt;$ARLSH6>)!|u#?RK{=V zHZS2@nevun*)j01KhC%d>4!0ZOh{aikVMf@!muSn9&Rfle@RRIYR{r9!c|A zur3q>xG>IW67X#2Gyn$SuC-4NTbFK9wzw>YoS7aqB2*QiF?Sz2%wjI@Q@+XDIi(q)T{yq4#WW>qPI^3r}S`>Q>L6Vuvs#leABU*t` zyDer6bQmrsHWQ56xb<<+JYR7o5{A?+ctUb7rS83yI#(hM7G*i7Jk2pO9_{`| z38M?o&{lxKhSL7lPqaqXx`d0ptItZ$ijd_r9}{eW$9C|ixK!Nq{RwLe!B-ydg(zu~_otBx{=Bfnf>0~Cfs59qwKW`xZj-@AAi}MxORCR<# zWmoNSr7Wi{Hln_*S+O4!p_sJO9eAsoxxRjyaG|wBDu~j66&qZIaKnG!szE7 z@JJw>hgbFO9NaPa6b5K_QhfoQt8DQ@NP3dvYZnVTYxyk}EKA%>e6UpfmAd&zm*@F& zrF`{5ksMv1$5^guLM?0Ro|mxEwIVtB(Mz#W*G2dxo*633D5>fwDM~qtsqN@;xWmh6 zso*ZXD<1_So6Ar2RriJcxuc`#9`2tvd52J4uQ(@7i&L9}UQpC{=0GpM5P5h z#Xo`r(!-&~uI4gY68$ZCPR(XS_M?|a_SE4+g++%f@YPHn{-Z_xRmlt$>$F5+fGBN% z;Ii1B@^`E%0bYGP%9<2ro8dob9eU)>zmV7VGD5(WUOw0ZD)hroP?-Le9mC3?b(;my zLvZogn`mtN-LaKN9ZV~$)&k_kTMV^QXnQTUoLT=>oR}^~m;XeyA>{09c8to^A#q`) zfR;78vK5#aZn15xOm@hKoRE{VFtJ$~HWM+~-Dami8RTWfnX(?0NxdDs3>4lw7)Hnm zM&->Ei+snChb-3dl4WXw_UASQGi2-V3L}zPo%;_?N3`tVKR6xJxC#&W?a5sLPUo~r z1Eb2lbnAYP82gD~6z%`O=|FQ{`ku7NYuSWqdBKsMtV`Ej7PZrL!5t4#52>jdpMA)2 z4-v^Fs1s*qj&gImom8Xykx$7yqm5?4+OUv+lKYB#kwT$!$Y_ep+#IEiln7d{xNM(z zo{e)p(GC|WWRh|wa}$HK&f~1hPIh(V7J)cF!5z}~zwwDWa*-lnOb$w`QX&D6kX5uI z(758JZ;6b=+iG8T9999gXsfgI*^iZ4)XGdy?M|cIn!=%L^1f*mD?Zm*e5C8}!*LMY zkS%HIv_`dy+#!)IY4mKeA_4OuwxBrMxZ&C-%PYxdW@}(mC)=H5 zxSEI9)6=CV=GGN$QH`!z%rg6~NaH_Um?u2-G=knoW!4kOaCV#308pL)7&IL{@H&23 zj4xVsqcYyqchNU4?Q_kL9vN*I6f^#b)jKA|aeXXM25i+9lA6W5zQkrSL|RE?-2VTe z@_5)fHAfIhm0H4%4WiDW7RV=FpFiI&22AAe88CWjFpn&!!7tuK?x7A*#`jZ>;VrBd z&Nw((OnQy6vU4TUPX`qdh{iqtgZN8wN$yHQ1LiN^m0G?AYBN7H zAqHm$U4}pBI5V_}@OXRKvb4q{IrG*ouHgdD4E3E4NW?rZ>arp9BBg0z2{T-EIwY=-2G7Xzco@&WHvQn4vV&?7-mxqw$(ek}=3>#WRAQsJFGpt^fAosuI*=7C5Ef~q521>``3-JFX^pW76+-86WY+;*t?9)qR&6 zvAdBlj*YZ}c-?nQ$oMcN**GW*ZMgl%-M}xMcT<&jvXOAB>l@BU0-20syP?G|UOQdc zZteFp@w!S2Q+~p+bX2eyf03gJyeL6PBNXvUx($yEnhcwc6;|LoMoVqz>_X6x4LDp+ ze%~njrWcvSJO9l+!-e4DfWRxWYb&(N`sKf?^OP9+b&!MYV#;w=njqYP#$xnlba%l* zL$lQYRx0d>c5D??Vi8n&DK&8>M(h3T;XS{_RY1Jxihtn zbarW&%JClIRgrg9xY_E;Detw`P&Q{@HA>w=M2H*73(!O`f?kwKGcTdbYl&w$nwF7n zt>zv7^fE$|)rOFJb};3CNV~%Cag5s4iW6|=@?I-@g-89K?zeVIn)6Jh zchP>fV?H1H;cUlVK{{c&Z~XIiek@9`hr*m_`>;QJ_PVf62sCsHzJWjAm(aTq$B4Dx zy#w7bUR`4Eqnr1KYTP;DE3|8wY~o7oD01y0uN za8-txy=$CvMm*b@hGw@879r-{(%`1>+nNgh)tR^&Zg4jX?Vzl*9h{ImYj+!xJ1Yyw zz*`41iWRkiHPX(^r6d}PQwmMpU|!kp7BUUqH@E+>@_F)!1H=Mk26p?Hap|9`>$K+} zkIXMT%}>fso7rL;;cZp8YuQ`DRf6FfA^~Fcj|TM;Xq)9}@QDF72d>DG*9lG+f)EwR zRUZJW)R1;FPO}4$fhc=+19bM-eY&_V{N$G^`>rFP^d=KyCC<={rXbf?oBapX3hYf) zb{1)wm5)_8nVI^e?*c$q2UfIQWmdM&)nD^fV@qw{T#V&o0cxIHhj{Ps`s z3yphe-zujm}NLvu>h!FKhy1v@9jLY%5S2u(nNGW zGi-*(oAp;RWJa(iJstxMRGf;mbR}OU8`5G%6(2_Q-T*ptj8zi!4w2e?AK;@ELJnw* 
zF-xG=3L$NIhUU?kQ(dPCgyPYeVP&jgI`sz&-(IQ&oO1I;iQE=4cm=CLJ#6VK z@@Ec-=dqaCmSTR~Bc6!;t@|57zgxapUuN(L4M%X@%S<|6=52hO-)&J#R#`l943Ky3w5KCL};b0!)U!K zATuM;7-F?GE>R)`*31P9ntn}Ht5Ya)mz_H=FAsmhL<>M&JJH#?+rN50(M{VUmh8gM zC~(A{v8mh6($oEwFwq4}KF8V@U{h`;XYL|e`|TEph~vUmcej+6&Whn4*iu~5-<360 zL`JwpU8-cBvLab(AD)E7p|^Q3bR9&{BZjLQBU>UmC}k{V?q!FP=1FdfIT6S~OYV() zP^bW(^#2e;5+gpo01|*{)3`b(7L@oRhk0UgEUu3F#HoP&4w0ez$i1Xy(C7eoCX=hEe`r0vC~-GOK5@9Nv&IFKfuLUAx}X+s3-V zYB0j_=W7NolcUMQzJwAG6$XL=9WF-q*r`GHC?Ew~{V;(=!_x9*c@cYM;OMb+c($WB zTg`2<9Bw@#y4{tJC><77FkniL?k4|9VEh+#FPmBgmKtWAPqEad9ck)?I|f*z-0r@P`fK3 z7;{iXKCgf6whMbnR$2s3Uj9Q|5CKOcA}vK(HIF;O6RQ(d31-dOo9kt{Q$LbB z%kpDVBN%J7n-!0LCM)JCtQd{}S$I7}{Q`fUGqJ0-Qo#$2Cb^TfXF1DOHW zwXj7f57^iSd$oLB8QCnIw5^^T6L~8cd$NiS!3L|r=a+J%duK_zH7x#I0XvUYB?69* z<~2}*a%W?Ne&(i0$;Obv#IizsV23IxIqWlxXAW&a4Rx(A<#eyzbZhdaJEQopTbwya z+8>-ptD^=2;yTV1qmBrpSKjm0YyYQ0R>?mXPCNR+jqo_U2=%;W5_eg38lsyyHiFbe zEb5}JW%v4gD3nj`PPAp*6nihtIgLhY1)P!}8Bv6(;6Y zPL|OSI*t$$*k_J9I{y(p@ko^KyOu*kRmz5?I^}+FQY{2^EPgsTx>CgiiVQYs^PUM?_<*chs-MfBHW{(~a3 zVxNdlq2mtFUwD1U2^HLV95e5dSe9fe}PSKWfq!sa^+Ssf)=u)+tHSfbE>D9PUKvjk@?s`$GVlI!^j}YZ_ zJ&)l21viBYu%qIC8K)DjtneH()mHDAu#$OEZfNDoX0aWIUd{)IGaH+bI{>RuLQDo3 zSRV7a9Ic*{6^?S~t*ibWB-4Y=A6P7Ef`k7au{bKQCuC^hCvwcnFilBii!OCqU5g&n z3#+i1WU7jgUM#Sa=9)IB`83Z<-j=n)ib!`KrtW|PZgvU` z)GnZU=t{6ru>ca*;vNDb&ifT=NN(s2u~m93hNL!H*GA zQt%yPhm3jF07IUayEU>?Cm>ts{B2`eS6;P(Hx*eal$>$mD|a5#Kb4Vwz_GUbWAizU z8Nv(hPOE9(I@oN4x?^si?FqTtMOR4$>c7g*l$!PNm(`aK4-+38=P>X2j?V@grY^$H zpz2k(oL`M6f@X8?lK2`w=G*j7m%^+@k7r_M@H;8&1_XQubh0k|mh_7KC*XcMi=A(J`*?C@#F-r}ff$D8U|wvtw>VlCV> z)5=a49a%f~8#1ZZyT=twXBL5bqlErTv836U-X{7Kr_{F9Y?f@o3M*9$jkdUchlVWo%pO zPXzDBAAayyzVqoVxF@|h@U`eVGPVPT;!J{idfBv&jbY?AnAGcJ<>oZhFciCUkA?YngVQP)?FT z2@~|_iJs$vp5$H2+g$JYY)_=hv2gI#XBmtq06^BU*G9pt%&M4jtz3rXr)(XZG) zh^Pj~KyQ2KO14exO(1f4{JCui_I;<&xLIt>*3P<|bUFQeVUmr9aiXwX<;VyBAS!Cj zmsu2fn$6P<*GuGkiH+A7=m&9pd1bO048{(r`bqBsFx0LIQRpy5`&V^8hmYE+@l*j0 z3qZ!$HlGN{$VDWd<{sj4J+N)Y=tug)?5c@b??;zra51WyzLiorZsM@PraNDTXpNxN ztxdr*_ya~))D+$9OD2W*Xd`5POP;>Rj$KNt|uBuzC6f^*3| z0=;02PU;UMb$18v>R+w$WZ#}YD^p`~)kYCW>Q`AXO!D&)QU9@Uk9kXVWT z${dvy(2@69>G|tpKB3JMn(+e_cdB$@unONNX7Xu%rdX6 zADxJ~haBd#^8XDE7$(c3FRQT&jacHUnc?2^@JmBg1O3Em&f2YYoLZa_?hiAp;mj^P zyGM^qjab{t8|*IXH)m$@*{bg1Fm%b7ge;BFiqqw7x6^rR~ZVmJ80g+sWp#!tbgP%Xq-uX0dn-1 zfn0@KNKM(qtR38OTBEPA{lJygfKI@gA83yabecp$)2*M&#PG-@&hpt1w$nU?ZQ0IDD8oCle=9*&;}7*xbPeTiUTZ&@CGNR6i$Lw(sDAA&Sa?FWCsf_zoV}BlI=rw$D_9o%50SK2Q{P#c_k$Q8ihP#`0qc|E8b7)OnhYCt2sCU=9uQ%v3w zCQlnRb^dft-XduHk0Z;Omes5nR;{~2w4Ac|g%)Nl^e5$vC#1TYKT%>WESk9fZ5k(v zvRgYy79-I~`an3-7z=+v3&W>t^xv(1NL2}qAawcg39k3OH-GNsN_!)wm8@;5u3i?U zx_V&d)P(gm@ad4){zg4ZkabkM_F6p!O;GU1c}`{vH3gnso)nn+8MRp^jH?~cZY)@D zJIT=-ElS{d1L&2e+yDGj+l|_yu}6YbSL-?Af|qnGL~EQ%fl z-m^HYq~29?J=H85jgwjp`I^^k_^$&KBwCiKO`1%HN0?lP*E9Y3h4~mQ+6`@HY`%cL z8SG~$Mb|Hguqi6)Daf~rtTWx&1HLFHl}VdMj?YjnoO$BjL+QF3uI{#;ys+SR6m)#!vAj1Et`}NnrpOcB(vSTBZ9at` z>WfV0;I>F@Vs3prcY)V0rZM>T`OU1FTTg2CTP8&S!E}T?uVLIT+KRmsw~OEC9saU4 zg)8MnE4gSq+axxuA(+D&+RMkp41LTt&G1D=#8@P>qxT7kGiQrZY}wG9A>FF;s9f@~ z3CGYkzId?r#S_!H%i9BvC*FVTCHWPCimezr*`SEZqsW>~xn^)*V_TeKBaw^2byX|U zk4i^MuG07{d%^TklfzN&(EY)E6`Z5h!?Nc>RlOEb<~Gd>1j# zb360-{_FrhzoWe`__AA^;Y-$}ArjtpMxcX}w)Fxh%H}crX2$OJvA;U2TFZX~+1W3* zHCB+TnYy>a609Ejn12R=A7M3hN78F4#epAbQOBr~dM)r$hcX{eoATZ{ta*!JHoeVP zcT>s_2&JzC^w#2PfUYY2rW~dZY`-oSHm~oZJShxo2vHa9Nt&(Kbym4vG6W87-DAp6 za;WuBpc~pIKb@vX{YQEE{%ZCRdRn?_Beci)@=0;H-C+t@m;geRGoyZXNV^^2~*R zrpqvsoAQ>1QRyH9usF&~e^^h))W=6(u`s&poL(^07j*KCOlSDZK)pYMxCMpBaOlR_ zZX_69`GIH6T82I4;%w6IS5N(k64gb`4&B9_od1-J#>+B;e|L|7DUCYsX1YgaRjV6h 
zn;{uCe3g`Cxudt5_8OYn9kWCkW~nVv#P?4B1%&Ai)#OiU6JyA!=PWD7H& z*)m^V1hH5rk!4e0B;8g{Blix8cAQ4B52m7%bv4v3idBl$mYNn>%+^!EnOm*rg0TP3j5C@v4%bQY@@spVbWh8(7_TG-``w|@fo&-K5Y?|;;EM-8tt zYu+<CLIlE!ID0Qd(DD1akA)NBw3y`RG75Pz0cukCp3xxnOp@ z|CP5g5jXh8()0|YK7tekD+@R194{;RhK;}mmI;tQJwb%}OePGEKP)_*8|;$uJZo>! zSwZ|ocJBlxfo__%@2Or}f2*bIRJ2Uxt_h!@Vv*uSD41YbVS(C9hw{ONq~9zA#gGi8 zbnNI5xZ=O-92_O!tWjN_<@zA}%uvzk7(Ym%>V)x=Ae{O@NLsn~o;Ww=O7`sFe+zra zrMqNaI+O}R=+g2f!#3H7dZ#Vt@|q(D!i z13*v$`~eV@Bc`0bbOke^Mp7*!p8coMKa}>tot8@@#>)Lyq>b)r+i6+rwcUx^_5WN2 zowe{q4To*MLUhM#q0Y2xYlzs%*YOBUNlfiX5FbOv`MQw@ACqB$O9wxcv6# z5R$j}CX`KNd04S4)KZ1}naEOxm1SZTwP%{Ph(>-K*%d*D4N1yU88deJAcyw5wkoK8 z7A%0mO>{a3t210_n6865PTe{OlQ85Q=MJKbjiZcBN4VhqwjF*W(Yb~Px@&sBJh4C) z>IkH6_75R;H%ic0ie)bB8nxYDJ%hdqSxZE1?Ni{)1D{xz!gCS`Cju29No*iL)XHsT z2Nk2ZyisB+FHf%oltm~eYtnC)tPR+^fLxiBQMZ0KCD7TL;7Osaj*c8>`_GvckHW@< z34TykvPt3Kvf$GQ(-maV739!0)%ZEC8*i&3xqyB^6L)6#1g~49UpW?`Taj+Ve>mh! zqf9q*pmHmduTAeS6<34~%$Y@!1lpWMiVx`|l+~l$doXmrW^V48Kd?*it-J}F5kdUR zuQMI=UrS8XH4!v$Xq4liTp)i21fq!sYkC-5_2~Zuk_;^Q5Z8MruxTF%hA}=JYP^l5 zFHBs$?e7|Lt;IkIhNg2d>bg#AuGRgDRP{n#=WdVmZ8w|7RnFTeYHt%K#PR*RA?EGb z_}CG`%^4>@5xyEd55PQe2*)2EHq{j2^7i>RJ1C~blLlANZ6Ib+39-1OLs$siV}411 zG7ihSIr|v2YmG_PcWEkqh;$+PrID>7F8X4g-55Sz7Ol=+jbK)7ss5`zZS}awN^>|M zJlEc$0wq9WA#mW?aIAbfGA0ddx?4!f=(4A=l%6l18)OFRjIf1Ot0nhh z2+VkWtSy%ZDuuf$XiN6#EYU6EsdsUSsBZ-Ek2~h<)>aLVsSk96;bn#C+@h=Am+N$1 z=^5W*Xx<3tg47&BN{QM2&)xlQfF1(HPC{hqR%hiFfc?<0qRjDYnV%Jm?1-(}YAFP1 zDS&e*FeEpWm>2x{19zbw(7d%}uXeazfl|gA^!l^CD&K*Vsf!R(3>k8Y>q%Lsn^-Vr zec;GO*(u8%s-<~G@4^F@B_r%HAlPh@+cGAk7p`_A&M0^19K z+_S1+s-PeJmsKPqVKfqAjhJLfvlfV{QgbI;La_>ybd65ju{aE|Z!pBYte0ktWcsed_9826gVZMgoqr_jq; ziGBW*UHWZNdSjSo9PB3=GIyParL)+(Z23G?7%z;@kq;VwV+AyP_u%z2?-;HEMHyyRY{>h@KO1Q!s!HbVCw z)$%>+?cDV2uMd6DuBBL(nO;9)d6Ph@(K*#@YcSG=M4gosxc^(gICf^@vm@*s20*EZ& zT+n_uJYJa?KrjfDK$-^1zp2z9l)6x8@N&juJ0^!Q3Qnl<)8l-C^?`cHP=KYnZ+Pfd z2X{NAe`=r|pjoSm&P9|Y{FrZ&nB?IJHYXxEDB0LE!bv$Y(W8p+UgQsiRDS!<^|pVM zkvlUJvG3HR%BkAO0PH4i%~(-6wfr6HiK(e)hAw()=83c-qbCu*uawVg&C^3hcr%io ziK1j~x9EU=k{${zfs-p=F)A?3s~_4(F0~y1D!^|^G4rf7`c-bq-GwkT)7+&}r?V43 zu#+&d1<%ke-z$cLT)rAS@T!3V(T6Ze)VWvl7~orsbx5)oJ0TsCdYAm@=V*{KB zTjKaN*}Ts(h+KFcTvv`$yZMcR`(uVi(KxG%iVrU7kg-I{+NB&C@oTlHI;R4Wxds~_ zrL>JMjk+wrWl;F*rEW&Mo5v7mT=!q+T1QBk0^KbrKw7N$DvAw&NT%z)(o9hglULqB9&w-~XpwP@7X z=$yWQ&yf~5BH)~BW&{K7iSmU2_Kt;KMiK^qwLXGdfzxzG-_B6;Ycu2gJST=e>?R$> z>d*cG9K_i~(9-bQgbO1ij@x!ckO8KA@aJsr+w3@x6esU>=@t=W4XQypgk1L_xv~Ff zQ~*O5ao3r_TBQ*5sEV$7^r&@62#x{cP?9bNG$%dEwG6{iKK?DTtq8=^K#ptTO4U8@ z0DI8#S{Ft-zB*CdaBBEA-%DYU_qX{w`n<>T18qXA8-b9v23g6P39yahplP-{K&If5 z8mF2Xmvm9Nk#Iz$!VD;tqFEmAyB;Z-VIbTHXU9}cc}^2OG&mXULrbp>2Nw;s?Ltc% zU=dX?hK3x0RFJyAO(^@Yv2oMimI^@cneF#YS$wR*5Q@{>D~>M6Iwx#3q;L&V1UN78 zReb^**fCfBm0F0^@I_3iqiR|FRcyLs9<7Ze-BWqA58D`1ydPEVV8!$hD}x+w%_y0E zHmh*D!v0M`9P*GXJ4gK>9_?MxjnFP*8N6kcafnj%2KV+NTD=bx~{y zH&7^!YbXW<(8&%a@C!fI4<=Zk~%xO|8l_=`uL*Mim!evmTX|xxfN4&I; zWQ$Bx2ZQVG$w`d{eJ0Xtbmc>Aa(b;4+*~P?Gj6aiNocxOv|_X`jD7QjxnUlqcTmif z?jDOg_c9K=a)qhkoey0n3QA6R0N9LV;SKeg$wrz?Hyj413hC_>?8&+=ZSl*&h4f3P z9wwKv-qTC9?_z~OQob*QV9u02U;O}!#oqwlyYTe74FN@kh-IPkXC+DBqSU1Kq!<-a zwP{2Z4S?S&RrRH3mY`Ko1ae|cT})*JhGLPx5+__ukg;;Iu8}`hZZEEh6Y8Y3xeAyU zdi!eRq6Xi1$IOa=$U|YgL`E)_G!O|^I9IAPA zK{F)lfEeD*F-abKhOHVqkNr{gxRuHjSDhv#(3}33*hjY{Ki@!wSU39N=MNx_9nqH| zpe=&Iw(y2XPFjgA0-?XsoUQyJGgHoNj8g{mXfua z-HhH9x4xFjD16h|Po^bkIr0TO_+lCZD77GF3fuvOn_MoSaPtNcMa(HxTgm0Hh!m+T z*S|@#hX8oCw**Js2b%S7;FNb+D9T*24+b=psuQp96XTe1Sg$gj=CIB%O-;IM?eihG zBHwpPBGvUOl16D5E_3g%`8s|UN5KlxV#f61ui%~_K#yB9&BtJI##vR6MjjMVt0Wneh z!{5q4f3)SIrGpL^TaIJSST^f{VD8*>*8xPHEy!$R(3}ie>eX*l_)3MOMta9z9vKkT 
zOx!{SX9H~qI8Zh`y@wfv0tJ;T2t*R^=)HfIC zZ-FW|HQ0zX1aAtEDHE0flrZF$1j;uqJ%Nf(sfHalmIS-s2E;Em{eeGO1MH@P5Nlmw zwqx`fA*1)4thQv2R?#NnYoH%BC4U!CCyK2^fow}tYT&&egc4eXm9DQ9wbq;0xmc?I z%2-@wLh%O1YgAI?Jn2-9uGlMVA?Y1w@+jf&gc%$K(%Qtx3lHRl^g2Px-ZrA;AE{64 z*n97Ah)n-{U5`OxrN7?L`3)LLyJ|yWkF@r+Ge!Y5O?B&9{2z|av&p#_moK$rkikyW zbM;(PY_e?;g$3#FK|mo^YiCL-wve#cC%iHu0nxcz6%B~T4YokOX5G4q z3>dT=9;$}ZsOEW8_hR(*CJ<7|5o@`-qNTk!Tt3qS+R6;7B8&20T2ze9W(=y}A($??SbHyVCz)~xE=YfS0ILTeY~dBP!J z{jc`E11O5+>vu4ql0lLpxJnR2*ho?Y2_jK4h~%)q5_Vx(f@BpzvI3F?0}4tMBuNw` zNfH#6Btf!dkR1GGQUBiSz3;pA>V5ZBy?RbC@+Q_vtx(e$#DgI(R_+Lz~{M zi_F6Ov9gH#qB4O}9*X3m&`i6sDb-iX*#-UgD+k2yP;T0Byp&`%VSl}xwUKLulsZ1k z<0R-xx6pq(L|CaRY{hb!lP&XI%M0hGd)Il>uh%zT$e@-@OStzy$%yUt$&)!_!Pl*| zZV75_FP)V3w7)%4X?6R48Vz&kNW4(buv26k@4Q2_`_;U1BSu}S(5pnA#|vf%MV3hV z7qZ}6^^F!5Frwnp%6?u`^+QSW+{n5kg=9MTTsz|<1U>r8ovWkWPcC}SfouC%iIw-n zPCa`|H$Q8JWNI0H92IacMvN{=a_dt!WUQRYq3v#q9i+FerJ$Bsz(@ z;f5FRvl97a{dwmT8|Up_NKjJcJLVJh_Dvdf;5Svyq=&vABtc{{Rg;MtaGg0>2pLP* zFw>#W3yRf#<5$IEo*OLMUp5zOf3ik4D3I|mUjQASzS)w*Bc;#)Y>SNj*H0=dP+{*? zzNoDiUMd{4e&GoxV-nKV+iquLgBgpb$h@zvmB~_66oS^Rjv4V#?}(6CC1)2OZAu58Zm_`-I!zs{*W4Y|O>D z#<|t8(lTn|8oop=y}7x3IHg&w`gPBRP|I8qr$~iM$nFHojxx*Te1rIrNB z(|hky@+n677#nsl5lr`*bz@1p*lCBet7<=Cs9d@{9hVq;)0!z_TZBQI>xhg9?>=8H zhJ$qIch>qj@baAX%;F z?N3rEkA`x}%kbvSgIsV9oAo32Dz19h_OIU`wVFmqa)xHzx*;E0q%2b3x(2I^PBsl$ zp|xl7-%LnK3d=aQ>5}IBkW267cG%;YfOm}K6M;f;3zbwkmcBxem73UNV(~M!#ZeMO z6@i`|vd11TGL%(P5Gb=V-~$xf;~7$OxbvW7pAdUu3I~Nn?*^~w9EtjkV{VpGd zxw_AUgcqUePdmTiTgjFb$C}dp{)G__vZ~ zZxzimh*LrVsDj|3&)cY^79@4=6Xa=XYv|RL?8_SyFkNzedl0CEtd$e>cd!h*kW~?| ztY2$(bfuXH!p;M(7QOYa!2)y@log3 zk%@j^J00SOyyfk0G@jAwJ$Q7kjE^{G2^R@2)>AIoaXhhdK_UsS>DlKtuu_^$ng6<} zMzWF}Go}_F7mC3)T&2+=$a0qJfEk{^Q zjt+|oJ&BncP;#?frV-;6#q#p9($Y}k7B zyf^P!JjW0I9!C+x_`? zY$=n?Q6Dxc$~T`cZ6p-GJa9z&>O+`+N^D$^FYhG6zraPoD zAup}=)wxJ(A2t~}r14m;Ga&w;1AIk$NIWoiA$EsEYw;a_G>89qJ zL#>khAoK9FS%*k9CbdK4{Ks8d;!>Hx_WR)ci@{}JpmAK(*XtE9M(@hBz0`%3~ zu5Sd+_NZHtAQMNULg{W{yvO?B<6#}v-DLUVbSp(trmAepv1c-{Kw>m|w_ql()a{-% zw&rhJS;Hr5-oL*=8SuJrzZWok2_G*VdsyMC@)N)XlIlJz7IZlZMS;F!&3QDG!5Mr{ zxdc^@d8@9YZ)O5Fr;yVNSC+f={MMZbDVl?VgGc#3?;Cx<;uKzAU;3tI?C5DqL2w>_Jzl#?Yt7hg0l8$}jzd0=XmkXncROq8Np&kB{Zw8n^qeVkv# z1=Ox!g`3vxnlLgtYtc&%#B$HeHTZL7c1W;B!8JX6phljqItfp1-4 z^O)aI7}FpQN8}eMAHKnM2wm#19}$~26d%^KeNMrS$GjlBqKoS2N1xn`hI^_yInzu= zVO2gRG~7zkstqZn`b(L&Vy>5cT+LXSn(%Y9Qw$O}`pi~W5LE1nY&0Jw5Jp^ka2@9k zcWiUZVljD0$*P8}gDL_0%#o4`yn|gcA)RP2)(l@ENv>zOc z*=`yUnRYxDhpdBt@^x%=J-8v(;j3M$eB^vhTePq@e% zUHh4Y&RID&D;*@ul!no;?BHo$)}|W+3ba;1I{g-g3SESAoT;!D7`F(S$e1A4F%YHuZ+uFNxisyMXR-7y1o4c3k#7| z!(2jkIGb&Wn4~<%zKmXlj|2$Murv7Sw>*A!h^IEg<=W|ISJf*L4f9#|J#w9Dj)#X7KU3%}yKGhe z#zv#Ble(8vIMJIBekE^4KQV$H+-M+*27^FVcvW1e-C7wdoO^>y*6QYnv7aQbu-T5r z);V@CXzR~8i|i+&!sF_1*&aJDcZe=a*ZcJn9nR^UZ1@$*+fC{k7hu;9)3Wp>#I9Wb za#z&$X@JB+w}()xM5%8}v-`E%!MOC!wu9>c_zBTCPV%hKySv$u-4z{2^*pG6gpYGD~@@>nRZ=!ugc`sFz071{|8 zvx<^>DT$*uINrh(xN!W|7>dtZW_C<^i;04vvbz5A<08z`j5CU5>_)04>cgTfpK2XW zF(Ll*NRlN;@l5Gvqv`XhM=;qaX-{yOKf5+2mgUTsLbu%YjIPq1$u+&jMijUf;gT>q zp1z6OUDD|}$6Iap(0e;w-25mm8+n4O^Y9j3PXwdKnB^rJcOQsCn2bJGw}O`c3}dd( zorOy6XuLjohbdi|1VdL8{Z;ke2QKt}P1gk8oT?I2lBHE+D9hB#pnu-B|7?Imk|~}- ziaa&garZrIS=GBM?0y2&H(Ju%$k(X|!i!^zTXBbYa2D}KYp2?}=dHrZ%>~qC0(%+j za~wZf8mxw>pJVWU>Zs2#rkSG|>#5m=`6_t#vQ|b?>{QY%gL6Xxv>7c3)YF`0*=Y}f z+E-KBHe>Ap^SAg4(1Jn{;^GP|pDAA5Gj@RiFNWY2qbnbjC_ z=WqjBp+RK(Am^)~a~-MIJq8O$r4`u5!-$b?b#4|%DE<9tstBGB;b-_>b$ltT)ja;9 z+Zc*)T4nMDLFm1oY*P1BXB~QW*!Uh3hC)(5W%Byu`G-6*A~)JrN)ylRVdWV6QA1Do<5P)+N;#|tkL#!E&4q6E~KeePFIzUp8oq|UGTb=%MbkH=tJn5LQE|8 z?L}D1!I09pGapxGk6pe$EB?~YDyG=x(HB+Dp+>fmai3#DsoXxrhfWUN7iT`3>VP=r 
z*LD(dA!3G7^%)!QMLZ+4qc|zw{YB%}%1C_t-KB=&Ip^wJ%lXzMqLZp;2f;o#t6*gHePrYS zW%iTta;Ci(lDp^jr=!&MLC~E73AVe_Pod4qWwVz-h|X3R&>F#Yw5gXfx%ANawg3R|j*+9E#oI=WySlMK#Jl+O%52bZ<&2n%3ga*bxC@?v~& z0_p@g?E88;+VH96)9smec1N^61Q<3^btkaps+<^*J0bGy%iecWlS7Q*@8h;=Li+q$ zK7g+S@4Y1ICLDj%bk&J0!Vt1&I6Yba;KaFO&g2WHx#GBj&~($r-1ccbl1dgP{yatP z5UKlbyx-YSrU?qmDJN&M*}YB=h@*Oy8TqEA)3?_J$f?=8N}<=Qjuqf2l5u&}`0~>| zPM$F6vydb)GaAQ*%&=JDG&7?ASlBVRzLrTo-Ixb;^j zmg!sfr|7+Pww}-4S{Gb&_q(CKvG1u!aOloGisM^1)kLLow1|?^DGl3*vc5}K2&O4t zj)W}>MnTjzUnp6Y_MCa&_wIi2Lwom)=zb5KGiTw=`V3063TNzneGRk6lh&R$s07Eo z;EL8nf;8*b_jz7K4GG9hB%gm38cPiZ?&Jm(IJHdgq*2Siyfydc#t5}$+ZFFug6zL40)lBjB zPTy39w!oMZaCit@2=Xm=CkPz!H~H;eb+*TXm%&njGC`HmSSzrQ2vK3=pH{>BXy`O7 zOm)lDck%1j8vaD1`GiLnxPo(&sJX~DFW7r~`x9ULLeS*Ek7Lky&}X3on7=t8V*FJ? zEzkg5FCbZV!Kr9ti~|^q7G7PMHSZv@+^GcSCnCyJ={;kf^?!A1Fa;_*qh1|N>h217 z-b+{M{q-zRnS!46v@&N&@ApaENA9Z{dN-Y;d=n_Q{LES^VF_EO1XOJ{I_JdvbYtY3 zC-tiey_XqS0f$tk@@@Ws=|$J(4@Rr*6%PS&Uy-Nnx=+2|+YH7TbG+&pX`Y-K8-W7i zZ0~iKT!RPWvcnZCP6XhJGX)M-u>i9p95RCLXpOS6Mq^`+4`J0@G1dEEK;NYO07Kb5 zh0QImW?I`({S=PM4;6p|?zvYU@478djBz}z`BtPUm;X~|-8 z!ENJ379dM;Yphd-SEEQ=nHh7&_mb_hZSjfx9P*2kQ8Yw71a;LvK^ z&H}9Typy>+a63;YLp|4t&qsgDC}?01vKvJVIrx%%t#^rMfLB^|#T1BUdC9!8$UafR`&}PS16mRw>&~m|m zSiNj(+pW$|g%bDh%L%=-=BuXNE<*yoPV1Xz!*uz4l=xKU_f%@fowKdMHT26U6f?W5 z_j$3Bw|>4Ji6-jnUGreCQ!k+%LmdZ9giyz&={dysxfS>>MpzJ$t%_STQ`n{H>M1vB z5WKbx<}-jl7Gy0$d-pL4pOe)*V7h2}7MHtvTyZ%;JYUmwk@D-tS2|#;4pW+-U-3kp zpO5CE;w&GY-%V`Uw4czYnxLE%MtBKuJU2FD0d$;RvS-Z?Zvsx(Z-k0Bj9wGWH*)3u$a2eU!VjAZmjM1l}y2NhN_mz zfSN4aP3%cmkr}T8eL)6<_w#Ur%Gip7MyD7D8NW3K(y)gYRJ6(i2NGN(K}G@PU7iph zvPwHv;i=ZB1XrAnl`2`$U$*4ZDOIaI0Bl*PGJQUPs|X)Zb-}6a z*;_dgA5>U&Biy5cO91e^AT#*fQz?P!V{JAU+aBGG=KD>+{QHa(dKy|WEGV$krgY3Z zE$IQi(wY{EZ=Ex0C0AnNyPvG~vh<`ZAGR1<-qSE z249Sb5*`_8t7xET3c!%ad!KbXZFQ*wM!UG-g^{$+as0^-7X6A7-|Y9L&i8-KdVgK@ zd7HR<^Bgui?}V!o6>di?o)+-VQdu%>Xi9o5BptB&a_wG%fxva$ySyG`CSMpU978`X z3zTg%H-GFb?Aiw_1H0l~-H*~5yl>~?%Qx5aZ0B@^T|7BY_I+gH>oXKQO9R08Ge0KA ziFwHcS@T<0%i}bMb&cK*Jo65#_X==O8d8`=Fb@DG+P&oT^z=cjJ+xj&%@EMCQTjvq zstq-`qT;xczWbxXK+!w3?Jzv8S^R!%?V7+SZaZW{%sW0Vs_&!iTEuJMPX2u?&#jX! 
zw~_8l23E%fJNvWGh=f13n46qUGB2)$wow9k&o<7tinivF&x`JX>m|@;jA|67X0?$QI;W8#g)@x&fR$EV1exWl@Nd=Vw<$I7|jzR5~U zHxYAeR*Brcwwt=n{C(Koi|5-+Sw+(CA3*Z4qvy{Y5PU`n7@nJXs9RCdvNXMa9y@B^ zvuwGjA(8rQ#`bKK_`2FQLnUyhLx!8Xk(1IYu>!x-GBYshwbY)VNOtTQv7G0LJl{11 z^9YhTv=9lGNaXf$+Ee(Y!=?Au=opU1cy(TM`-=f}-Q9ML?fARZ01V4BTh^OY;kc4t z=set;Gu5oDQ&KUMn(QnfSTpo2Y`Yya5pC~0)utpq%;E&RB}MUdPv#w5B?Vf=dBYfs z*VxmI_pk!Bc^`VqgCi=&vpcslttOS{Je!k0_e z%L}pPUpN4h)a1hcC6U_s{Hf9F(e^CsH*j|q_aA$fic9bSyQYaoySDR6#{HOnLGOh= zPqp%>J56WaHpMA9kJ+)F;{+3Y_*8hmF zrI$LdswF6cz+|1S1v3VNZM+h&w~{lof+LTZ#JL>t zY6J@Oy>}l!;W@zbe4y_DV1V&(oqMdz?M61}go`9LmEXybg#!j&%*8(*cfXxIyCthT zpo@RwIl=ySorIqr0_aDmm^#@)NPdRK-#H5!m!$uPeFfw{_zHh;6yS)TZo)yLxrvDh zsEU06+282kFYe=SM+wx`Z%663qx9QR`t2zFc9ecQO1~YY-;UC6N9nht^#8G=g!n&q zl;9*sa2`(QuWeP_K(0a^bjFc{nV zRu3252w(&dqEEm(qama(L&zV?@N+Y$pZESK#Q&(UKX-xxUF!dFVRr+c6wrSV_Gb+A zUl#Uf0QBDwmLvw)!~Q|opOMjjS6IZ)*_D4o*zXfB|Glt?pR*hPvamlbR*hn=z&Ri2sfapWjWaS`GEwmfqdvss=`&s7uS%G8;ak~%( zlCgt+pX&m{^Au18_>ajv1q1|6YP?_}6p)Y~1D65{0w>k7#_pXUPX{!ffI;uZ_Eka4 zzT5jiG=vNSLh9xkr2N6wdFdM%0^~pgwt$NHBM>uoT4Mtqk~nRG}avLL~0_k z7(56e0cC?gl@Z_CfvU`}E#VS44u5M4yLD&?qDR0?L6ppQGDuLBlY{90xGJRy(Fd5*DIQ%tIJ160A3_&Q&&l`8~H~(}WDG_)Z_yINjQ`v5ym=rJw zLWBZ*X8C?VgoT8VLJ&*HHyTnDtVGD}H3;@Q4J9m$0&NcI_z#Wr2QX?-fEKe&vf2LJ9p^K9rD<@L$RzBrNt9{RxSJTKbiT7!vH`f95L;L;k8ilrUWQ zS3RSI5h9>H`Q=^^Li87!5Xq?hl1_|dTmDQFfs>5!4;le)im^lEDM&p*6XT91wHK%c q4hQ1NNS&2rEwPq3$gT<6?Ho=7(B|y6EtCjKRFs07`=Y8G#eV@@&{z%t literal 0 HcmV?d00001 diff --git a/desktop/core/ext-py/boto-2.38.0/docs/Makefile b/desktop/core/ext-py/boto-2.38.0/docs/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..91d9bf278f45dfe36f806ffa12ef44ac827e07ec --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/Makefile @@ -0,0 +1,95 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = build + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source + +.PHONY: help clean html dirhtml pickle json epub htmlhelp qthelp latex changes linkcheck doctest + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " epub to make ePub files (sphinx >= v1.2b2)" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + -rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." 
+
+epub:
+	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+	@echo
+	@echo "Build finished. The e-Pub pages are in $(BUILDDIR)/epub."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/boto.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/boto.qhc"
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+	      "run these through (pdf)latex."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/make.bat b/desktop/core/ext-py/boto-2.38.0/docs/make.bat
new file mode 100644
index 0000000000000000000000000000000000000000..d6b0b7b6a5a338a6062b4ddceb510f8e186de2ff
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/make.bat
@@ -0,0 +1,113 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+set SPHINXBUILD=sphinx-build
+set BUILDDIR=build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source
+if NOT "%PAPER%" == "" (
+	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+	:help
+	echo.Please use `make ^<target^>` where ^<target^> is one of
+	echo.  html      to make standalone HTML files
+	echo.  dirhtml   to make HTML files named index.html in directories
+	echo.  pickle    to make pickle files
+	echo.  json      to make JSON files
+	echo.  htmlhelp  to make HTML files and a HTML help project
+	echo.  qthelp    to make HTML files and a qthelp project
+	echo.  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+	echo.  changes   to make an overview over all changed/added/deprecated items
+	echo.  linkcheck to check all external links for integrity
+	echo.  doctest   to run all doctests embedded in the documentation if enabled
+	goto end
+)
+
+if "%1" == "clean" (
+	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+	del /q /s %BUILDDIR%\*
+	goto end
+)
+
+if "%1" == "html" (
+	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+	goto end
+)
+
+if "%1" == "dirhtml" (
+	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+	goto end
+)
+
+if "%1" == "pickle" (
+	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+	echo.
+	echo.Build finished; now you can process the pickle files.
+	goto end
+)
+
+if "%1" == "json" (
+	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+	echo.
+	echo.Build finished; now you can process the JSON files.
+	goto end
+)
+
+if "%1" == "htmlhelp" (
+	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+	echo.
+	echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+	goto end
+)
+
+if "%1" == "qthelp" (
+	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+	echo.
+	echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\boto.qhcp
+	echo.To view the help file:
+	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\boto.qhc
+	goto end
+)
+
+if "%1" == "latex" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	echo.
+	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+	goto end
+)
+
+if "%1" == "changes" (
+	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+	echo.
+	echo.The overview file is in %BUILDDIR%/changes.
+	goto end
+)
+
+if "%1" == "linkcheck" (
+	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+	echo.
+	echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+	goto end
+)
+
+if "%1" == "doctest" (
+	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+	echo.
+	echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+	goto end
+)
+
+:end
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/_templates/layout.html b/desktop/core/ext-py/boto-2.38.0/docs/source/_templates/layout.html
new file mode 100644
index 0000000000000000000000000000000000000000..cdf85bbf0a395c7425fffc8ec62808308405a01a
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/_templates/layout.html
@@ -0,0 +1,3 @@
+{% extends '!layout.html' %}
+
+{% block sidebarsearch %}{{ super() }}{% endblock %}
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/apps_built_on_boto.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/apps_built_on_boto.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5d1bf4831367bba8d846a51d5492e67483dad8ea
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/apps_built_on_boto.rst
@@ -0,0 +1,51 @@
+.. _apps_built_on_boto:
+
+==========================
+Applications Built On Boto
+==========================
+
+Many people have taken Boto, layered additional functionality on top of it, and
+then shared the result with the community. This is a (partial) list of
+applications that use Boto.
+
+If you have an application or utility you've open-sourced that uses Boto and
+you'd like it listed here, please submit a `pull request`_ adding it!
+
+.. _`pull request`: https://github.com/boto/boto/pulls
+
+**botornado**
+    https://pypi.python.org/pypi/botornado
+    An asynchronous AWS client on Tornado; an early effort to move boto
+    onto the Tornado ioloop. Currently works with SQS and S3.
+
+**boto_rsync**
+    https://pypi.python.org/pypi/boto_rsync
+    boto-rsync is a rough adaptation of boto's s3put script which has been
+    reengineered to more closely mimic rsync. Its goal is to provide a familiar
+    rsync-like wrapper for boto's S3 and Google Storage interfaces.
+
+**boto_utils**
+    https://pypi.python.org/pypi/boto_utils
+    Command-line tools for interacting with Amazon Web Services, based on Boto.
+    Includes utils for S3, SES & Cloudwatch.
+
+**django-storages**
+    https://pypi.python.org/pypi/django-storages
+    A collection of storage backends for Django. Features the ``S3BotoStorage``
+    backend for storing media on S3.
+
+**mr.awsome**
+    https://pypi.python.org/pypi/mr.awsome
+    mr.awsome is a command-line tool (aws) to manage and control Amazon
+    Web Services EC2 instances. Once configured with your AWS key, you can
+    create, delete, monitor and ssh into instances, as well as perform scripted
+    tasks on them (via fabfiles). Examples are adding additional,
+    pre-configured webservers to a cluster (including updating the load
+    balancer), performing automated software deployments and creating backups -
+    each with just one call from the command line.
+
+**iamer**
+    https://pypi.python.org/pypi/iamer
+    IAMer dumps and loads your AWS IAM configuration into text files. Once
+    dumped, you can version the resulting json and ini files to keep track of
+    changes, and even ask your team mates to open pull requests when they want
+    access to something.
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/autoscale_tut.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/autoscale_tut.rst
new file mode 100644
index 0000000000000000000000000000000000000000..abc60957e67060d80fa7fb062713044fca47c629
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/autoscale_tut.rst
@@ -0,0 +1,218 @@
+.. _autoscale_tut:
+
+=============================================
+An Introduction to boto's Autoscale interface
+=============================================
+
+This tutorial focuses on the boto interface to the Autoscale service. This
+assumes you are familiar with boto's EC2 interface and concepts.
+
+Autoscale Concepts
+------------------
+
+The AWS Autoscale service comprises three core concepts:
+
+ #. *Autoscale Group (AG):* An AG can be viewed as a collection of criteria for
+    maintaining or scaling a set of EC2 instances over one or more availability
+    zones. An AG is limited to a single region.
+ #. *Launch Configuration (LC):* An LC is the set of information needed by the
+    AG to launch new instances - this can encompass image ids, startup data,
+    security groups and keys. Only one LC is attached to an AG.
+ #. *Triggers*: A trigger is essentially a set of rules for determining when to
+    scale an AG up or down. These rules can encompass a set of metrics (such as
+    average CPU usage across instances, or incoming requests), a threshold at
+    which an action will take place, and parameters to control how long
+    to wait after a threshold is crossed.
+
+Creating a Connection
+---------------------
+The first step in accessing autoscaling is to create a connection to the service.
+There are two ways to do this in boto. The first is:
+
+>>> from boto.ec2.autoscale import AutoScaleConnection
+>>> conn = AutoScaleConnection('<aws access key>', '<aws secret key>')
+
+
+A Note About Regions and Endpoints
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Like EC2, the Autoscale service has a different endpoint for each region. By
+default the US endpoint is used. To choose a specific region, instantiate the
+AutoScaleConnection object with that region's endpoint.
+
+>>> import boto.ec2.autoscale
+>>> autoscale = boto.ec2.autoscale.connect_to_region('eu-west-1')
+
+Alternatively, edit your boto.cfg with the default Autoscale endpoint to use::
+
+    [Boto]
+    autoscale_endpoint = autoscaling.eu-west-1.amazonaws.com
+
+Getting Existing AutoScale Groups
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To retrieve existing autoscale groups:
+
+>>> conn.get_all_groups()
+
+You will get back a list of AutoScale group objects, one for each AG you have.
+
+Creating Autoscaling Groups
+---------------------------
+An Autoscaling group has a number of parameters associated with it.
+
+ #. *Name*: The name of the AG.
+ #. *Availability Zones*: The list of availability zones it is defined over.
+ #. *Minimum Size*: Minimum number of instances running at one time.
+ #. *Maximum Size*: Maximum number of instances running at one time.
+ #. *Launch Configuration (LC)*: A set of instructions on how to launch an instance.
+ #. *Load Balancer*: An optional ELB load balancer to use. See the ELB tutorial
+    for information on how to create a load balancer.
+
+For the purposes of this tutorial, let's assume we want to create one autoscale
+group over the us-east-1a and us-east-1b availability zones. We want to have
+two instances in each availability zone, thus a minimum size of 4. For now we
+won't worry about scaling up or down - we'll introduce that later when we talk
+about scaling policies - but we'll set a maximum size of 8 so the group has
+headroom to grow. We'll also associate the AG with a load balancer which we
+assume we've already created, called 'my-lb'.
+
+Our LC tells us how to start an instance. This will at least include the image
+id to use, security groups, and key information. We assume the image id, key
+name and security groups have already been defined elsewhere - see the EC2
+tutorial for information on how to create these.
+
+>>> from boto.ec2.autoscale import LaunchConfiguration
+>>> from boto.ec2.autoscale import AutoScalingGroup
+>>> lc = LaunchConfiguration(name='my-launch-config', image_id='my-ami',
+                             key_name='my_key_name',
+                             security_groups=['my_security_groups'])
+>>> conn.create_launch_configuration(lc)
+
+We have now created a launch configuration called 'my-launch-config'. We are now
+ready to associate it with our new autoscale group.
+
+>>> ag = AutoScalingGroup(group_name='my_group', load_balancers=['my-lb'],
+                          availability_zones=['us-east-1a', 'us-east-1b'],
+                          launch_config=lc, min_size=4, max_size=8,
+                          connection=conn)
+>>> conn.create_auto_scaling_group(ag)
+
+We now have a new autoscaling group defined! At this point instances should be
+starting to launch. To view activity on an autoscale group:
+
+>>> ag.get_activities()
+ [Activity:Launching a new EC2 instance status:Successful progress:100,
+  ...]
+
+or alternatively:
+
+>>> conn.get_all_activities(ag)
+
+This autoscale group is fairly useful in that it will maintain the minimum size
+without breaching the maximum size defined. That means if one instance crashes,
+the autoscale group will use the launch configuration to start a new one in an
+attempt to maintain its minimum defined size. It knows instance health using
+the health check defined on its associated load balancer.
+
+Scaling a Group Up or Down
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+It can also be useful to scale a group up or down depending on certain criteria.
+For example, if the average CPU utilization of the group goes above 70%, you may
+want to scale up the number of instances to deal with demand. Likewise, you
+might want to scale down if usage drops again.
+These rules for **how** to scale are defined by *Scaling Policies*, and the rules for
+**when** to scale are defined by CloudWatch *Metric Alarms*.
+
+For example, let's configure scaling for the above group based on CPU utilization.
+We'll say it should scale up if the average CPU usage goes above 70% and scale
+down if it goes below 40%.
+
+Firstly, define some Scaling Policies. These tell Auto Scaling how to scale
+the group (but not when to do it; we'll specify that later).
+
+We need one policy for scaling up and one for scaling down.
+
+>>> from boto.ec2.autoscale import ScalingPolicy
+>>> scale_up_policy = ScalingPolicy(
+            name='scale_up', adjustment_type='ChangeInCapacity',
+            as_name='my_group', scaling_adjustment=1, cooldown=180)
+>>> scale_down_policy = ScalingPolicy(
+            name='scale_down', adjustment_type='ChangeInCapacity',
+            as_name='my_group', scaling_adjustment=-1, cooldown=180)
+
+The policy objects are now defined locally.
+Let's submit them to AWS.
+
+>>> conn.create_scaling_policy(scale_up_policy)
+>>> conn.create_scaling_policy(scale_down_policy)
+
+Now that the policies have been digested by AWS, they have extra properties
+that we aren't aware of locally. We need to refresh them by requesting them
+back again.
+
+>>> scale_up_policy = conn.get_all_policies(
+            as_group='my_group', policy_names=['scale_up'])[0]
+>>> scale_down_policy = conn.get_all_policies(
+            as_group='my_group', policy_names=['scale_down'])[0]
+
+Specifically, we'll need the Amazon Resource Name (ARN) of each policy, which
+will now be a property of our ScalingPolicy objects.
+
+Next we'll create CloudWatch alarms that will define when to run the
+Auto Scaling Policies.
+
+>>> import boto.ec2.cloudwatch
+>>> cloudwatch = boto.ec2.cloudwatch.connect_to_region('us-west-2')
+
+It makes sense to measure the average CPU usage across the whole Auto Scaling
+Group, rather than individual instances. We express that as CloudWatch
+*Dimensions*.
+
+>>> alarm_dimensions = {"AutoScalingGroupName": 'my_group'}
+
+Create an alarm for when to scale up, and one for when to scale down.
+
+>>> from boto.ec2.cloudwatch import MetricAlarm
+>>> scale_up_alarm = MetricAlarm(
+            name='scale_up_on_cpu', namespace='AWS/EC2',
+            metric='CPUUtilization', statistic='Average',
+            comparison='>', threshold='70',
+            period='60', evaluation_periods=2,
+            alarm_actions=[scale_up_policy.policy_arn],
+            dimensions=alarm_dimensions)
+>>> cloudwatch.create_alarm(scale_up_alarm)
+
+>>> scale_down_alarm = MetricAlarm(
+            name='scale_down_on_cpu', namespace='AWS/EC2',
+            metric='CPUUtilization', statistic='Average',
+            comparison='<', threshold='40',
+            period='60', evaluation_periods=2,
+            alarm_actions=[scale_down_policy.policy_arn],
+            dimensions=alarm_dimensions)
+>>> cloudwatch.create_alarm(scale_down_alarm)
+
+Auto Scaling will now create a new instance if the existing cluster averages
+more than 70% CPU for two minutes. Similarly, it will terminate an instance
+when CPU usage sits below 40%. Auto Scaling will not add or remove instances
+beyond the limits of the Scaling Group's 'max_size' and 'min_size' properties.
+
+To retrieve the instances in your autoscale group:
+
+>>> import boto.ec2
+>>> ec2 = boto.ec2.connect_to_region('us-west-2')
+>>> group = conn.get_all_groups(names=['my_group'])[0]
+>>> instance_ids = [i.instance_id for i in group.instances]
+>>> instances = ec2.get_only_instances(instance_ids)
+
+To delete your autoscale group, we first need to shut down all the
+instances:
+
+>>> ag.shutdown_instances()
+
+Once the instances have been shut down, you can delete the autoscale
+group:
+
+>>> ag.delete()
+
+You can also delete your launch configuration:
+
+>>> lc.delete()
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/boto_config_tut.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/boto_config_tut.rst
new file mode 100644
index 0000000000000000000000000000000000000000..37c22f045efc7105de06c2517752f38be078ea1e
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/boto_config_tut.rst
@@ -0,0 +1,442 @@
+.. _ref-boto_config:
+
+===========
+Boto Config
+===========
+
+Introduction
+------------
+
+There is a growing list of configuration options for the boto library. Many of
+these options can be passed into the constructors for top-level objects such as
+connections. Some options, such as credentials, can also be read from
+environment variables (e.g. ``AWS_ACCESS_KEY_ID``, ``AWS_SECRET_ACCESS_KEY``,
+``AWS_SECURITY_TOKEN`` and ``AWS_PROFILE``). It is also possible to manage
+these options in a central place through the use of boto config files.
+
+Details
+-------
+
+A boto config file is a text file formatted like an .ini configuration file that specifies
+values for options that control the behavior of the boto library. In Unix/Linux systems,
+on startup, the boto library looks for configuration files in the following locations
+and in the following order:
+
+* /etc/boto.cfg - for site-wide settings that all users on this machine will use
+* ~/.boto - for user-specific settings
+* ~/.aws/credentials - for credentials shared between SDKs
+
+In Windows, create a text file that has any name (e.g. boto.config). It's
+recommended that you put this file in your user folder. Then set
+a user environment variable named BOTO_CONFIG to the full path of that file.
+
+The options in the config file are merged into a single, in-memory configuration
+that is available as :py:mod:`boto.config`. The :py:class:`boto.pyami.config.Config`
+class is a subclass of the standard Python
+:py:class:`ConfigParser.SafeConfigParser` object and inherits all of the
+methods of that object. In addition, the boto
+:py:class:`Config <boto.pyami.config.Config>` class defines additional
+methods that are described on the PyamiConfigMethods page.
+
+An example boto config file might look like::
+
+    [Credentials]
+    aws_access_key_id = <your access key>
+    aws_secret_access_key = <your secret key>
+
+
+Sections
+--------
+
+The following sections and options are currently recognized within the
+boto config file.
+
+Credentials
+^^^^^^^^^^^
+
+The Credentials section is used to specify the AWS credentials used for all
+boto requests. The order of precedence for authentication credentials is:
+
+* Credentials passed into the Connection class constructor.
+* Credentials specified by environment variables.
+* Credentials specified as named profiles in the shared credential file.
+* Credentials specified by default in the shared credential file.
+* Credentials specified as named profiles in the config file.
+* Credentials specified by default in the config file.
+
+This section defines the following options: ``aws_access_key_id`` and
+``aws_secret_access_key``. The former is your AWS access key id and the
+latter is your AWS secret access key.
+
+For example::
+
+    [profile name_goes_here]
+    aws_access_key_id = <access key for this profile>
+    aws_secret_access_key = <secret key for this profile>
+
+    [Credentials]
+    aws_access_key_id = <your default access key>
+    aws_secret_access_key = <your default secret key>
+
+Please note that quote characters are not used on either side of the '='
+operator even though both your AWS access key id and secret key are strings.
+
+If you have multiple AWS keypairs that you use for different purposes,
+use the ``profile`` style shown above. You can set an arbitrary number
+of profiles within your configuration files and then reference them by name
+when you instantiate your connection. If you specify a profile that does not
+exist in the configuration, the keys used under the ``[Credentials]`` heading
+will be applied by default.
+
+The shared credentials file in ``~/.aws/credentials`` uses a slightly
+different format. For example::
+
+    [default]
+    aws_access_key_id = <your default access key>
+    aws_secret_access_key = <your default secret key>
+
+    [name_goes_here]
+    aws_access_key_id = <access key for this profile>
+    aws_secret_access_key = <secret key for this profile>
+
+    [another_profile]
+    aws_access_key_id = <access key for this profile>
+    aws_secret_access_key = <secret key for this profile>
+    aws_security_token = <security token for this profile>
+
+For greater security, the secret key can be stored in a keyring and
+retrieved via the keyring package. To use a keyring, use ``keyring``,
+rather than ``aws_secret_access_key``::
+
+    [Credentials]
+    aws_access_key_id = <your access key>
+    keyring = <keyring name>
+
+To use a keyring, you must have the Python `keyring
+<https://pypi.python.org/pypi/keyring>`_ package installed and in the
+Python path. To learn about setting up keyrings, see the `keyring
+documentation <https://pypi.python.org/pypi/keyring>`_.
+
+Credentials can also be supplied for a Eucalyptus service::
+
+    [Credentials]
+    euca_access_key_id = <your access key>
+    euca_secret_access_key = <your secret key>
+
+Finally, this section is also used to provide credentials for the Internet Archive API::
+
+    [Credentials]
+    ia_access_key_id = <your access key>
+    ia_secret_access_key = <your secret key>
+
+Boto
+^^^^
+
+The Boto section is used to specify options that control the operation of
+boto itself. This section defines the following options:
+
+:debug: Controls the level of debug messages that will be printed by the boto library.
+    The following values are defined::
+
+        0 - no debug messages are printed
+        1 - basic debug messages from boto are printed
+        2 - all boto debugging messages plus request/response messages from httplib
+
+:proxy: The name of the proxy host to use for connecting to AWS.
+:proxy_port: The port number to use to connect to the proxy host.
+:proxy_user: The user name to use when authenticating with the proxy host.
+:proxy_pass: The password to use when authenticating with the proxy host.
+:num_retries: The number of times to retry failed requests to an AWS server.
+    If boto receives an error from AWS, it will attempt to recover and retry the
+    request. The default number of retries is 5 but you can change the default
+    with this option.
+
+For example::
+
+    [Boto]
+    debug = 0
+    num_retries = 10
+
+    proxy = myproxy.com
+    proxy_port = 8080
+    proxy_user = foo
+    proxy_pass = bar
+
+
+:connection_stale_duration: Amount of time to wait in seconds before a
+    connection will stop getting reused. AWS will disconnect connections which
+    have been idle for 180 seconds.
+:is_secure: Whether the connection uses SSL. This setting will override
+    passed-in values.
+:https_validate_certificates: Validate HTTPS certificates. This is on by default.
+:ca_certificates_file: Location of CA certificates or the keyword "system".
+    Using the system keyword lets boto get out of the way and makes
+    SSL certificate validation the responsibility of the underlying SSL
+    implementation provided by the system.
+:http_socket_timeout: Timeout used to override the system default socket
+    timeout for httplib.
+:send_crlf_after_proxy_auth_headers: Change line-ending behaviour with proxies.
+    For more details see this `discussion `_
+:endpoints_path: Allows customizing the regions/endpoints available in Boto.
+    Provide an absolute path to a custom JSON file, which gets merged into the
+    defaults. (This can also be specified with the ``BOTO_ENDPOINTS``
+    environment variable instead.)
+
+These settings will default to::
+
+    [Boto]
+    connection_stale_duration = 180
+    is_secure = True
+    https_validate_certificates = True
+    ca_certificates_file = cacerts.txt
+    http_socket_timeout = 60
+    send_crlf_after_proxy_auth_headers = False
+    endpoints_path = /path/to/my/boto/endpoints.json
+
+You can control the timeouts and number of retries used when retrieving
+information from the Metadata Service (this is used for retrieving credentials
+for IAM roles on EC2 instances):
+
+:metadata_service_timeout: Number of seconds until requests to the metadata
+    service will timeout (float).
+:metadata_service_num_attempts: Number of times to attempt to retrieve
+    information from the metadata service before giving up (int).
+
+These settings will default to::
+
+    [Boto]
+    metadata_service_timeout = 1.0
+    metadata_service_num_attempts = 1
+
+
+This section is also used for specifying endpoints for non-AWS services such as
+Eucalyptus and Walrus.
+
+:eucalyptus_host: Select a default endpoint host for Eucalyptus
+:walrus_host: Select a default host for Walrus
+
+For example::
+
+    [Boto]
+    eucalyptus_host = somehost.example.com
+    walrus_host = somehost.example.com
+
+
+Finally, the Boto section is used to set default API versions for many AWS
+services.
+
+AutoScale settings:
+
+:autoscale_version: Set the API version
+:autoscale_endpoint: Endpoint to use
+:autoscale_region_name: Default region to use
+
+For example::
+
+    [Boto]
+    autoscale_version = 2011-01-01
+    autoscale_endpoint = autoscaling.us-west-2.amazonaws.com
+    autoscale_region_name = us-west-2
+
+
+Cloudformation settings can also be defined:
+
+:cfn_version: Cloud formation API version
+:cfn_region_name: Default region name
+:cfn_region_endpoint: Default endpoint
+
+For example::
+
+    [Boto]
+    cfn_version = 2010-05-15
+    cfn_region_name = us-west-2
+    cfn_region_endpoint = cloudformation.us-west-2.amazonaws.com
+
+Cloudsearch settings:
+
+:cs_region_name: Default cloudsearch region
+:cs_region_endpoint: Default cloudsearch endpoint
+
+For example::
+
+    [Boto]
+    cs_region_name = us-west-2
+    cs_region_endpoint = cloudsearch.us-west-2.amazonaws.com
+
+Cloudwatch settings:
+
+:cloudwatch_version: Cloudwatch API version
+:cloudwatch_region_name: Default region name
+:cloudwatch_region_endpoint: Default endpoint
+
+For example::
+
+    [Boto]
+    cloudwatch_version = 2010-08-01
+    cloudwatch_region_name = us-west-2
+    cloudwatch_region_endpoint = monitoring.us-west-2.amazonaws.com
+
+EC2 settings:
+
+:ec2_version: EC2 API version
+:ec2_region_name: Default region name
+:ec2_region_endpoint: Default endpoint
+
+For example::
+
+    [Boto]
+    ec2_version = 2012-12-01
+    ec2_region_name = us-west-2
+    ec2_region_endpoint = ec2.us-west-2.amazonaws.com
+
+ELB settings:
+
+:elb_version: ELB API version
+:elb_region_name: Default region name
+:elb_region_endpoint: Default endpoint
+
+For example::
+
+    [Boto]
+    elb_version = 2012-06-01
+    elb_region_name = us-west-2
+    elb_region_endpoint = elasticloadbalancing.us-west-2.amazonaws.com
+
+EMR settings:
+
+:emr_version: EMR API version
+:emr_region_name: Default region name
+:emr_region_endpoint: Default endpoint
+
+For example::
+
+    [Boto]
+    emr_version = 2009-03-31
+    emr_region_name = us-west-2
+    emr_region_endpoint = elasticmapreduce.us-west-2.amazonaws.com
+
+
+Precedence
+----------
+
+Even if you have your boto config set up, you can also keep credentials and
+options in environment variables, or you can explicitly pass them to
+method calls, e.g.::
+
+    >>> boto.ec2.connect_to_region(
+    ...     'us-west-2',
+    ...     aws_access_key_id='foo',
+    ...     aws_secret_access_key='bar')
+
+Where an option can be found in more than one place, boto will first use the
+explicitly supplied arguments; if none are found, it will then look for them
+in environment variables; and if that also fails, it will use the values in
+the boto config files.
+
+Notification
+^^^^^^^^^^^^
+
+If you are using notifications for boto.pyami, you can specify the email
+details through the following variables.
+
+:smtp_from: Used as the sender in notification emails.
+:smtp_to: Destination to which emails should be sent.
+:smtp_host: Host to connect to when sending notification emails.
+:smtp_port: Port to connect to when connecting to the ``smtp_host``.
+
+Default values are::
+
+    [notification]
+    smtp_from = boto
+    smtp_to = None
+    smtp_host = localhost
+    smtp_port = 25
+    smtp_tls = True
+    smtp_user = john
+    smtp_pass = hunter2
+
+SWF
+^^^
+
+The SWF section allows you to configure the default region to be used for the
+Amazon Simple Workflow service.
+
+:region: Set the default region
+
+Example::
+
+    [SWF]
+    region = us-west-2
+
+Pyami
+^^^^^
+
+The Pyami section is used to configure the working directory for PyAMI.
+
+:working_dir: Working directory used by PyAMI
+
+Example::
+
+    [Pyami]
+    working_dir = /home/foo/
+
+DB
+^^
+The DB section is used to configure access to databases through the
+:func:`boto.sdb.db.manager.get_manager` function.
+
+:db_type: Type of the database. Current allowed values are `SimpleDB` and
+    `XML`.
+:db_user: AWS access key id.
+:db_passwd: AWS secret access key.
+:db_name: Database that will be connected to.
+:db_table: Table name. Note: this doesn't appear to be used.
+:db_host: Host to connect to.
+:db_port: Port to connect to.
+:enable_ssl: Use SSL.
+
+More examples::
+
+    [DB]
+    db_type = SimpleDB
+    db_user = <aws access key id>
+    db_passwd = <aws secret access key>
+    db_name = my_domain
+    db_table = table
+    db_host = sdb.amazonaws.com
+    enable_ssl = True
+    debug = True
+
+    [DB_TestBasic]
+    db_type = SimpleDB
+    db_user = <aws access key id>
+    db_passwd = <aws secret access key>
+    db_name = basic_domain
+    db_port = 1111
+
+SDB
+^^^
+
+This section is used to configure SimpleDB.
+
+:region: Set the region to which SDB should connect
+
+Example::
+
+    [SDB]
+    region = us-west-2
+
+DynamoDB
+^^^^^^^^
+
+This section is used to configure DynamoDB.
+
+:region: Choose the default region
+:validate_checksums: Check checksums returned by DynamoDB
+
+Example::
+
+    [DynamoDB]
+    region = us-west-2
+    validate_checksums = True
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/boto_theme/static/boto.css_t b/desktop/core/ext-py/boto-2.38.0/docs/source/boto_theme/static/boto.css_t
new file mode 100644
index 0000000000000000000000000000000000000000..932e5183dba690681d2d090020e07da4ec0d15e1
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/boto_theme/static/boto.css_t
@@ -0,0 +1,239 @@
+/**
+ * Sphinx stylesheet -- default theme
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+@import url("basic.css");
+
+/* -- page layout ----------------------------------------------------------- */
+
+body {
+    font-family: 'Lucida Grande', 'Lucida Sans Unicode', Geneva, Verdana, Arial, sans-serif;
+    font-size: 100%;
+    background-color: #111111;
+    color: #555555;
+    margin: 0;
+    padding: 0;
+}
+
+div.documentwrapper {
+    float: left;
+    width: 100%;
+}
+
+div.bodywrapper {
+    margin: 0 0 0 300px;
+}
+
+hr{
+    border: 1px solid #B1B4B6;
+}
+
+div.document {
+    background-color: #fafafa;
+}
+
+div.body {
+    background-color: #ffffff;
+    color: #3E4349;
+    padding: 1em 30px 30px 30px;
+    font-size: 0.9em;
+}
+ +div.footer { + color: #555; + width: 100%; + padding: 13px 0; + text-align: center; + font-size: 75%; +} + +div.footer a { + color: #444444; +} + +div.related { + background-color: #6F6555; /*#6BA81E;*/ + line-height: 36px; + color: #CCCCCC; + text-shadow: 0px 1px 0 #444444; + font-size: 1.1em; +} + +div.related a { + color: #D9C5A7; +} + +div.related .right { + font-size: 0.9em; +} + +div.sphinxsidebar { + font-size: 0.9em; + line-height: 1.5em; + width: 300px +} + +div.sphinxsidebarwrapper{ + padding: 20px 0; +} + +div.sphinxsidebar h3, +div.sphinxsidebar h4 { + font-family: 'Lucida Grande', 'Lucida Sans Unicode', Geneva, Verdana, Arial, sans-serif; + color: #222222; + font-size: 1.2em; + font-weight: bold; + margin: 0; + padding: 5px 10px; + text-shadow: 1px 1px 0 white +} + +div.sphinxsidebar h3 a { + color: #444444; +} + +div.sphinxsidebar p { + color: #888888; + padding: 5px 20px; + margin: 0.5em 0px; +} + +div.sphinxsidebar p.topless { +} + +div.sphinxsidebar ul { + margin: 10px 10px 10px 20px; + padding: 0; + color: #000000; +} + +div.sphinxsidebar a { + color: #444444; +} + +div.sphinxsidebar a:hover { + color: #E32E00; +} + +div.sphinxsidebar input { + border: 1px solid #cccccc; + font-family: sans-serif; + font-size: 1.1em; + padding: 0.15em 0.3em; +} + +div.sphinxsidebar input[type=text]{ + margin-left: 20px; +} + +/* -- body styles ----------------------------------------------------------- */ + +a { + color: #005B81; + text-decoration: none; +} + +a:hover { + color: #E32E00; +} + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + font-family: 'Lucida Grande', 'Lucida Sans Unicode', Geneva, Verdana, Arial, sans-serif; + font-weight: bold; + color: #069; + margin: 30px 0px 10px 0px; + padding: 5px 0 5px 0px; + text-shadow: 0px 1px 0 white; + border-bottom: 1px solid #C8D5E3; +} + +div.body h1 { margin-top: 0; font-size: 165%; } +div.body h2 { font-size: 135%; } +div.body h3 { font-size: 120%; } +div.body h4 { font-size: 110%; } +div.body h5 { font-size: 100%; } +div.body h6 { font-size: 100%; } + +a.headerlink { + color: #c60f0f; + font-size: 0.8em; + padding: 0 4px 0 4px; + text-decoration: none; +} + +a.headerlink:hover { + background-color: #c60f0f; + color: white; +} + +div.body p, div.body dd, div.body li { + line-height: 1.5em; +} + +div.admonition p.admonition-title + p { + display: inline; +} + +div.highlight{ + background-color: white; +} + +div.note { + background-color: #eeeeee; + border: 1px solid #cccccc; +} + +div.seealso { + background-color: #ffffcc; + border: 1px solid #ffff66; +} + +div.topic { + background-color: #fafafa; + border-width: 0; +} + +div.warning { + background-color: #ffe4e4; + border: 1px solid #ff6666; +} + + +p.admonition-title { + display: inline; +} + +p.admonition-title:after { + content: ":"; +} + +pre { + padding: 10px; + background-color: #fafafa; + color: #222222; + line-height: 1.5em; + font-size: 1.1em; + margin: 1.5em 0 1.5em 0; + -webkit-box-shadow: 0px 0px 4px #d8d8d8; + -moz-box-shadow: 0px 0px 4px #d8d8d8; + box-shadow: 0px 0px 4px #d8d8d8; +} + +tt { + color: #222222; + padding: 1px 2px; + font-size: 1.2em; + font-family: monospace; +} + +#table-of-contents ul { + padding-left: 2em; +} + +div.sphinxsidebarwrapper div a {margin: 0.7em;} \ No newline at end of file diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/boto_theme/static/pygments.css b/desktop/core/ext-py/boto-2.38.0/docs/source/boto_theme/static/pygments.css new file mode 100644 index 
0000000000000000000000000000000000000000..1f2d2b61871ec0f3e33776555e196834b754ae60 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/boto_theme/static/pygments.css @@ -0,0 +1,61 @@ +.hll { background-color: #ffffcc } +.c { color: #408090; font-style: italic } /* Comment */ +.err { border: 1px solid #FF0000 } /* Error */ +.k { color: #007020; font-weight: bold } /* Keyword */ +.o { color: #666666 } /* Operator */ +.cm { color: #408090; font-style: italic } /* Comment.Multiline */ +.cp { color: #007020 } /* Comment.Preproc */ +.c1 { color: #408090; font-style: italic } /* Comment.Single */ +.cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */ +.gd { color: #A00000 } /* Generic.Deleted */ +.ge { font-style: italic } /* Generic.Emph */ +.gr { color: #FF0000 } /* Generic.Error */ +.gh { color: #000080; font-weight: bold } /* Generic.Heading */ +.gi { color: #00A000 } /* Generic.Inserted */ +.go { color: #303030 } /* Generic.Output */ +.gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */ +.gs { font-weight: bold } /* Generic.Strong */ +.gu { color: #800080; font-weight: bold } /* Generic.Subheading */ +.gt { color: #0040D0 } /* Generic.Traceback */ +.kc { color: #007020; font-weight: bold } /* Keyword.Constant */ +.kd { color: #007020; font-weight: bold } /* Keyword.Declaration */ +.kn { color: #007020; font-weight: bold } /* Keyword.Namespace */ +.kp { color: #007020 } /* Keyword.Pseudo */ +.kr { color: #007020; font-weight: bold } /* Keyword.Reserved */ +.kt { color: #902000 } /* Keyword.Type */ +.m { color: #208050 } /* Literal.Number */ +.s { color: #4070a0 } /* Literal.String */ +.na { color: #4070a0 } /* Name.Attribute */ +.nb { color: #007020 } /* Name.Builtin */ +.nc { color: #0e84b5; font-weight: bold } /* Name.Class */ +.no { color: #60add5 } /* Name.Constant */ +.nd { color: #555555; font-weight: bold } /* Name.Decorator */ +.ni { color: #d55537; font-weight: bold } /* Name.Entity */ +.ne { color: #007020 } /* Name.Exception */ +.nf { color: #06287e } /* Name.Function */ +.nl { color: #002070; font-weight: bold } /* Name.Label */ +.nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */ +.nt { color: #062873; font-weight: bold } /* Name.Tag */ +.nv { color: #bb60d5 } /* Name.Variable */ +.ow { color: #007020; font-weight: bold } /* Operator.Word */ +.w { color: #bbbbbb } /* Text.Whitespace */ +.mf { color: #208050 } /* Literal.Number.Float */ +.mh { color: #208050 } /* Literal.Number.Hex */ +.mi { color: #208050 } /* Literal.Number.Integer */ +.mo { color: #208050 } /* Literal.Number.Oct */ +.sb { color: #4070a0 } /* Literal.String.Backtick */ +.sc { color: #4070a0 } /* Literal.String.Char */ +.sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */ +.s2 { color: #4070a0 } /* Literal.String.Double */ +.se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */ +.sh { color: #4070a0 } /* Literal.String.Heredoc */ +.si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */ +.sx { color: #c65d09 } /* Literal.String.Other */ +.sr { color: #235388 } /* Literal.String.Regex */ +.s1 { color: #4070a0 } /* Literal.String.Single */ +.ss { color: #517918 } /* Literal.String.Symbol */ +.bp { color: #007020 } /* Name.Builtin.Pseudo */ +.vc { color: #bb60d5 } /* Name.Variable.Class */ +.vg { color: #bb60d5 } /* Name.Variable.Global */ +.vi { color: #bb60d5 } /* Name.Variable.Instance */ +.il { color: #208050 } /* Literal.Number.Integer.Long */ \ No newline at end of file diff --git 
a/desktop/core/ext-py/boto-2.38.0/docs/source/boto_theme/theme.conf b/desktop/core/ext-py/boto-2.38.0/docs/source/boto_theme/theme.conf
new file mode 100644
index 0000000000000000000000000000000000000000..7d09085abb25456eb227e3d1e5c3f635f789f6a8
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/boto_theme/theme.conf
@@ -0,0 +1,3 @@
+[theme]
+inherit = basic
+stylesheet = boto.css
\ No newline at end of file
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/cloudfront_tut.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/cloudfront_tut.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cd33056ed55bd7846d17f47fe421900169fa7502
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/cloudfront_tut.rst
@@ -0,0 +1,197 @@
+.. _cloudfront_tut:
+
+==========
+CloudFront
+==========
+
+This new boto module provides an interface to Amazon's Content Service,
+CloudFront.
+
+.. warning::
+
+    This module is not well tested. Paging of distributions is not yet
+    supported. CNAME support is completely untested. Use with caution.
+    Feedback and bug reports are greatly appreciated.
+
+Creating a CloudFront connection
+--------------------------------
+If you've placed your credentials in your ``$HOME/.boto`` config file then you
+can simply create a CloudFront connection using::
+
+    >>> import boto
+    >>> c = boto.connect_cloudfront()
+
+If you do not have this file you will need to specify your AWS access key and
+secret access key::
+
+    >>> import boto
+    >>> c = boto.connect_cloudfront('your-aws-access-key-id', 'your-aws-secret-access-key')
+
+Working with CloudFront Distributions
+-------------------------------------
+Create a new :class:`boto.cloudfront.distribution.Distribution`::
+
+    >>> origin = boto.cloudfront.origin.S3Origin('mybucket.s3.amazonaws.com')
+    >>> distro = c.create_distribution(origin=origin, enabled=False, comment='My new Distribution')
+    >>> distro.domain_name
+    u'd2oxf3980lnb8l.cloudfront.net'
+    >>> distro.id
+    u'ECH69MOIW7613'
+    >>> distro.status
+    u'InProgress'
+    >>> distro.config.comment
+    u'My new Distribution'
+    >>> distro.config.origin
+    <S3Origin: mybucket.s3.amazonaws.com>
+    >>> distro.config.caller_reference
+    u'31b8d9cf-a623-4a28-b062-a91856fac6d0'
+    >>> distro.config.enabled
+    False
+
+Note that a new caller reference is created automatically, using
+uuid.uuid4(). The :class:`boto.cloudfront.distribution.Distribution`,
+:class:`boto.cloudfront.distribution.DistributionConfig` and
+:class:`boto.cloudfront.distribution.DistributionSummary` objects are defined
+in the :mod:`boto.cloudfront.distribution` module.
+
+To get a listing of all current distributions::
+
+    >>> rs = c.get_all_distributions()
+    >>> rs
+    [<boto.cloudfront.distribution.DistributionSummary instance>,
+     <boto.cloudfront.distribution.DistributionSummary instance>]
+
+This returns a list of :class:`boto.cloudfront.distribution.DistributionSummary`
+objects. Note that paging is not yet supported!
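+
+As a quick sanity check you can loop over those summaries and print a few of
+their attributes - a minimal sketch, reusing the example values shown above
+(``id``, ``domain_name`` and ``status`` are attributes of the summary
+objects)::
+
+    >>> for ds in rs:
+    ...     print '%s %s %s' % (ds.id, ds.domain_name, ds.status)
+    ECH69MOIW7613 d2oxf3980lnb8l.cloudfront.net InProgress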
To get a +:class:`boto.cloudfront.distribution.Distribution` object from a +:class:`boto.cloudfront.distribution.DistributionSummary` object:: + + >>> ds = rs[1] + >>> distro = ds.get_distribution() + >>> distro.domain_name + u'd2oxf3980lnb8l.cloudfront.net' + +To change a property of a distribution object:: + + >>> distro.comment + u'My new Distribution' + >>> distro.update(comment='This is a much better comment') + >>> distro.comment + 'This is a much better comment' + +You can also enable/disable a distribution using the following +convenience methods:: + + >>> distro.enable() # just calls distro.update(enabled=True) + +or:: + + >>> distro.disable() # just calls distro.update(enabled=False) + +The only attributes that can be updated for a Distribution are +comment, enabled and cnames. + +To delete a :class:`boto.cloudfront.distribution.Distribution`:: + + >>> distro.delete() + +Invalidating CloudFront Distribution Paths +------------------------------------------ +Invalidate a list of paths in a CloudFront distribution:: + + >>> paths = ['/path/to/file1.html', '/path/to/file2.html', ...] + >>> inval_req = c.create_invalidation_request(u'ECH69MOIW7613', paths) + >>> print inval_req + <InvalidationBatch: IFCT7K03VUETK> + >>> print inval_req.id + u'IFCT7K03VUETK' + >>> print inval_req.paths + [u'/path/to/file1.html', u'/path/to/file2.html', ...] + +.. warning:: + + Each CloudFront invalidation request can only specify up to 1000 paths. If + you need to invalidate more than 1000 paths you will need to split up the + paths into groups of 1000 or less and create multiple invalidation requests. + +This will return a :class:`boto.cloudfront.invalidation.InvalidationBatch` +object representing the invalidation request. You can also fetch a single +invalidation request for a given distribution using +``invalidation_request_status``:: + + >>> inval_req = c.invalidation_request_status(u'ECH69MOIW7613', u'IFCT7K03VUETK') + >>> print inval_req + <InvalidationBatch: IFCT7K03VUETK> + +The first parameter is the CloudFront distribution id the request belongs to +and the second parameter is the invalidation request id. + +It's also possible to get *all* invalidations for a given CloudFront +distribution:: + + >>> invals = c.get_invalidation_requests(u'ECH69MOIW7613') + >>> print invals + +This will return an instance of +:class:`boto.cloudfront.invalidation.InvalidationListResultSet` which is an +iterable object that contains a list of +:class:`boto.cloudfront.invalidation.InvalidationSummary` objects that describe +each invalidation request and its status:: + + >>> for inval in invals: + ... print 'Object: %s, ID: %s, Status: %s' % (inval, inval.id, inval.status) + Object: <InvalidationSummary: ICXT2K02SUETK>, ID: ICXT2K02SUETK, Status: Completed + Object: <InvalidationSummary: ITV9SV0PDNY1Y>, ID: ITV9SV0PDNY1Y, Status: Completed + Object: <InvalidationSummary: I1X3F6N0PLGJN5>, ID: I1X3F6N0PLGJN5, Status: Completed + Object: <InvalidationSummary: I1F3G9N0ZLGKN2>, ID: I1F3G9N0ZLGKN2, Status: Completed + ... + +Simply iterating over the +:class:`boto.cloudfront.invalidation.InvalidationListResultSet` object will +automatically paginate the results on-the-fly as needed by repeatedly +requesting more results from CloudFront until there are none left.
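+ +Returning to the warning above: because a single request is capped at 1000 +paths, a larger list must be split into batches. A minimal sketch of doing so +(``all_paths`` is a hypothetical list; ``c`` and the distribution id come from +the examples above):: + + >>> all_paths = ['/path/to/file%d.html' % i for i in range(2500)] # hypothetical + >>> for i in range(0, len(all_paths), 1000): + ... c.create_invalidation_request(u'ECH69MOIW7613', all_paths[i:i + 1000])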
+ +If you wish to paginate the results manually you can do so by specifying the +``max_items`` option when calling ``get_invalidation_requests``:: + + >>> invals = c.get_invalidation_requests(u'ECH69MOIW7613', max_items=2) + >>> print len(list(invals)) + 2 + >>> for inval in invals: + ... print 'Object: %s, ID: %s, Status: %s' % (inval, inval.id, inval.status) + Object: <InvalidationSummary: ICXT2K02SUETK>, ID: ICXT2K02SUETK, Status: Completed + Object: <InvalidationSummary: ITV9SV0PDNY1Y>, ID: ITV9SV0PDNY1Y, Status: Completed + +In this case, iterating over the +:class:`boto.cloudfront.invalidation.InvalidationListResultSet` object will +*only* make a single request to CloudFront and *only* ``max_items`` +invalidation requests are returned by the iterator. To get the next "page" of +results pass the ``next_marker`` attribute of the previous +:class:`boto.cloudfront.invalidation.InvalidationListResultSet` object as the +``marker`` option to the next call to ``get_invalidation_requests``:: + + >>> invals = c.get_invalidation_requests(u'ECH69MOIW7613', max_items=10, marker=invals.next_marker) + >>> print len(list(invals)) + 2 + >>> for inval in invals: + ... print 'Object: %s, ID: %s, Status: %s' % (inval, inval.id, inval.status) + Object: <InvalidationSummary: I1X3F6N0PLGJN5>, ID: I1X3F6N0PLGJN5, Status: Completed + Object: <InvalidationSummary: I1F3G9N0ZLGKN2>, ID: I1F3G9N0ZLGKN2, Status: Completed + +You can get the :class:`boto.cloudfront.invalidation.InvalidationBatch` object +representing the invalidation request pointed to by a +:class:`boto.cloudfront.invalidation.InvalidationSummary` object using:: + + >>> inval_req = inval.get_invalidation_request() + >>> print inval_req + +Similarly you can get the parent +:class:`boto.cloudfront.distribution.Distribution` object for the invalidation +request from a :class:`boto.cloudfront.invalidation.InvalidationSummary` object +using:: + + >>> dist = inval.get_distribution() + >>> print dist + diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/cloudsearch_tut.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/cloudsearch_tut.rst new file mode 100644 index 0000000000000000000000000000000000000000..13ffd5ef75e17d5c8b51e7ce8268a929e045f3f9 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/cloudsearch_tut.rst @@ -0,0 +1,433 @@ +.. _cloudsearch_tut: + +=============================================== +An Introduction to boto's Cloudsearch interface +=============================================== + +This tutorial focuses on the boto interface to AWS' Cloudsearch_. This tutorial +assumes that you have boto already downloaded and installed. + +.. _Cloudsearch: http://aws.amazon.com/cloudsearch/ + +Creating a Connection +--------------------- +The first step in accessing CloudSearch is to create a connection to the service. + +The recommended method of doing this is as follows:: + + >>> import boto.cloudsearch + >>> conn = boto.cloudsearch.connect_to_region("us-west-2", + ... aws_access_key_id='', + ... aws_secret_access_key='') + +At this point, the variable conn will point to a CloudSearch connection object +in the us-west-2 region. Available regions for cloudsearch can be found +`here <http://docs.aws.amazon.com/general/latest/gr/rande.html#cloudsearch_region>`_. +In this example, the AWS access key and AWS secret key are +passed in to the method explicitly.
Alternatively, you can set the environment +variables: + +* `AWS_ACCESS_KEY_ID` - Your AWS Access Key ID +* `AWS_SECRET_ACCESS_KEY` - Your AWS Secret Access Key + +and then simply call:: + + >>> import boto.cloudsearch + >>> conn = boto.cloudsearch.connect_to_region("us-west-2") + +In either case, conn will point to the Connection object which we will use +throughout the remainder of this tutorial. + +Creating a Domain +----------------- + +Once you have a connection established with the CloudSearch service, you will +want to create a domain. A domain encapsulates the data that you wish to index, +as well as indexes and metadata relating to it:: + + >>> from boto.cloudsearch.domain import Domain + >>> domain = Domain(conn, conn.create_domain('demo')) + +This domain can be used to control access policies, indexes, and the actual +document service, which you will use to index and search. + +Setting access policies +----------------------- + +Before you can connect to a document service, you need to set the correct +access properties. For example, if you were connecting from 192.168.1.0, you +could give yourself access as follows:: + + >>> our_ip = '192.168.1.0' + + >>> # Allow our IP address to access the document and search services + >>> policy = domain.get_access_policies() + >>> policy.allow_search_ip(our_ip) + >>> policy.allow_doc_ip(our_ip) + +You can use the :py:meth:`allow_search_ip +<boto.cloudsearch.optionstatus.ServicePoliciesStatus.allow_search_ip>` and +:py:meth:`allow_doc_ip <boto.cloudsearch.optionstatus.ServicePoliciesStatus.allow_doc_ip>` +methods to give different CIDR blocks access to searching and the document +service respectively. + +Creating index fields +--------------------- + +Each domain can have up to twenty index fields which are indexed by the +CloudSearch service. For each index field, you will need to specify whether +it's a text or integer field, as well as, optionally, a default value:: + + >>> # Create a 'text' index field called 'username' + >>> uname_field = domain.create_index_field('username', 'text') + + >>> # Epoch time of when the user last did something + >>> time_field = domain.create_index_field('last_activity', + ... 'uint', + ... default=0) + +It is also possible to mark an index field as a facet. Doing so allows a search +query to return categories into which results can be grouped, or to create +drill-down categories:: + + >>> # But it would be neat to drill down into different countries + >>> loc_field = domain.create_index_field('location', 'text', facet=True) + +Finally, you can also mark a snippet of text as being able to be returned +directly in your search query by using the ``result`` option:: + + >>> # Directly insert user snippets in our results + >>> snippet_field = domain.create_index_field('snippet', 'text', result=True) + +You can add up to 20 index fields in this manner:: + + >>> follower_field = domain.create_index_field('follower_count', + ... 'uint', + ... default=0) + +Adding Documents to the Index +----------------------------- + +Now, we can add some documents to our new search domain. First, you will need a +document service object through which queries are sent:: + + >>> doc_service = domain.get_document_service() + +For this example, we will use a pre-populated list of sample content for our +import.
You would normally pull such data from your database or another +document store:: + + >>> users = [ + { + 'id': 1, + 'username': 'dan', + 'last_activity': 1334252740, + 'follower_count': 20, + 'location': 'USA', + 'snippet': 'Dan likes watching sunsets and rock climbing', + }, + { + 'id': 2, + 'username': 'dankosaur', + 'last_activity': 1334252904, + 'follower_count': 1, + 'location': 'UK', + 'snippet': 'Likes to dress up as a dinosaur.', + }, + { + 'id': 3, + 'username': 'danielle', + 'last_activity': 1334252969, + 'follower_count': 100, + 'location': 'DE', + 'snippet': 'Just moved to Germany!' + }, + { + 'id': 4, + 'username': 'daniella', + 'last_activity': 1334253279, + 'follower_count': 7, + 'location': 'USA', + 'snippet': 'Just like Dan, I like to watch a good sunset, but heights scare me.', + } + ] + +When adding documents to our document service, we will batch them together. You +can schedule a document to be added by using the :py:meth:`add +<boto.cloudsearch.document.DocumentServiceConnection.add>` method. Whenever you are adding a +document, you must provide a unique ID, a version ID, and the actual document +to be indexed. In this case, we are using the user ID as our unique ID. The +version ID is used to determine which is the latest version of an object to be +indexed. If you wish to update a document, you must use a higher version ID. In +this case, we are using the time of the user's last activity as a version +number:: + + >>> for user in users: + ... doc_service.add(user['id'], user['last_activity'], user) + +When you are ready to send the batched request to the document service, you can +do so with the :py:meth:`commit +<boto.cloudsearch.document.DocumentServiceConnection.commit>` method. Note that +cloudsearch will charge per 1000 batch uploads. Each batch upload must be under +5MB:: + + >>> result = doc_service.commit() + +The result is an instance of :py:class:`CommitResponse +<boto.cloudsearch.document.CommitResponse>` which will make the plain +dictionary response a nice object (i.e. ``result.adds``, ``result.deletes``) and raise an +exception for us if any of our documents failed to commit. + +After you have successfully committed some documents to cloudsearch, you must +use :py:meth:`clear_sdf +<boto.cloudsearch.document.DocumentServiceConnection.clear_sdf>` if you wish +to use the same document service connection again, so that its internal cache is +cleared. + +Searching Documents +------------------- + +Now, let's try performing a search. First, we will need a +SearchServiceConnection:: + + >>> search_service = domain.get_search_service() + +A standard search will return documents which contain the exact words being +searched for:: + + >>> results = search_service.search(q="dan") + >>> results.hits + 2 + >>> map(lambda x: x['id'], results) + [u'1', u'4'] + +The standard search does not look at word order:: + + >>> results = search_service.search(q="dinosaur dress") + >>> results.hits + 1 + >>> map(lambda x: x['id'], results) + [u'2'] + +It's also possible to do more complex queries using the bq argument (Boolean +Query). When you are using bq, your search terms must be enclosed in single +quotes:: + + >>> results = search_service.search(bq="'dan'") + >>> results.hits + 2 + >>> map(lambda x: x['id'], results) + [u'1', u'4'] + +When you are using boolean queries, it's also possible to use wildcards to +extend your search to all words which start with your search terms:: + + >>> results = search_service.search(bq="'dan*'") + >>> results.hits + 4 + >>> map(lambda x: x['id'], results) + [u'1', u'2', u'3', u'4'] + +The boolean query also allows you to create more complex queries.
You can OR +terms together using "|", AND terms together using "+" or a space, and you can +remove words from the query using the "-" operator:: + + >>> results = search_service.search(bq="'watched|moved'") + >>> results.hits + 2 + >>> map(lambda x: x['id'], results) + [u'3', u'4'] + +By default, the search will return 10 results but it is possible to adjust this +by using the size argument as follows:: + + >>> results = search_service.search(bq="'dan*'", size=2) + >>> results.hits + 4 + >>> map(lambda x: x['id'], results) + [u'1', u'2'] + +It is also possible to offset the start of the search by using the start +argument as follows:: + + >>> results = search_service.search(bq="'dan*'", start=2) + >>> results.hits + 4 + >>> map(lambda x: x['id'], results) + [u'3', u'4'] + + +Ordering search results and rank expressions +-------------------------------------------- + +If your search query is going to return many results, it is good to be able to +sort them. You can order your search results by using the rank argument. You are +able to sort on any fields which have the ``result`` option turned on:: + + >>> results = search_service.search(bq=query, rank=['-follower_count']) + +You can also create your own rank expressions to sort your results according to +other criteria, such as showing the most recently active user, or combining the +recency score with the text_relevance:: + + >>> domain.create_rank_expression('recently_active', 'last_activity') + + >>> domain.create_rank_expression('activish', + ... 'text_relevance + ((follower_count/(time() - last_activity))*1000)') + + >>> results = search_service.search(bq=query, rank=['-recently_active']) + +Viewing and Adjusting Stemming for a Domain +------------------------------------------- + +A stemming dictionary maps related words to a common stem. A stem is +typically the root or base word from which variants are derived. For +example, run is the stem of running and ran. During indexing, Amazon +CloudSearch uses the stemming dictionary when it performs +text-processing on text fields. At search time, the stemming +dictionary is used to perform text-processing on the search +request. This enables matching on variants of a word. For example, if +you map the term running to the stem run and then search for running, +the request matches documents that contain run as well as running. + +To get the current stemming dictionary defined for a domain, use the +:py:meth:`get_stemming <boto.cloudsearch.domain.Domain.get_stemming>` method:: + + >>> stems = domain.get_stemming() + >>> stems + {u'stems': {}} + >>> + +This returns a dictionary object that can be manipulated directly to +add additional stems for your search domain by adding pairs of term:stem +to the stems dictionary:: + + >>> stems['stems']['running'] = 'run' + >>> stems['stems']['ran'] = 'run' + >>> stems + {u'stems': {u'ran': u'run', u'running': u'run'}} + >>> + +This has changed the value locally. To update the information in +Amazon CloudSearch, you need to save the data:: + + >>> stems.save() + +You can also access certain CloudSearch-specific attributes related to +the stemming dictionary defined for your domain:: + + >>> stems.status + u'RequiresIndexDocuments' + >>> stems.creation_date + u'2012-05-01T12:12:32Z' + >>> stems.update_date + u'2012-05-01T12:12:32Z' + >>> stems.update_version + 19 + >>> + +The status indicates that, because you have changed the stems associated +with the domain, you will need to re-index the documents in the domain +before the new stems are used.
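+ +Re-indexing is triggered with the CloudSearch ``IndexDocuments`` action. A +hedged sketch, assuming the connection-level ``index_documents`` call in +:py:mod:`boto.cloudsearch.layer1` wraps that action:: + + >>> conn.index_documents('demo') # 'demo' is the domain created earlier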
+ +Viewing and Adjusting Stopwords for a Domain +-------------------------------------------- + +Stopwords are words that should typically be ignored both during +indexing and at search time because they are either insignificant or +so common that including them would result in a massive number of +matches. + +To view the stopwords currently defined for your domain, use the +:py:meth:`get_stopwords <boto.cloudsearch.domain.Domain.get_stopwords>` method:: + + >>> stopwords = domain.get_stopwords() + >>> stopwords + {u'stopwords': [u'a', + u'an', + u'and', + u'are', + u'as', + u'at', + u'be', + u'but', + u'by', + u'for', + u'in', + u'is', + u'it', + u'of', + u'on', + u'or', + u'the', + u'to', + u'was']} + >>> + +You can add additional stopwords by simply appending the values to the +list:: + + >>> stopwords['stopwords'].append('foo') + >>> stopwords['stopwords'].append('bar') + >>> stopwords + +Similarly, you could remove currently defined stopwords from the list. +To save the changes, use the :py:meth:`save +<boto.cloudsearch.optionstatus.OptionStatus.save>` method:: + + >>> stopwords.save() + +The stopwords object has attributes, similar to those described above for +stemming, that provide additional information about the stopwords in your domain. + + +Viewing and Adjusting Synonyms for a Domain +------------------------------------------- + +You can configure synonyms for terms that appear in the data you are +searching. That way, if a user searches for the synonym rather than +the indexed term, the results will include documents that contain the +indexed term. + +If you want two terms to match the same documents, you must define +them as synonyms of each other. For example:: + + cat, feline + feline, cat + +To view the synonyms currently defined for your domain, use the +:py:meth:`get_synonyms <boto.cloudsearch.domain.Domain.get_synonyms>` method:: + + >>> synonyms = domain.get_synonyms() + >>> synonyms + {u'synonyms': {}} + >>> + +You can define new synonyms by adding new term:synonyms entries to the +synonyms dictionary object:: + + >>> synonyms['synonyms']['cat'] = ['feline', 'kitten'] + >>> synonyms['synonyms']['dog'] = ['canine', 'puppy'] + +To save the changes, use the :py:meth:`save +<boto.cloudsearch.optionstatus.OptionStatus.save>` method:: + + >>> synonyms.save() + +The synonyms object has attributes, similar to those described above for +stemming, that provide additional information about the synonyms in your domain. + +Deleting Documents +------------------ + +It is also possible to delete documents:: + + >>> import time + >>> from datetime import datetime + + >>> doc_service = domain.get_document_service() + + >>> # Again we'll cheat and use the current epoch time as our version number + + >>> doc_service.delete(4, int(time.mktime(datetime.utcnow().timetuple()))) + >>> doc_service.commit() diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/cloudwatch_tut.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/cloudwatch_tut.rst new file mode 100644 index 0000000000000000000000000000000000000000..027cd98024f89ed257b1a46aeac616868d4ad98f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/cloudwatch_tut.rst @@ -0,0 +1,117 @@ +.. _cloudwatch_tut: + +========== +CloudWatch +========== + +First, make sure you have something to monitor. You can either create a +LoadBalancer or enable monitoring on an existing EC2 instance. To enable +monitoring, you can either call the monitor_instance method on the +EC2Connection object or call the monitor method on the Instance object.
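+ +For example, enabling monitoring on an existing instance might look like the +following sketch (the region and instance id here are illustrative):: + + >>> import boto.ec2 + >>> ec2 = boto.ec2.connect_to_region('us-west-2') + >>> ec2.monitor_instance('i-4ca81747')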
+ +It takes a while for the monitoring data to start accumulating but once +it does, you can do this:: + + >>> import boto.ec2.cloudwatch + >>> c = boto.ec2.cloudwatch.connect_to_region('us-west-2') + >>> metrics = c.list_metrics() + >>> metrics + [Metric:DiskReadBytes, + Metric:CPUUtilization, + Metric:DiskWriteOps, + Metric:DiskWriteOps, + Metric:DiskReadOps, + Metric:DiskReadBytes, + Metric:DiskReadOps, + Metric:CPUUtilization, + Metric:DiskWriteOps, + Metric:NetworkIn, + Metric:NetworkOut, + Metric:NetworkIn, + Metric:DiskReadBytes, + Metric:DiskWriteBytes, + Metric:DiskWriteBytes, + Metric:NetworkIn, + Metric:NetworkIn, + Metric:NetworkOut, + Metric:NetworkOut, + Metric:DiskReadOps, + Metric:CPUUtilization, + Metric:DiskReadOps, + Metric:CPUUtilization, + Metric:DiskWriteBytes, + Metric:DiskWriteBytes, + Metric:DiskReadBytes, + Metric:NetworkOut, + Metric:DiskWriteOps] + + +The list_metrics call will return a list of all of the available metrics +that you can query against. Each entry in the list is a Metric object. +As you can see from the list above, some of the metrics are repeated. The repeated metrics are across different dimensions (per-instance, per-image type, per instance type), which can be identified by looking at the dimensions property. + +Because, for this example, I'm only monitoring a single instance, the set +of metrics available to me is fairly limited. If I were monitoring many +instances, using many different instance types and AMIs and also several +load balancers, the list of available metrics would grow considerably. + +Once you have the list of available metrics, you can actually +query the CloudWatch system for that metric. +Let's choose the CPU utilization metric for one of the ImageIds:: + + >>> m_image = metrics[7] + >>> m_image + Metric:CPUUtilization + >>> m_image.dimensions + {u'ImageId': [u'ami-6ac2a85a']} + +Let's choose another CPU utilization metric for our instance:: + + >>> m = metrics[20] + >>> m + Metric:CPUUtilization + >>> m.dimensions + {u'InstanceId': [u'i-4ca81747']} + +The Metric object has a query method that lets us actually perform +the query against the collected data in CloudWatch. To call that, +we need a start time and end time to control the time span of data +that we are interested in. For this example, let's say we want the +data for the previous hour:: + + >>> import datetime + >>> end = datetime.datetime.utcnow() + >>> start = end - datetime.timedelta(hours=1) + +We also need to supply the Statistic that we want reported and +the Units to use for the results. The Statistic can be one of these +values:: + + ['Minimum', 'Maximum', 'Sum', 'Average', 'SampleCount'] + +And Units must be one of the following:: + + ['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', 'Terabits/Second', 'Count/Second', None] + +The query method also takes an optional parameter, period. This +parameter controls the granularity (in seconds) of the data returned. +The smallest period is 60 seconds and the value must be a multiple +of 60 seconds.
So, let's ask for the average as a percent:: + + >>> datapoints = m.query(start, end, 'Average', 'Percent') + >>> len(datapoints) + 60 + +Our period was 60 seconds and our duration was one hour so +we should get 60 data points back and we can see that we did. +Each element in the datapoints list is a DataPoint object +which is a simple subclass of a Python dict object. Each +DataPoint object contains all of the information available +about that particular data point:: + + >>> d = datapoints[0] + >>> d + {u'Timestamp': datetime.datetime(2014, 6, 23, 22, 25), + u'Average': 20.0, + u'Unit': u'Percent'} + +My server obviously isn't very busy right now! diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/commandline.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/commandline.rst new file mode 100644 index 0000000000000000000000000000000000000000..6b60482750f31935b5540c203ff72dcabee6a504 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/commandline.rst @@ -0,0 +1,85 @@ +.. _ref-boto_commandline: + +================== +Command Line Tools +================== + +Introduction +============ + +Boto ships with a number of command line utilities, which are installed +when the package is installed. This guide outlines which ones are available +& what they do. + +.. note:: + + If you're not already depending on these utilities, you may wish to check + out the AWS-CLI (http://aws.amazon.com/cli/ - `User Guide`_ & + `Reference Guide`_). It provides much wider & more complete access to the + AWS services. + + .. _`User Guide`: http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html + .. _`Reference Guide`: http://docs.aws.amazon.com/cli/latest/reference/ + +The included utilities available are: + +``asadmin`` + Works with Autoscaling + +``bundle_image`` + Creates a bundled AMI in S3 based on an EC2 instance + +``cfadmin`` + Works with CloudFront & invalidations + +``cq`` + Works with SQS queues + +``cwutil`` + Works with CloudWatch + +``dynamodb_dump`` +``dynamodb_load`` + Handle dumping/loading data from DynamoDB tables + +``elbadmin`` + Manages Elastic Load Balancer instances + +``fetch_file`` + Downloads an S3 key to disk + +``glacier`` + Lists vaults, jobs & uploads files to Glacier + +``instance_events`` + Lists all events for EC2 reservations + +``kill_instance`` + Kills a list of EC2 instances + +``launch_instance`` + Launches an EC2 instance + +``list_instances`` + Lists all of your EC2 instances + +``lss3`` + Lists what keys you have within a bucket in S3 + +``mturk`` + Provides a number of facilities for interacting with Mechanical Turk + +``pyami_sendmail`` + Sends an email from the Pyami instance + +``route53`` + Interacts with the Route53 service + +``s3put`` + Uploads a directory or specific file(s) to S3 + +``sdbadmin`` + Allows for working with SimpleDB domains + +``taskadmin`` + A tool for working with the tasks in SimpleDB diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/conf.py b/desktop/core/ext-py/boto-2.38.0/docs/source/conf.py new file mode 100644 index 0000000000000000000000000000000000000000..4fbbf3fcfdd7089a3e596a68bfcdaecd4b917e59 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/conf.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- + +import os +import boto +import sys + +sys.path.append(os.path.join(os.path.dirname(__file__), 'extensions')) + +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', + 'githublinks'] +autoclass_content = "both" +templates_path = ['_templates'] +source_suffix = '.rst'
+master_doc = 'index' +project = u'boto' +copyright = u'2009,2010, Mitch Garnaat' +version = boto.__version__ +exclude_trees = [] +pygments_style = 'sphinx' +html_theme = 'boto_theme' +html_theme_path = ["."] +html_static_path = ['_static'] +htmlhelp_basename = 'botodoc' +latex_documents = [ + ('index', 'boto.tex', u'boto Documentation', + u'Mitch Garnaat', 'manual'), +] +intersphinx_mapping = {'http://docs.python.org/': None} +github_project_url = 'https://github.com/boto/boto/' + +try: + release = os.environ.get('SVN_REVISION', 'HEAD') + print release +except Exception, e: + print e + +html_title = "boto v%s" % version diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/contributing.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/contributing.rst new file mode 100644 index 0000000000000000000000000000000000000000..80821995e0c7e6e53c521528201e24c98b4fd0ca --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/contributing.rst @@ -0,0 +1,227 @@ +==================== +Contributing to Boto +==================== + + +Setting Up a Development Environment +==================================== + +While not strictly required, it is highly recommended to do development +in a virtualenv. You can install virtualenv using pip:: + + $ pip install virtualenv + +Once the package is installed, you'll have a ``virtualenv`` command you can +use to create a virtual environment:: + + $ virtualenv venv + +You can then activate the virtualenv:: + + $ . venv/bin/activate + +.. note:: + + You may also want to check out virtualenvwrapper_, which is a set of + extensions to virtualenv that makes it easy to manage multiple virtual + environments. + +A requirements.txt is included with boto which contains all the additional +packages needed for boto development. You can install these packages by +running:: + + $ pip install -r requirements.txt + + +Running the Tests +================= + +All of the tests for boto are under the ``tests/`` directory. The tests for +boto have been split into two main categories, unit and integration tests: + +* **unit** - These are tests that do not talk to any AWS services. Anyone + should be able to run these tests without having any credentials + configured. These are the types of tests that could be run in something + like a public CI server. These tests tend to be fast. + +* **integration** - These are tests that will talk to AWS services, and + will typically require a boto config file with valid credentials. + Due to the nature of these tests, they tend to take a while to run. + Also keep in mind anyone who runs these tests will incur any usage + fees associated with the various AWS services. + +To run all the unit tests, cd to the ``tests/`` directory and run:: + + $ python test.py unit + +You should see output like this:: + + $ python test.py unit + ................................ + ---------------------------------------------------------------------- + Ran 32 tests in 0.075s + + OK + +To run the integration tests, run:: + + $ python test.py integration + +Note that running the integration tests may take a while. + +Various integration tests have been tagged with service names to allow +you to easily run tests by service type. For example, to run the ec2 +integration tests you can run:: + + $ python test.py -t ec2 + +You can specify the ``-t`` argument multiple times. For example, to +run the s3 and ec2 tests you can run:: + + $ python test.py -t ec2 -t s3 + +.. warning:: + + In the examples above, no top level directory was specified.
By default, + nose will assume the current working directory, so the above command is + equivalent to:: + + $ python test.py -t ec2 -t s3 . + + Be sure that you are in the ``tests/`` directory when running the tests, + or explicitly specify the top level directory. For example, if you are in the + root directory of the boto repo, you could run the ec2 and s3 tests by + running:: + + $ python tests/test.py -t ec2 -t s3 tests/ + + +You can use nose's collect plugin to see what tests are associated with each +service tag:: + + $ python tests/test.py -t s3 -t ec2 --with-id --collect -v + + +Testing Details +--------------- + +The ``tests/test.py`` script is a lightweight wrapper around nose_. In +general, you should be able to run ``nosetests`` directly instead of +``tests/test.py``. The ``tests/unit`` and ``tests/integration`` args +in the commands above were referring to directories. The command line +arguments are forwarded to nose when you use ``tests/test.py``. For example, +you can run:: + + $ python tests/test.py -x -vv tests/unit/cloudformation + +And the ``-x -vv tests/unit/cloudformation`` are forwarded to nose. See +the nose_ docs for the supported command line options, or run +``nosetests --help``. + +The only thing that ``tests/test.py`` does before invoking nose is to +inject an argument that specifies that any testcase tagged with "notdefault" +should not be run. A testcase may be tagged with "notdefault" if the test +author does not want everyone to run the tests. In general, there shouldn't be +many of these tests, but some reasons a test may be tagged "notdefault" +include: + +* An integration test that requires specific credentials. +* An interactive test (the S3 MFA tests require you to type in the S/N and + code). + +Tagging is done using nose's tagging_ plugin. To summarize, you can tag a +specific testcase by setting an attribute on the object. Nose provides +an ``attr`` decorator for convenience:: + + from nose.plugins.attrib import attr + + @attr('notdefault') + def test_s3_mfa(): + pass + +You can then run these tests by specifying:: + + nosetests -a 'notdefault' + +Or you can exclude any tests tagged with 'notdefault' by running:: + + nosetests -a '!notdefault' + +Conceptually, ``tests/test.py`` is injecting the "-a !notdefault" arg +into nosetests. + + +Testing Supported Python Versions +================================== + +Boto supports Python 2.6 and 2.7. An easy way to verify functionality +across multiple python versions is to use tox_. A tox.ini file is included +with boto. You can run tox with no args and it will automatically test +all supported python versions:: + + $ tox + GLOB sdist-make: boto/setup.py + py26 sdist-reinst: boto/.tox/dist/boto-2.4.1.zip + py26 runtests: commands[0] + ................................ + ---------------------------------------------------------------------- + Ran 32 tests in 0.089s + + OK + py27 sdist-reinst: boto/.tox/dist/boto-2.4.1.zip + py27 runtests: commands[0] + ................................ + ---------------------------------------------------------------------- + Ran 32 tests in 0.087s + + OK + ____ summary ____ + py26: commands succeeded + py27: commands succeeded + congratulations :) + + +Writing Documentation +===================== + +The boto docs use sphinx_ to generate documentation. All of the docs are +located in the ``docs/`` directory.
To generate the html documentation, cd +into the docs directory and run ``make html``:: + + $ cd docs + $ make html + +The generated documentation will be in the ``docs/build/html`` directory. +The source for the documentation is located in the ``docs/source`` directory, +and uses `restructured text`_ for the markup language. + + +.. _nose: http://readthedocs.org/docs/nose/en/latest/ +.. _tagging: http://nose.readthedocs.org/en/latest/plugins/attrib.html +.. _tox: http://tox.testrun.org/latest/ +.. _virtualenvwrapper: http://www.doughellmann.com/projects/virtualenvwrapper/ +.. _sphinx: http://sphinx.pocoo.org/ +.. _restructured text: http://sphinx.pocoo.org/rest.html + + +Merging A Branch (Core Devs) +============================ + +* All features/bugfixes should go through a review. + + * This includes new features added by core devs themselves. The usual + branch/pull-request/merge flow that happens for community contributions + should also apply to core. + +* Ensure there is proper test coverage. If there's a change in behavior, there + should be a test demonstrating the failure before the change & passing with + the change. + + * This helps ensure we don't regress in the future as well. + +* Merging of pull requests is typically done with + ``git merge --no-ff <branch>``. + + * GitHub's big green button is probably OK for very small PRs (like doc + fixes), but you can't run tests on GH, so most things should get pulled + down locally. diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/documentation.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/documentation.rst new file mode 100644 index 0000000000000000000000000000000000000000..d4999d99daa1f64126b50815bdf7c9f332f62c69 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/documentation.rst @@ -0,0 +1,59 @@ +.. _documentation: + +======================= +About the Documentation +======================= + +boto's documentation uses the Sphinx__ documentation system, which in turn is +based on docutils__. The basic idea is that lightly-formatted plain-text +documentation is transformed into HTML, PDF, and any other output format. + +__ http://sphinx.pocoo.org/ +__ http://docutils.sf.net/ + +To actually build the documentation locally, you'll currently need to install +Sphinx -- ``easy_install Sphinx`` should do the trick. + +Then, building the html is easy; just ``make html`` from the ``docs`` directory. + +To get started contributing, you'll want to read the `ReStructuredText +Primer`__. After that, you'll want to read about the `Sphinx-specific markup`__ +that's used to manage metadata, indexing, and cross-references. + +__ http://sphinx.pocoo.org/rest.html +__ http://sphinx.pocoo.org/markup/ + +The main thing to keep in mind as you write and edit docs is that the more +semantic markup you can add, the better. So:: + + Import ``boto`` to your script... + +Isn't nearly as helpful as:: + + Add :mod:`boto` to your script... + +This is because Sphinx will generate a proper link for the latter, which greatly +helps readers. There's basically no limit to the amount of useful markup you can +add. + + +The fabfile +----------- + +There is a Fabric__ file that can be used to build and deploy the documentation +to a webserver that you have ssh access to.
+ +__ http://fabfile.org + +To build and deploy:: + + cd docs/ + fab deploy:remote_path='/var/www/folder/whatever' --hosts=user@host + +This will get the latest code from subversion, add the revision number to the +docs conf.py file, call ``make html`` to build the documentation, then tarball +it up, scp it to the host you specified, and untarball it in the folder you +specified, creating a symbolic link from the untarballed versioned folder to +``{remote_path}/boto-docs``. + + diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/dynamodb2_tut.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/dynamodb2_tut.rst new file mode 100644 index 0000000000000000000000000000000000000000..0e4b5b875f6959784434f414755fdf6ce7477049 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/dynamodb2_tut.rst @@ -0,0 +1,700 @@ +.. _dynamodb2_tut: + +=============================================== +An Introduction to boto's DynamoDB v2 interface +=============================================== + +This tutorial focuses on the boto interface to AWS' DynamoDB_ v2. This tutorial +assumes that you have boto already downloaded and installed. + +.. _DynamoDB: http://aws.amazon.com/dynamodb/ + +.. warning:: + + This tutorial covers the **SECOND** major release of DynamoDB (including + local secondary index support). The documentation for the original + version of DynamoDB (& boto's support for it) is at + :doc:`DynamoDB v1 <dynamodb_tut>`. + +The v2 DynamoDB API has both a high-level & low-level component. The low-level +API (contained primarily within ``boto.dynamodb2.layer1``) provides an +interface that closely matches what is provided by the service's API. It supports +all options available to the service. + +The high-level API attempts to make interacting with the service more natural +from Python. It supports most of the feature set. + + +The High-Level API +================== + +Most of the interaction centers around a single object, the ``Table``. Tables +act as a way to effectively namespace your records. If you're familiar with +database tables from an RDBMS, tables will feel somewhat familiar. + + +Creating a New Table +-------------------- + +To create a new table, you need to call ``Table.create`` & specify (at a +minimum) both the table's name as well as the key schema for the table:: + + >>> from boto.dynamodb2.fields import HashKey + >>> from boto.dynamodb2.table import Table + >>> users = Table.create('users', schema=[HashKey('username')]) + +Since both the key schema and local secondary indexes cannot be +modified after the table is created, you'll need to plan ahead of time how you +think the table will be used. Both the keys & indexes are also used for +querying, so you'll want to represent the data you'll need when querying +there as well. + +For the schema, you can either have a single ``HashKey`` or a combined +``HashKey+RangeKey``. The ``HashKey`` by itself should be thought of as a +unique identifier (for instance, like a username or UUID). It is typically +looked up as an exact value. +A ``HashKey+RangeKey`` combination is slightly different, in that the +``HashKey`` acts like a namespace/prefix & the ``RangeKey`` acts as a value +that can be referred to by a sorted range of values. + +For the local secondary indexes, you can choose from an ``AllIndex``, a +``KeysOnlyIndex`` or an ``IncludeIndex`` field. Each builds an index of values +that can be queried on. The ``AllIndex`` duplicates all values onto the index +(to prevent additional reads to fetch the data).
The ``KeysOnlyIndex`` +duplicates only the keys from the schema onto the index. The ``IncludeIndex`` +lets you specify a list of fieldnames to duplicate over. + +A full example:: + + >>> import boto.dynamodb2 + >>> from boto.dynamodb2.fields import HashKey, RangeKey, KeysOnlyIndex, GlobalAllIndex + >>> from boto.dynamodb2.table import Table + >>> from boto.dynamodb2.types import NUMBER + + # Uses your ``aws_access_key_id`` & ``aws_secret_access_key`` from either a + # config file or environment variable & the default region. + >>> users = Table.create('users', schema=[ + ... HashKey('username'), # defaults to STRING data_type + ... RangeKey('last_name'), + ... ], throughput={ + ... 'read': 5, + ... 'write': 15, + ... }, global_indexes=[ + ... GlobalAllIndex('EverythingIndex', parts=[ + ... HashKey('account_type'), + ... ], + ... throughput={ + ... 'read': 1, + ... 'write': 1, + ... }) + ... ], + ... # If you need to specify custom parameters, such as credentials or region, + ... # use the following: + ... # connection=boto.dynamodb2.connect_to_region('us-east-1') + ... ) + + +Using an Existing Table +----------------------- + +Once a table has been created, using it is relatively simple. You can either +specify just the ``table_name`` (allowing the object to lazily do an additional +call to get details about itself if needed) or provide the ``schema/indexes`` +again (same as what was used with ``Table.create``) to avoid extra overhead. + +Lazy example:: + + >>> from boto.dynamodb2.table import Table + >>> users = Table('users') + +Efficient example:: + + >>> from boto.dynamodb2.fields import HashKey, RangeKey, GlobalAllIndex + >>> from boto.dynamodb2.table import Table + >>> from boto.dynamodb2.types import NUMBER + >>> users = Table('users', schema=[ + ... HashKey('username'), + ... RangeKey('last_name'), + ... ], global_indexes=[ + ... GlobalAllIndex('EverythingIndex', parts=[ + ... HashKey('account_type'), + ... ]) + ... ]) + + +Creating a New Item +------------------- + +Once you have a ``Table`` instance, you can add new items to the table. There +are two ways to do this. + +The first is to use the ``Table.put_item`` method. Simply hand it a dictionary +of data & it will create the item on the server side. This dictionary should +be relatively flat (as you can nest in other dictionaries) & **must** contain +the keys used in the ``schema``. + +Example:: + + >>> from boto.dynamodb2.table import Table + >>> users = Table('users') + + # Create the new user. + >>> users.put_item(data={ + ... 'username': 'johndoe', + ... 'first_name': 'John', + ... 'last_name': 'Doe', + ... 'account_type': 'standard_user', + ... }) + True + +The alternative is to manually construct an ``Item`` instance & tell it to +``save`` itself. This is useful if the object will be around for a while & you +don't want to re-fetch it. + +Example:: + + >>> from boto.dynamodb2.items import Item + >>> from boto.dynamodb2.table import Table + >>> users = Table('users') + + # WARNING - This doesn't save it yet! + >>> janedoe = Item(users, data={ + ... 'username': 'janedoe', + ... 'first_name': 'Jane', + ... 'last_name': 'Doe', + ... 'account_type': 'standard_user', + ... }) + + # The data now gets persisted to the server. + >>> janedoe.save() + True + + +Getting an Item & Accessing Data +-------------------------------- + +With data now in DynamoDB, if you know the key of the item, you can fetch it +back out. Specify the key value(s) as kwargs to ``Table.get_item``.
+ +Example:: + + >>> from boto.dynamodb2.table import Table + >>> users = Table('users') + + >>> johndoe = users.get_item(username='johndoe', last_name='Doe') + +Once you have an ``Item`` instance, it presents a dictionary-like interface to +the data:: + + # Read a field out. + >>> johndoe['first_name'] + 'John' + + # Change a field (DOESN'T SAVE YET!). + >>> johndoe['first_name'] = 'Johann' + + # Delete data from it (DOESN'T SAVE YET!). + >>> del johndoe['account_type'] + + +Updating an Item +---------------- + +Just creating new items or changing only the in-memory version of the ``Item`` +isn't particularly effective. To persist the changes to DynamoDB, you have +three choices. + +The first is sending all the data with the expectation nothing has changed +since you read the data. DynamoDB will verify the data is in the original state +and, if so, will save all of the item's data. If that expectation fails, the +call will fail:: + + >>> from boto.dynamodb2.table import Table + >>> users = Table('users') + + >>> johndoe = users.get_item(username='johndoe', last_name='Doe') + >>> johndoe['first_name'] = 'Johann' + >>> johndoe['whatever'] = "man, that's just like your opinion" + >>> del johndoe['account_type'] + + # Affects all fields, even the ones not changed locally. + >>> johndoe.save() + True + +The second is a full overwrite. If you can be confident your version of the +data is the most correct, you can force an overwrite of the data:: + + >>> johndoe = users.get_item(username='johndoe', last_name='Doe') + >>> johndoe['first_name'] = 'Johann' + >>> johndoe['whatever'] = "Man, that's just like your opinion" + + # Specify ``overwrite=True`` to fully replace the data. + >>> johndoe.save(overwrite=True) + True + +The last is a partial update. If you've only modified certain fields, you +can send a partial update that only writes those fields, allowing other +(potentially changed) fields to go untouched:: + + >>> johndoe = users.get_item(username='johndoe', last_name='Doe') + >>> johndoe['first_name'] = 'Johann' + >>> johndoe['whatever'] = "man, that's just like your opinion" + >>> del johndoe['account_type'] + + # Partial update, only sending/affecting the + # ``first_name/whatever/account_type`` fields. + >>> johndoe.partial_save() + True + + +Deleting an Item +---------------- + +You can also delete items from the table. You have two choices, depending on +what data you have present. + +If you already have an ``Item`` instance, the easiest approach is just to call +``Item.delete``:: + + >>> johndoe.delete() + True + +If you don't have an ``Item`` instance & you don't want to incur the +``Table.get_item`` call to get it, you can use the ``Table.delete_item`` method:: + + >>> from boto.dynamodb2.table import Table + >>> users = Table('users') + + >>> users.delete_item(username='johndoe', last_name='Doe') + True + + +Batch Writing +------------- + +If you're loading a lot of data at a time, making use of batch writing can +both speed up the process & reduce the number of write requests made to the +service. + +Batch writing involves wrapping the calls you want batched in a context manager. +The context manager imitates the ``Table.put_item`` & ``Table.delete_item`` +APIs. Getting & using the context manager looks like:: + + >>> import time + >>> from boto.dynamodb2.table import Table + >>> users = Table('users') + + >>> with users.batch_write() as batch: + ... batch.put_item(data={ + ... 'username': 'anotherdoe', + ... 'first_name': 'Another', + ... 'last_name': 'Doe', + ... 
'date_joined': int(time.time()), + ... }) + ... batch.put_item(data={ + ... 'username': 'joebloggs', + ... 'first_name': 'Joe', + ... 'last_name': 'Bloggs', + ... 'date_joined': int(time.time()), + ... }) + ... batch.delete_item(username='janedoe', last_name='Doe') + +However, there are some limitations on what you can do within the context +manager. + +* It can't read data at all, nor can it perform any other batch operations. +* You can't put & delete the same data within a batch request. + +.. note:: + + Additionally, the context manager can only batch 25 items at a time for a + request (this is a DynamoDB limitation). It is handled for you so you can + keep writing additional items, but you should be aware that 100 ``put_item`` + calls is 4 batch requests, not 1. + + +Querying +-------- + +.. warning:: + + The ``Table`` object has both a ``query`` & a ``query_2`` method. If you + are writing new code, **DO NOT** use ``Table.query``. It presents results + in a different order than expected & is present strictly for + backward-compatibility. + +Manually fetching each item by itself isn't tenable for large datasets. +To cope with fetching many records, you can either perform a standard query, +query via a local secondary index or scan the entire table. + +A standard query typically gets run against a hash+range key combination. +Filter parameters are passed as kwargs & use a ``__`` to separate the fieldname +from the operator being used to filter the value. + +In terms of querying, our original schema is less than optimal. For the +following examples, we'll be using the following table setup:: + + >>> from boto.dynamodb2.fields import HashKey, RangeKey, GlobalAllIndex + >>> from boto.dynamodb2.table import Table + >>> from boto.dynamodb2.types import NUMBER + >>> import time + >>> users = Table.create('users2', schema=[ + ... HashKey('account_type'), + ... RangeKey('last_name'), + ... ], throughput={ + ... 'read': 5, + ... 'write': 15, + ... }, global_indexes=[ + ... GlobalAllIndex('DateJoinedIndex', parts=[ + ... HashKey('account_type'), + ... RangeKey('date_joined', data_type=NUMBER), + ... ], + ... throughput={ + ... 'read': 1, + ... 'write': 1, + ... }), + ... ]) + +And the following data:: + + >>> with users.batch_write() as batch: + ... batch.put_item(data={ + ... 'account_type': 'standard_user', + ... 'first_name': 'John', + ... 'last_name': 'Doe', + ... 'is_owner': True, + ... 'email': True, + ... 'date_joined': int(time.time()) - (60*60*2), + ... }) + ... batch.put_item(data={ + ... 'account_type': 'standard_user', + ... 'first_name': 'Jane', + ... 'last_name': 'Doering', + ... 'date_joined': int(time.time()) - 2, + ... }) + ... batch.put_item(data={ + ... 'account_type': 'standard_user', + ... 'first_name': 'Bob', + ... 'last_name': 'Doerr', + ... 'date_joined': int(time.time()) - (60*60*3), + ... }) + ... batch.put_item(data={ + ... 'account_type': 'super_user', + ... 'first_name': 'Alice', + ... 'last_name': 'Liddel', + ... 'is_owner': True, + ... 'email': True, + ... 'date_joined': int(time.time()) - 1, + ... }) + +When executing the query, you get an iterable back that contains your results. +These results may be spread over multiple requests as DynamoDB paginates them. +This is done transparently, but you should be aware it may take more than one +request. + +To run a query for last names starting with the letter "D":: + + >>> names_with_d = users.query_2( + ... account_type__eq='standard_user', + ... last_name__beginswith='D' + ... ) + + >>> for user in names_with_d: + ... 
print user['first_name'] + 'John' + 'Jane' + 'Bob' + +You can also reverse results (``reverse=True``) as well as limit them +(``limit=2``):: + + >>> rev_with_d = users.query_2( + ... account_type__eq='standard_user', + ... last_name__beginswith='D', + ... reverse=True, + ... limit=2 + ... ) + + >>> for user in rev_with_d: + ... print user['first_name'] + 'Bob' + 'Jane' + +You can also run queries against the global secondary indexes. Simply provide +the index name (``index='DateJoinedIndex'``) & filter parameters against its +fields:: + + # Users who joined within the last hour. + >>> recent = users.query_2( + ... account_type__eq='standard_user', + ... date_joined__gte=time.time() - (60 * 60), + ... index='DateJoinedIndex' + ... ) + + >>> for user in recent: + ... print user['first_name'] + 'Jane' + +By default, DynamoDB can return a large amount of data per-request (up to 1MB +of data). To prevent these requests from drowning other smaller gets, you can +specify a smaller page size via the ``max_page_size`` argument to +``Table.query_2`` & ``Table.scan``. Doing so looks like:: + + # Small pages yield faster responses & less potential of drowning other + # requests. + >>> all_users = users.query_2( + ... account_type__eq='standard_user', + ... date_joined__gte=0, + ... index='DateJoinedIndex', + ... max_page_size=10 + ... ) + + # Usage is the same, but now many smaller requests are done. + >>> for user in all_users: + ... print user['first_name'] + 'Bob' + 'John' + 'Jane' + +Finally, if you need to query on data that's not in either a key or in an +index, you can run a ``Table.scan`` across the whole table, which accepts a +similar but expanded set of filters. If you're familiar with the Map/Reduce +concept, this is akin to what DynamoDB does. + +.. warning:: + + Scans are eventually consistent & run over the entire table, so + relatively speaking, they're more expensive than plain queries or queries + against an LSI. + +An example scan of all records in the table looks like:: + + >>> all_users = users.scan() + +Filtering a scan looks like:: + + >>> owners_with_emails = users.scan( + ... is_owner__eq=True, + ... email__null=False, + ... ) + + >>> for user in owners_with_emails: + ... print user['first_name'] + 'John' + 'Alice' + + +The ``ResultSet`` +~~~~~~~~~~~~~~~~~ + +Both ``Table.query_2`` & ``Table.scan`` return an object called ``ResultSet``. +It's a lazily-evaluated object that uses the `Iterator protocol`_. It delays +your queries until you request the next item in the result set. + +Typical use is simply a standard ``for`` to iterate over the results:: + + >>> result_set = users.scan() + >>> for user in result_set: + ... print user['first_name'] + 'John' + 'Jane' + 'Bob' + 'Alice' + +However, this throws away results as it fetches more data. As a result, you +can't index it like a ``list``:: + + >>> len(result_set) + TypeError: object of type 'ResultSet' has no len() + +Because it does this, if you need to loop over your results more than once (or +do things like negative indexing, length checks, etc.), you should wrap it in +a call to ``list()``. Ex.:: + + >>> result_set = users.scan() + >>> all_users = list(result_set) + # Slice it for every other user. + >>> for user in all_users[::2]: + ... print user['first_name'] + 'John' + 'Bob' + +.. warning:: + + Wrapping calls like the above in ``list(...)`` **WILL** cause it to evaluate + the **ENTIRE** potentially large data set.
+ + Appropriate use of the ``limit=...`` kwarg to ``Table.query_2`` & + ``Table.scan`` calls are **VERY** important should you choose to do this. + + Alternatively, you can build your own list, using ``for`` on the + ``ResultSet`` to lazily build the list (& potentially stop early). + +.. _`Iterator protocol`: http://docs.python.org/2/library/stdtypes.html#iterator-types + + +Parallel Scan +------------- + +DynamoDB also includes a feature called "Parallel Scan", which allows you +to make use of **extra** read capacity to divide up your result set & scan +an entire table faster. + +This does require extra code on the user's part & you should ensure that +you need the speed boost, have enough data to justify it and have the extra +capacity to read it without impacting other queries/scans. + +To run it, you should pick the ``total_segments`` to use, which is an integer +representing the number of temporary partitions you'd divide your table into. +You then need to spin up a thread/process for each one, giving each +thread/process a ``segment``, which is a zero-based integer of the segment +you'd like to scan. + +An example of using parallel scan to send out email to all users might look +something like:: + + #!/usr/bin/env python + import threading + + import boto.ses + import boto.dynamodb2 + from boto.dynamodb2.table import Table + + + AWS_ACCESS_KEY_ID = '' + AWS_SECRET_ACCESS_KEY = '' + APPROVED_EMAIL = 'some@address.com' + + + def send_email(email): + # Using Amazon's Simple Email Service, send an email to a given + # email address. You must already have an email you've verified with + # AWS before this will work. + conn = boto.ses.connect_to_region( + 'us-east-1', + aws_access_key_id=AWS_ACCESS_KEY_ID, + aws_secret_access_key=AWS_SECRET_ACCESS_KEY + ) + conn.send_email( + APPROVED_EMAIL, + "[OurSite] New feature alert!", + "We've got some exciting news! We added a new feature to...", + [email] + ) + + + def process_segment(segment=0, total_segments=10): + # This method/function is executed in each thread, each getting its + # own segment to process through. + conn = boto.dynamodb2.connect_to_region( + 'us-east-1', + aws_access_key_id=AWS_ACCESS_KEY_ID, + aws_secret_access_key=AWS_SECRET_ACCESS_KEY + ) + table = Table('users', connection=conn) + + # We pass in the segment & total_segments to scan here. + for user in table.scan(segment=segment, total_segments=total_segments): + send_email(user['email']) + + + def send_all_emails(): + pool = [] + # We're choosing to divide the table into 3, then... + pool_size = 3 + + # ...spinning up a thread for each segment. + for i in range(pool_size): + worker = threading.Thread( + target=process_segment, + kwargs={ + 'segment': i, + 'total_segments': pool_size, + } + ) + pool.append(worker) + # We start them to let them start scanning & consuming their + # assigned segment. + worker.start() + + # Finally, we wait for each to finish. + for thread in pool: + thread.join() + + + if __name__ == '__main__': + send_all_emails() + + +Batch Reading +------------- + +Similar to batch writing, batch reading can also help reduce the number of +API requests necessary to access a large number of items. The +``Table.batch_get`` method takes a list (or any sliceable collection) of keys +& fetches all of them, presented as an iterator interface. + +This is done lazily, so if you never iterate over the results, no requests are +executed.
+Additionally, if you only iterate over part of the set, the minimum
+number of calls is made to fetch those results (typically a maximum of 100 per
+response).
+
+Example::
+
+    >>> from boto.dynamodb2.table import Table
+    >>> users = Table('users2')
+
+    # No request yet.
+    >>> many_users = users.batch_get(keys=[
+    ...     {'account_type': 'standard_user', 'last_name': 'Doe'},
+    ...     {'account_type': 'standard_user', 'last_name': 'Doering'},
+    ...     {'account_type': 'super_user', 'last_name': 'Liddel'},
+    ... ])
+
+    # Now the request is performed, requesting all three in one request.
+    >>> for user in many_users:
+    ...     print user['first_name']
+    'Alice'
+    'John'
+    'Jane'
+
+
+Deleting a Table
+----------------
+
+Deleting a table is a simple exercise. When you no longer need a table, simply
+run::
+
+    >>> users.delete()
+
+
+DynamoDB Local
+--------------
+
+`Amazon DynamoDB Local`_ is a utility which can be used to mock DynamoDB
+during development. Connecting to a running DynamoDB Local server is easy::
+
+    #!/usr/bin/env python
+    from boto.dynamodb2.layer1 import DynamoDBConnection
+
+
+    # Connect to DynamoDB Local
+    conn = DynamoDBConnection(
+        host='localhost',
+        port=8000,
+        aws_access_key_id='anything',
+        aws_secret_access_key='anything',
+        is_secure=False)
+
+    # List all local tables
+    tables = conn.list_tables()
+
+
+.. _`Amazon DynamoDB Local`: http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tools.html
+
+
+Next Steps
+----------
+
+You can find additional information about other calls & parameter options
+in the :doc:`API docs <ref/dynamodb2>`.
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/dynamodb_tut.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/dynamodb_tut.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7479058f90d157ef41d72218a18f5dcc04470b61
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/dynamodb_tut.rst
@@ -0,0 +1,348 @@
+.. _dynamodb_tut:
+
+============================================
+An Introduction to boto's DynamoDB interface
+============================================
+
+This tutorial focuses on the boto interface to AWS' DynamoDB_. This tutorial
+assumes that you have boto already downloaded and installed.
+
+.. _DynamoDB: http://aws.amazon.com/dynamodb/
+
+.. warning::
+
+    This tutorial covers the **ORIGINAL** release of DynamoDB.
+    It has since been supplanted by a second major version & an
+    updated API to talk to the new version. The documentation for the
+    new version of DynamoDB (& boto's support for it) is at
+    :doc:`DynamoDB v2 <dynamodb2_tut>`.
+
+
+Creating a Connection
+---------------------
+
+The first step in accessing DynamoDB is to create a connection to the service.
+The most straightforward way to do so is the following::
+
+    >>> import boto.dynamodb
+    >>> conn = boto.dynamodb.connect_to_region(
+            'us-west-2',
+            aws_access_key_id='<YOUR_AWS_KEY_ID>',
+            aws_secret_access_key='<YOUR_AWS_SECRET_KEY>')
+    >>> conn
+    <boto.dynamodb.layer2.Layer2 object at 0x...>
+
+Bear in mind that if you have your credentials in boto config in your home
+directory, the two keyword arguments in the call above are not needed. More
+details on configuration can be found in :doc:`boto_config_tut`.
+
+The :py:func:`boto.dynamodb.connect_to_region` function returns a
+:py:class:`boto.dynamodb.layer2.Layer2` instance, which is a high-level API
+for working with DynamoDB. Layer2 is a set of abstractions that sit atop
+the lower level :py:class:`boto.dynamodb.layer1.Layer1` API, which closely
+mirrors the Amazon DynamoDB API. For the purpose of this tutorial, we'll
+just be covering Layer2.
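+
+If you ever need to drop down to the raw API, the wrapped Layer1 instance is
+also reachable from a Layer2 connection. A minimal sketch, assuming the
+``layer1`` attribute and the raw ``list_tables`` response shape of the
+low-level client::
+
+    >>> low_level = conn.layer1
+    >>> low_level.list_tables()
+    {'TableNames': ['test-table', 'another-table']}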
+
+
+Listing Tables
+--------------
+
+Now that we have a DynamoDB connection object, we can then query for a list of
+existing tables in that region::
+
+    >>> conn.list_tables()
+    ['test-table', 'another-table']
+
+
+Creating Tables
+---------------
+
+DynamoDB tables are created with the
+:py:meth:`Layer2.create_table <boto.dynamodb.layer2.Layer2.create_table>`
+method. While DynamoDB's items (a rough equivalent to a relational DB's row)
+don't have a fixed schema, you do need to create a schema for the table's
+hash key element, and the optional range key element. This is explained in
+greater detail in DynamoDB's `Data Model`_ documentation.
+
+We'll start by defining a schema that has a hash key and a range key that
+are both strings::
+
+    >>> message_table_schema = conn.create_schema(
+            hash_key_name='forum_name',
+            hash_key_proto_value=str,
+            range_key_name='subject',
+            range_key_proto_value=str
+        )
+
+The next few things to determine are the table name and the read/write
+throughput. We'll defer the explanation of throughput to DynamoDB's
+`Provisioned Throughput`_ docs.
+
+We're now ready to create the table::
+
+    >>> table = conn.create_table(
+            name='messages',
+            schema=message_table_schema,
+            read_units=10,
+            write_units=10
+        )
+    >>> table
+    Table(messages)
+
+This returns a :py:class:`boto.dynamodb.table.Table` instance, which provides
+simple ways to create (put), update, and delete items.
+
+
+Getting a Table
+---------------
+
+To retrieve an existing table, use
+:py:meth:`Layer2.get_table <boto.dynamodb.layer2.Layer2.get_table>`::
+
+    >>> conn.list_tables()
+    ['test-table', 'another-table', 'messages']
+    >>> table = conn.get_table('messages')
+    >>> table
+    Table(messages)
+
+:py:meth:`Layer2.get_table <boto.dynamodb.layer2.Layer2.get_table>`, like
+:py:meth:`Layer2.create_table <boto.dynamodb.layer2.Layer2.create_table>`,
+returns a :py:class:`boto.dynamodb.table.Table` instance.
+
+Keep in mind that :py:meth:`Layer2.get_table <boto.dynamodb.layer2.Layer2.get_table>`
+will make an API call to retrieve various attributes of the table including the
+creation time, the read and write capacity, and the table schema. If you
+already know the schema, you can save an API call and create a
+:py:class:`boto.dynamodb.table.Table` object without making any calls to
+Amazon DynamoDB::
+
+    >>> table = conn.table_from_schema(
+            name='messages',
+            schema=message_table_schema)
+
+If you do this, the following fields will have ``None`` values:
+
+  * create_time
+  * status
+  * read_units
+  * write_units
+
+In addition, the ``item_count`` and ``size_bytes`` will be 0.
+If you create a table object directly from a schema object and
+decide later that you need to retrieve any of these additional
+attributes, you can use the
+:py:meth:`Table.refresh <boto.dynamodb.table.Table.refresh>` method::
+
+    >>> from boto.dynamodb.schema import Schema
+    >>> table = conn.table_from_schema(
+            name='messages',
+            schema=Schema.create(hash_key=('forum_name', 'S'),
+                                 range_key=('subject', 'S')))
+    >>> print table.write_units
+    None
+    >>> # Now we decide we need to know the write_units:
+    >>> table.refresh()
+    >>> print table.write_units
+    10
+
+
+The recommended best practice is to retrieve a table object once and
+use that object for the duration of your application.
+So, for example, instead of this::
+
+    class Application(object):
+        def __init__(self, layer2):
+            self._layer2 = layer2
+
+        def retrieve_item(self, table_name, key):
+            return self._layer2.get_table(table_name).get_item(key)
+
+You can do something like this instead::
+
+    class Application(object):
+        def __init__(self, layer2):
+            self._layer2 = layer2
+            self._tables_by_name = {}
+
+        def retrieve_item(self, table_name, key):
+            table = self._tables_by_name.get(table_name)
+            if table is None:
+                table = self._layer2.get_table(table_name)
+                self._tables_by_name[table_name] = table
+            return table.get_item(key)
+
+
+Describing Tables
+-----------------
+
+To get a complete description of a table, use
+:py:meth:`Layer2.describe_table <boto.dynamodb.layer2.Layer2.describe_table>`::
+
+    >>> conn.list_tables()
+    ['test-table', 'another-table', 'messages']
+    >>> conn.describe_table('messages')
+    {
+        'Table': {
+            'CreationDateTime': 1327117581.624,
+            'ItemCount': 0,
+            'KeySchema': {
+                'HashKeyElement': {
+                    'AttributeName': 'forum_name',
+                    'AttributeType': 'S'
+                },
+                'RangeKeyElement': {
+                    'AttributeName': 'subject',
+                    'AttributeType': 'S'
+                }
+            },
+            'ProvisionedThroughput': {
+                'ReadCapacityUnits': 10,
+                'WriteCapacityUnits': 10
+            },
+            'TableName': 'messages',
+            'TableSizeBytes': 0,
+            'TableStatus': 'ACTIVE'
+        }
+    }
+
+
+Adding Items
+------------
+
+Continuing on with our previously created ``messages`` table, adding an item
+to it looks like the following::
+
+    >>> table = conn.get_table('messages')
+    >>> item_data = {
+            'Body': 'http://url_to_lolcat.gif',
+            'SentBy': 'User A',
+            'ReceivedTime': '12/9/2011 11:36:03 PM',
+        }
+    >>> item = table.new_item(
+            # Our hash key is 'forum'
+            hash_key='LOLCat Forum',
+            # Our range key is 'subject'
+            range_key='Check this out!',
+            # This has the remaining attributes.
+            attrs=item_data
+        )
+
+The
+:py:meth:`Table.new_item <boto.dynamodb.table.Table.new_item>` method creates
+a new :py:class:`boto.dynamodb.item.Item` instance with your specified
+hash key, range key, and attributes already set.
+:py:class:`Item <boto.dynamodb.item.Item>` is a :py:class:`dict` sub-class,
+meaning you can edit your data as such::
+
+    item['a_new_key'] = 'testing'
+    del item['a_new_key']
+
+After you are happy with the contents of the item, use
+:py:meth:`Item.put <boto.dynamodb.item.Item.put>` to commit it to DynamoDB::
+
+    >>> item.put()
+
+
+Retrieving Items
+----------------
+
+Now, let's check if it got added correctly. Since DynamoDB works under an
+'eventual consistency' mode, we need to request a consistent read,
+as follows::
+
+    >>> table = conn.get_table('messages')
+    >>> item = table.get_item(
+            # Your hash key was 'forum_name'
+            hash_key='LOLCat Forum',
+            # Your range key was 'subject'
+            range_key='Check this out!',
+            # Ask for a strongly consistent read.
+            consistent_read=True
+        )
+    >>> item
+    {
+        # Note that this was your hash key attribute (forum_name)
+        'forum_name': 'LOLCat Forum',
+        # This is your range key attribute (subject)
+        'subject': 'Check this out!',
+        'Body': 'http://url_to_lolcat.gif',
+        'ReceivedTime': '12/9/2011 11:36:03 PM',
+        'SentBy': 'User A',
+    }
+
+
+Updating Items
+--------------
+
+To update an item's attributes, simply retrieve it, modify the value, then
+:py:meth:`Item.put <boto.dynamodb.item.Item.put>` it again::
+
+    >>> table = conn.get_table('messages')
+    >>> item = table.get_item(
+            hash_key='LOLCat Forum',
+            range_key='Check this out!'
+        )
+    >>> item['SentBy'] = 'User B'
+    >>> item.put()
+
+Working with Decimals
+---------------------
+
+To avoid the loss of precision, you can stipulate that the
+``decimal.Decimal`` type be used for numeric values::
+
+    >>> import decimal
+    >>> conn.use_decimals()
+    >>> table = conn.get_table('messages')
+    >>> item = table.new_item(
+            hash_key='LOLCat Forum',
+            range_key='Check this out!'
+        )
+    >>> item['decimal_type'] = decimal.Decimal('1.12345678912345')
+    >>> item.put()
+    >>> print table.get_item('LOLCat Forum', 'Check this out!')
+    {u'forum_name': 'LOLCat Forum', u'decimal_type': Decimal('1.12345678912345'),
+     u'subject': 'Check this out!'}
+
+You can enable ``decimal.Decimal`` usage either through the ``use_decimals``
+method, or by passing in the
+:py:class:`Dynamizer <boto.dynamodb.types.Dynamizer>` class for
+the ``dynamizer`` param::
+
+    >>> from boto.dynamodb.types import Dynamizer
+    >>> conn = boto.dynamodb.connect_to_region('us-west-2', dynamizer=Dynamizer)
+
+This mechanism can also be used if you want to customize the encoding/decoding
+process of DynamoDB types.
+
+
+Deleting Items
+--------------
+
+To delete items, use the
+:py:meth:`Item.delete <boto.dynamodb.item.Item.delete>` method::
+
+    >>> table = conn.get_table('messages')
+    >>> item = table.get_item(
+            hash_key='LOLCat Forum',
+            range_key='Check this out!'
+        )
+    >>> item.delete()
+
+
+Deleting Tables
+---------------
+
+.. WARNING::
+    Deleting a table will also **permanently** delete all of its contents without prompt. Use carefully.
+
+There are two easy ways to delete a table. Through your top-level
+:py:class:`Layer2 <boto.dynamodb.layer2.Layer2>` object::
+
+    >>> conn.delete_table(table)
+
+Or by getting the table, then using
+:py:meth:`Table.delete <boto.dynamodb.table.Table.delete>`::
+
+    >>> table = conn.get_table('messages')
+    >>> table.delete()
+
+
+.. _Data Model: http://docs.amazonwebservices.com/amazondynamodb/latest/developerguide/DataModel.html
+.. _Provisioned Throughput: http://docs.amazonwebservices.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ec2_tut.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ec2_tut.rst
new file mode 100644
index 0000000000000000000000000000000000000000..140930beb3557b90782672d4eb796d361df6fae8
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ec2_tut.rst
@@ -0,0 +1,247 @@
+.. _ec2_tut:
+
+=======================================
+An Introduction to boto's EC2 interface
+=======================================
+
+This tutorial focuses on the boto interface to the Elastic Compute Cloud
+from Amazon Web Services. This tutorial assumes that you have already
+downloaded and installed boto.
+
+Creating a Connection
+---------------------
+
+The first step in accessing EC2 is to create a connection to the service.
+The recommended way of doing this in boto is::
+
+    >>> import boto.ec2
+    >>> conn = boto.ec2.connect_to_region("us-west-2",
+    ...     aws_access_key_id='<aws access key>',
+    ...     aws_secret_access_key='<aws secret key>')
+
+At this point the variable ``conn`` will point to an EC2Connection object. In
+this example, the AWS access key and AWS secret key are passed in to the method
+explicitly. Alternatively, you can put your credentials in the boto config file
+or environment variables and then simply specify which region you want, as
+follows::
+
+    >>> conn = boto.ec2.connect_to_region("us-west-2")
+
+In either case, ``conn`` will point to an EC2Connection object which we will
+use throughout the remainder of this tutorial.
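+
+If you aren't sure which regions are available, you can ask the service and
+connect from a ``RegionInfo`` object instead. A minimal sketch using
+``boto.ec2.regions`` (the exact region list shown is illustrative)::
+
+    >>> import boto.ec2
+    >>> regions = boto.ec2.regions()
+    >>> regions
+    [RegionInfo:us-east-1, RegionInfo:us-west-1, RegionInfo:us-west-2, ...]
+    >>> conn = regions[-1].connect()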
+
+Launching Instances
+-------------------
+
+Possibly the most important and common task you'll use EC2 for is to launch,
+stop and terminate instances. In its most primitive form, you can launch an
+instance as follows::
+
+    >>> conn.run_instances('<ami-image-id>')
+
+This will launch an instance in the specified region with the default parameters.
+You will not be able to SSH into this machine, as it doesn't have a security
+group set. See :doc:`security_groups` for details on creating one.
+
+Now, let's say that you already have a key pair, want a specific type of
+instance, and you have your :doc:`security group <security_groups>` all set up.
+In this case we can use the keyword arguments to accomplish that::
+
+    >>> conn.run_instances(
+            '<ami-image-id>',
+            key_name='myKey',
+            instance_type='c1.xlarge',
+            security_groups=['your-security-group-here'])
+
+The main caveat with the above call is that it is possible to request an
+instance type that is not compatible with the provided AMI (for example,
+requesting an ``m1.small`` instance type for an AMI that was built for 64-bit
+instances). For more details on the plethora of possible keyword parameters,
+be sure to check out boto's :doc:`EC2 API reference <ref/ec2>`.
+
+Stopping Instances
+------------------
+Once you have your instances up and running, you might wish to shut them down
+if they're not in use. Please note that this will only de-allocate virtual
+hardware resources (as well as instance store drives), but won't destroy your
+EBS volumes -- this means you'll pay nominal provisioned EBS storage fees
+even if your instance is stopped. You can do so as follows::
+
+    >>> conn.stop_instances(instance_ids=['instance-id-1','instance-id-2', ...])
+
+This will request a 'graceful' stop of each of the specified instances. If you
+wish to request the equivalent of unplugging your instance(s), simply add
+``force=True`` keyword argument to the call above. Please note that stopping
+is not supported for Spot instances.
+
+Terminating Instances
+---------------------
+Once you are completely done with your instance and wish to surrender the
+virtual hardware, root EBS volume and all other underlying components, you
+can request instance termination. To do so you can use the call below::
+
+    >>> conn.terminate_instances(instance_ids=['instance-id-1','instance-id-2', ...])
+
+Please use this with care since, once you request termination for an instance,
+there is no turning back.
+
+Checking What Instances Are Running
+-----------------------------------
+You can also get information on your currently running instances::
+
+    >>> reservations = conn.get_all_reservations()
+    >>> reservations
+    [Reservation:r-00000000]
+
+A reservation corresponds to a command to start instances. You can see what
+instances are associated with a reservation::
+
+    >>> instances = reservations[0].instances
+    >>> instances
+    [Instance:i-00000000]
+
+An instance object lets you get at more metadata available about the instance::
+
+    >>> inst = instances[0]
+    >>> inst.instance_type
+    u'c1.xlarge'
+    >>> inst.placement
+    u'us-west-2'
+
+In this case, we can see that our instance is a c1.xlarge instance in the
+`us-west-2` availability zone.
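+
+If you have many reservations, listing everything gets noisy. As a hedged
+sketch, you can narrow the call with the ``filters`` parameter; the filter
+name ``instance-state-name`` comes from the EC2 API's filter set::
+
+    >>> running = conn.get_all_reservations(
+    ...     filters={'instance-state-name': 'running'})
+    >>> [inst.id for res in running for inst in res.instances]
+    [u'i-00000000']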
+
+Checking Health Status Of Instances
+-----------------------------------
+You can also get the health status of your instances, including any scheduled events::
+
+    >>> statuses = conn.get_all_instance_status()
+    >>> statuses
+    [InstanceStatus:i-00000000]
+
+An instance status object allows you to get information about impaired
+functionality or scheduled / system maintenance events::
+
+    >>> status = statuses[0]
+    >>> status.events
+    [Event:instance-reboot]
+    >>> event = status.events[0]
+    >>> event.description
+    u'Maintenance software update.'
+    >>> event.not_before
+    u'2011-12-11T04:00:00.000Z'
+    >>> event.not_after
+    u'2011-12-11T10:00:00.000Z'
+    >>> status.instance_status
+    Status:ok
+    >>> status.system_status
+    Status:ok
+    >>> status.system_status.details
+    {u'reachability': u'passed'}
+
+This will by default include the health status only for running instances.
+If you wish to request the health status for all instances, simply add
+``include_all_instances=True`` keyword argument to the call above.
+
+=================================
+Using Elastic Block Storage (EBS)
+=================================
+
+
+EBS Basics
+----------
+
+EBS can be used by EC2 instances for permanent storage. Note that EBS volumes
+must be in the same availability zone as the EC2 instance you wish to attach
+them to.
+
+To actually create a volume you will need to specify a few details. The
+following example will create a 50GB EBS volume in one of the `us-west-2`
+availability zones::
+
+    >>> vol = conn.create_volume(50, "us-west-2")
+    >>> vol
+    Volume:vol-00000000
+
+You can check that the volume is now ready and available::
+
+    >>> curr_vol = conn.get_all_volumes([vol.id])[0]
+    >>> curr_vol.status
+    u'available'
+    >>> curr_vol.zone
+    u'us-west-2'
+
+We can now attach this volume to the EC2 instance we created earlier, making it
+available as a new device::
+
+    >>> conn.attach_volume(vol.id, inst.id, "/dev/sdx")
+    u'attaching'
+
+You will now have a new volume attached to your instance. Note that with some
+Linux kernels, `/dev/sdx` may get translated to `/dev/xvdx`. This device can
+now be used as a normal block device within Linux.
+
+Working With Snapshots
+----------------------
+
+Snapshots are point-in-time copies of an EBS volume that you can use for
+future recovery. They let you create incremental backups and can also be used
+to instantiate multiple new volumes, to move EBS volumes across availability
+zones, or to make backups to S3.
+
+Creating a snapshot is easy::
+
+    >>> snapshot = conn.create_snapshot(vol.id, 'My snapshot')
+    >>> snapshot
+    Snapshot:snap-00000000
+
+Once you have a snapshot, you can create a new volume from it. Volumes are
+created lazily from snapshots, which means you can start using such a volume
+straight away::
+
+    >>> new_vol = snapshot.create_volume('us-west-2')
+    >>> conn.attach_volume(new_vol.id, inst.id, "/dev/sdy")
+    u'attaching'
+
+If you no longer need a snapshot, you can also easily delete it::
+
+    >>> conn.delete_snapshot(snapshot.id)
+    True
+
+
+Working With Launch Configurations
+----------------------------------
+
+Launch Configurations allow you to create a re-usable set of properties for an
+instance. These are used with AutoScaling groups to produce consistent,
+repeatable instance sets.
+
+Creating a Launch Configuration is easy::
+
+    >>> import boto
+    >>> from boto.ec2.autoscale import LaunchConfiguration
+    >>> conn = boto.connect_autoscale()
+    >>> config = LaunchConfiguration(name='foo', image_id='ami-abcd1234', key_name='foo.pem')
+    >>> conn.create_launch_configuration(config)
+
+Once you have a launch configuration, you can list your current configurations::
+
+    >>> conn = boto.connect_autoscale()
+    >>> config = conn.get_all_launch_configurations(names=['foo'])
+
+If you no longer need a launch configuration, you can delete it::
+
+    >>> conn = boto.connect_autoscale()
+    >>> conn.delete_launch_configuration('foo')
+
+.. versionchanged:: 2.27.0
+.. Note::
+
+    If ``use_block_device_types=True`` is passed to the connection it will deserialize
+    Launch Configurations with Block Device Mappings into a re-usable format with
+    BlockDeviceType objects, similar to how AMIs are deserialized currently. Legacy
+    behavior is to put them into a format that is incompatible with creating new Launch
+    Configurations. This switch is in place to preserve backwards compatibility, but
+    the new format is the preferred one going forward.
+
+    If you would like to use the new format, you should use something like::
+
+        >>> conn = boto.connect_autoscale(use_block_device_types=True)
+        >>> config = conn.get_all_launch_configurations(names=['foo'])
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/elb_tut.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/elb_tut.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2b25e74d6b28c960367aced6427221d07af0396f
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/elb_tut.rst
@@ -0,0 +1,236 @@
+.. _elb_tut:
+
+==========================================================
+An Introduction to boto's Elastic Load Balancing interface
+==========================================================
+
+This tutorial focuses on the boto interface for `Elastic Load Balancing`_
+from Amazon Web Services. This tutorial assumes that you have already
+downloaded and installed boto, and are familiar with the boto ec2 interface.
+
+.. _Elastic Load Balancing: http://aws.amazon.com/elasticloadbalancing/
+
+Elastic Load Balancing Concepts
+-------------------------------
+`Elastic Load Balancing`_ (ELB) is intimately connected with Amazon's `Elastic
+Compute Cloud`_ (EC2) service. Using the ELB service allows you to create a load
+balancer - a DNS endpoint and set of ports that distributes incoming requests
+to a set of EC2 instances. The advantage of using a load balancer is that it
+allows you to truly scale up or down a set of backend instances without
+disrupting service. Before the ELB service, you had to do this manually by
+launching an EC2 instance and installing load balancer software on it (nginx,
+haproxy, perlbal, etc.) to distribute traffic to other EC2 instances.
+
+Recall that the EC2 service is split into Regions, which are further
+divided into Availability Zones (AZ).
+For example, the US-East region is divided into us-east-1a, us-east-1b,
+us-east-1c, us-east-1d, and us-east-1e. You can think of AZs as data centers -
+each runs off a different set of ISP backbones and power providers.
+ELB load balancers can span multiple AZs but cannot span multiple regions.
+That means that if you'd like to create a set of instances spanning both the
+US and Europe Regions you'd have to create two load balancers and have some
+sort of other means of distributing requests between the two load balancers.
+An example of this could be using GeoIP techniques to choose the correct load
+balancer, or perhaps DNS round robin. Keep in mind also that traffic is
+distributed equally over all AZs the ELB balancer spans. This means you should
+have an equal number of instances in each AZ if you want to equally distribute
+load amongst all your instances.
+
+.. _Elastic Compute Cloud: http://aws.amazon.com/ec2/
+
+Creating a Connection
+---------------------
+
+The first step in accessing ELB is to create a connection to the service.
+
+Like EC2, the ELB service has a different endpoint for each region. By default
+the US East endpoint is used. To choose a specific region, use the
+``connect_to_region`` function::
+
+    >>> import boto.ec2.elb
+    >>> conn = boto.ec2.elb.connect_to_region('us-west-2')
+
+Here's another way to discover what regions are available and then
+connect to one::
+
+    >>> import boto.ec2.elb
+    >>> regions = boto.ec2.elb.regions()
+    >>> regions
+    [RegionInfo:us-east-1,
+     RegionInfo:ap-northeast-1,
+     RegionInfo:us-west-1,
+     RegionInfo:us-west-2,
+     RegionInfo:ap-southeast-1,
+     RegionInfo:eu-west-1]
+    >>> conn = regions[-1].connect()
+
+Alternatively, edit your boto.cfg with the default ELB endpoint to use::
+
+    [Boto]
+    elb_region_name = eu-west-1
+    elb_region_endpoint = elasticloadbalancing.eu-west-1.amazonaws.com
+
+Getting Existing Load Balancers
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To retrieve any existing load balancers:
+
+>>> conn.get_all_load_balancers()
+[LoadBalancer:load-balancer-prod, LoadBalancer:load-balancer-staging]
+
+You can also filter by name:
+
+>>> conn.get_all_load_balancers(load_balancer_names=['load-balancer-prod'])
+[LoadBalancer:load-balancer-prod]
+
+:py:meth:`get_all_load_balancers <boto.ec2.elb.ELBConnection.get_all_load_balancers>`
+returns a :py:class:`boto.resultset.ResultSet` that contains instances
+of :class:`boto.ec2.elb.loadbalancer.LoadBalancer`, each of which abstracts
+access to a load balancer. :py:class:`ResultSet <boto.resultset.ResultSet>`
+works very much like a list.
+
+>>> balancers = conn.get_all_load_balancers()
+>>> balancers[0]
+LoadBalancer:load-balancer-prod
+
+Creating a Load Balancer
+------------------------
+To create a load balancer you need the following:
+    #. The specific **ports and protocols** you want to load balance over, and
+       the port you want to connect to on all instances.
+    #. A **health check** - the ELB concept of a *heart beat* or *ping*. ELB will use this health
+       check to see whether your instances are up or down. If they go down, the load balancer
+       will no longer send requests to them.
+    #. A **list of Availability Zones** you'd like to create your load balancer over.
+
+Ports and Protocols
+^^^^^^^^^^^^^^^^^^^
+An incoming connection to your load balancer will come on one or more ports -
+for example 80 (HTTP) and 443 (HTTPS). Each uses a protocol -
+currently, the supported protocols are TCP and HTTP. We also need to tell the
+load balancer which port to route connections *to* on each instance. For example,
+to create a load balancer for a website that accepts connections on 80 and 443,
+and that routes connections to port 8080 and 8443 on each instance, you would
+specify that the load balancer ports and protocols are:
+
+    * 80, 8080, HTTP
+    * 443, 8443, TCP
+
+This says that the load balancer will listen on two ports - 80 and 443.
+Connections on 80 will use an HTTP load balancer to forward connections to port
+8080 on instances. Likewise, the load balancer will listen on 443 to forward
+connections to 8443 on each instance using the TCP balancer.
+We need to use TCP for the HTTPS port because that traffic is encrypted at the
+application layer. Of course, we could also specify that the load balancer use
+TCP for port 80; however, specifying HTTP allows you to let ELB handle some
+work for you - for example, HTTP header parsing.
+
+.. _elb-configuring-a-health-check:
+
+Configuring a Health Check
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+A health check allows ELB to determine which instances are alive and able to
+respond to requests. A health check is essentially a tuple consisting of:
+
+    * *Target*: What to check on an instance. For a TCP check this is comprised of::
+
+        TCP:PORT_TO_CHECK
+
+      Which attempts to open a connection on PORT_TO_CHECK. If the connection opens
+      successfully, that specific instance is deemed healthy, otherwise it is marked
+      temporarily as unhealthy. For HTTP, the situation is slightly different::
+
+        HTTP:PORT_TO_CHECK/RESOURCE
+
+      This means that the health check will connect to the resource /RESOURCE on
+      PORT_TO_CHECK. If an HTTP 200 status is returned the instance is deemed healthy.
+    * *Interval*: How often the check is made. This is given in seconds and defaults
+      to 30. The valid range of intervals goes from 5 seconds to 600 seconds.
+    * *Timeout*: The number of seconds the load balancer will wait for a check to
+      return a result.
+    * *Unhealthy threshold*: The number of consecutive failed checks to deem the
+      instance as being dead. The default is 5, and the range of valid values lies
+      from 2 to 10.
+
+The following example creates a health check that checks instances every 20
+seconds on port 8080 over HTTP, looking for an HTTP 200 response from the
+resource /health:
+
+>>> from boto.ec2.elb import HealthCheck
+>>> hc = HealthCheck(
+        interval=20,
+        healthy_threshold=3,
+        unhealthy_threshold=5,
+        target='HTTP:8080/health'
+    )
+
+Putting It All Together
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Finally, let's create a load balancer in the US region that listens on ports
+80 and 443 and distributes requests to instances on 8080 and 8443 over HTTP
+and TCP. We want the load balancer to span the availability zones
+*us-east-1a* and *us-east-1b*:
+
+>>> zones = ['us-east-1a', 'us-east-1b']
+>>> ports = [(80, 8080, 'http'), (443, 8443, 'tcp')]
+>>> lb = conn.create_load_balancer('my-lb', zones, ports)
+>>> # This is from the previous section.
+>>> lb.configure_health_check(hc)
+
+The load balancer has been created. To see where you can actually connect to
+it, do:
+
+>>> print lb.dns_name
+my_elb-123456789.us-east-1.elb.amazonaws.com
+
+You can then CNAME map a better name, e.g. ``www.MYWEBSITE.com``, to the
+above address.
+
+Adding Instances To a Load Balancer
+-----------------------------------
+
+Now that the load balancer has been created, there are two ways to add
+instances to it:
+
+    #. Manually, adding each instance in turn.
+    #. Mapping an autoscale group to the load balancer. Please see the
+       :doc:`Autoscale tutorial <autoscale_tut>` for information on how to do this.
+
+Manually Adding and Removing Instances
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Assuming you have a list of instance ids, you can add them to the load balancer:
+
+>>> instance_ids = ['i-4f8cf126', 'i-0bb7ca62']
+>>> lb.register_instances(instance_ids)
+
+Keep in mind that these instances should be in Security Groups that match the
+internal ports of the load balancer you just created (for this example, they
+should allow incoming connections on 8080 and 8443).
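+
+Once instances are registered, you can ask the load balancer how each one is
+doing. A minimal sketch using ``get_instance_health`` (the exact repr of the
+returned states is illustrative):
+
+>>> lb.get_instance_health()
+[InstanceState:(i-4f8cf126, InService), InstanceState:(i-0bb7ca62, OutOfService)]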
+
+To remove instances:
+
+>>> lb.deregister_instances(instance_ids)
+
+Modifying Availability Zones for a Load Balancer
+------------------------------------------------
+
+If you wanted to disable one or more zones from an existing load balancer:
+
+>>> lb.disable_zones(['us-east-1a'])
+
+You can then terminate each instance in the disabled zone and then deregister
+them from your load balancer.
+
+To enable zones:
+
+>>> lb.enable_zones(['us-east-1c'])
+
+Deleting a Load Balancer
+------------------------
+
+>>> lb.delete()
+
+
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/emr_tut.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/emr_tut.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c42d188f8672fcfb4ed08e610623b18412c2e12e
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/emr_tut.rst
@@ -0,0 +1,107 @@
+.. _emr_tut:
+
+=====================================================
+An Introduction to boto's Elastic Mapreduce interface
+=====================================================
+
+This tutorial focuses on the boto interface to Elastic Mapreduce from
+Amazon Web Services. This tutorial assumes that you have already
+downloaded and installed boto.
+
+Creating a Connection
+---------------------
+The first step in accessing Elastic Mapreduce is to create a connection
+to the service. There are two ways to do this in boto. The first is:
+
+>>> from boto.emr.connection import EmrConnection
+>>> conn = EmrConnection('<aws access key>', '<aws secret key>')
+
+At this point the variable ``conn`` will point to an EmrConnection object.
+In this example, the AWS access key and AWS secret key are passed in to
+the method explicitly. Alternatively, you can set the environment variables:
+
+* ``AWS_ACCESS_KEY_ID`` - Your AWS Access Key ID
+* ``AWS_SECRET_ACCESS_KEY`` - Your AWS Secret Access Key
+
+and then call the constructor without any arguments, like this:
+
+>>> conn = EmrConnection()
+
+There is also a shortcut function in boto
+that makes it easy to create EMR connections:
+
+>>> import boto.emr
+>>> conn = boto.emr.connect_to_region('us-west-2')
+
+In either case, ``conn`` points to an EmrConnection object which we will use
+throughout the remainder of this tutorial.
+
+Creating Streaming JobFlow Steps
+--------------------------------
+Upon creating a connection to Elastic Mapreduce you will next
+want to create one or more jobflow steps. There are two types of steps, streaming
+and custom jar, both of which have a class in the boto Elastic Mapreduce implementation.
+
+Creating a streaming step that runs the AWS wordcount example, itself written in Python, can be accomplished by:
+
+>>> from boto.emr.step import StreamingStep
+>>> step = StreamingStep(name='My wordcount example',
+...     mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter.py',
+...     reducer='aggregate',
+...     input='s3n://elasticmapreduce/samples/wordcount/input',
+...     output='s3n://<my output bucket>/output/wordcount_output')
+
+where ``<my output bucket>`` is a bucket you have created in S3.
+
+Note that this statement does not run the step; that is accomplished later when we create a jobflow.
+
+Additional arguments of note to the streaming jobflow step are ``cache_files``, ``cache_archives`` and ``step_args``. The options ``cache_files`` and ``cache_archives`` enable you to use Hadoop's distributed cache to share files amongst the instances that run the step. The argument ``step_args`` allows one to pass additional arguments to Hadoop streaming, for example modifications to the Hadoop job configuration.
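+
+For instance, here is a minimal sketch of a streaming step that passes an extra
+Hadoop configuration flag via ``step_args`` and ships a helper module through
+the distributed cache via ``cache_files`` (the bucket and helper file names are
+placeholders):
+
+>>> from boto.emr.step import StreamingStep
+>>> step = StreamingStep(name='My tuned wordcount example',
+...     mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter.py',
+...     reducer='aggregate',
+...     input='s3n://elasticmapreduce/samples/wordcount/input',
+...     output='s3n://<my output bucket>/output/wordcount_output',
+...     step_args=['-D', 'mapred.reduce.tasks=4'],
+...     cache_files=['s3n://<my bucket>/helpers.py#helpers.py'])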
+
+Creating Custom Jar Job Flow Steps
+----------------------------------
+
+The second type of jobflow step executes tasks written with a custom jar. Creating a custom jar step for the AWS CloudBurst example can be accomplished by:
+
+>>> from boto.emr.step import JarStep
+>>> step = JarStep(name='Cloudburst example',
+...     jar='s3n://elasticmapreduce/samples/cloudburst/cloudburst.jar',
+...     step_args=['s3n://elasticmapreduce/samples/cloudburst/input/s_suis.br',
+...                's3n://elasticmapreduce/samples/cloudburst/input/100k.br',
+...                's3n://<my output bucket>/output/cloudfront_output',
+...                36, 3, 0, 1, 240, 48, 24, 24, 128, 16])
+
+Note that this statement does not actually run the step; that is accomplished later when we create a jobflow. Also note that this JarStep does not include a ``main_class`` argument since the jar ``MANIFEST.MF`` has a ``Main-Class`` entry.
+
+Creating JobFlows
+-----------------
+Once you have created one or more jobflow steps, you will next want to create and run a jobflow. Creating a jobflow that executes either of the steps we created above can be accomplished by:
+
+>>> import boto.emr
+>>> conn = boto.emr.connect_to_region('us-west-2')
+>>> jobid = conn.run_jobflow(name='My jobflow',
+...     log_uri='s3://<my log uri>/jobflow_logs',
+...     steps=[step])
+
+The method will not block for the completion of the jobflow, but will return immediately. The status of the jobflow can be determined by:
+
+>>> status = conn.describe_jobflow(jobid)
+>>> status.state
+u'STARTING'
+
+One can then use this state to block for a jobflow to complete. Valid jobflow states currently defined in the AWS API are COMPLETED, FAILED, TERMINATED, RUNNING, SHUTTING_DOWN, STARTING and WAITING.
+
+In some cases you may not have built all of the steps prior to running the jobflow. In these cases, additional steps can be added to a jobflow by running:
+
+>>> conn.add_jobflow_steps(jobid, [second_step])
+
+If you wish to add additional steps to a running jobflow, you may want to set the ``keep_alive`` parameter to ``True`` in ``run_jobflow`` so that the jobflow does not automatically terminate when the first step completes.
+
+The ``run_jobflow`` method has a number of important parameters that are worth investigating. They include parameters to change the number and type of EC2 instances on which the jobflow is executed, set an SSH key for manual debugging and enable AWS console debugging.
+
+Terminating JobFlows
+--------------------
+By default, when all the steps of a jobflow have finished or failed, the jobflow terminates. However, if you set the ``keep_alive`` parameter to ``True`` or just want to halt the execution of a jobflow early, you can terminate a jobflow by:
+
+>>> import boto.emr
+>>> conn = boto.emr.connect_to_region('us-west-2')
+>>> conn.terminate_jobflow('<jobflow id>')
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/extensions/githublinks/__init__.py b/desktop/core/ext-py/boto-2.38.0/docs/source/extensions/githublinks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9641a83d2763e852f2e2f7ad34a603d4df64a8a2
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/extensions/githublinks/__init__.py
@@ -0,0 +1,55 @@
+"""Add github roles to sphinx docs.
+
+Based entirely on Doug Hellmann's bitbucket version, but
+adapted for Github.
+(https://bitbucket.org/dhellmann/sphinxcontrib-bitbucket/)
+
+"""
+from urlparse import urljoin
+
+from docutils import nodes, utils
+from docutils.parsers.rst.roles import set_classes
+
+
+def make_node(rawtext, app, type_, slug, options):
+    base_url = app.config.github_project_url
+    if base_url is None:
+        raise ValueError(
+            "Configuration value for 'github_project_url' is not set.")
+    relative = '%s/%s' % (type_, slug)
+    full_ref = urljoin(base_url, relative)
+    set_classes(options)
+    if type_ == 'issues':
+        type_ = 'issue'
+    node = nodes.reference(rawtext, type_ + ' ' + utils.unescape(slug),
+                           refuri=full_ref, **options)
+    return node
+
+
+def github_sha(name, rawtext, text, lineno, inliner,
+               options={}, content=[]):
+    app = inliner.document.settings.env.app
+    node = make_node(rawtext, app, 'commit', text, options)
+    return [node], []
+
+
+def github_issue(name, rawtext, text, lineno, inliner,
+                 options={}, content=[]):
+    try:
+        issue = int(text)
+    except ValueError:
+        msg = inliner.reporter.error(
+            "Invalid Github Issue '%s', must be an integer" % text,
+            line=lineno)
+        problem = inliner.problematic(rawtext, rawtext, msg)
+        return [problem], [msg]
+    app = inliner.document.settings.env.app
+    node = make_node(rawtext, app, 'issues', str(issue), options)
+    return [node], []
+
+
+def setup(app):
+    app.info('Adding github link roles')
+    app.add_role('sha', github_sha)
+    app.add_role('issue', github_issue)
+    app.add_config_value('github_project_url', None, 'env')
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/getting_started.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/getting_started.rst
new file mode 100644
index 0000000000000000000000000000000000000000..29f70f8fe1f526cef840a977d5c0094275ef07f2
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/getting_started.rst
@@ -0,0 +1,187 @@
+.. _getting-started:
+
+=========================
+Getting Started with Boto
+=========================
+
+This tutorial will walk you through installing and configuring ``boto``, as
+well as how to use it to make API calls.
+
+This tutorial assumes you are familiar with Python & that you have registered
+for an `Amazon Web Services`_ account. You'll need to retrieve your
+``Access Key ID`` and ``Secret Access Key`` from the web-based console.
+
+.. _`Amazon Web Services`: https://aws.amazon.com/
+
+
+Installing Boto
+---------------
+
+You can use ``pip`` to install the latest released version of ``boto``::
+
+    pip install boto
+
+If you want to install ``boto`` from source::
+
+    git clone git://github.com/boto/boto.git
+    cd boto
+    python setup.py install
+
+.. note::
+
+    For most services, this is enough to get going. However, to support
+    everything Boto ships with, you should additionally run
+    ``pip install -r requirements.txt``.
+
+    This installs all additional, non-stdlib modules, enabling use of things
+    like ``boto.cloudsearch``, ``boto.manage`` & ``boto.mashups``, as well as
+    covering everything needed for the test suite.
+
+
+Using Virtual Environments
+--------------------------
+
+Another common way to install ``boto`` is to use a ``virtualenv``, which
+provides isolated environments.
+First, install the ``virtualenv`` Python
+package::
+
+    pip install virtualenv
+
+Next, create a virtual environment by using the ``virtualenv`` command and
+specifying where you want the virtualenv to be created (you can specify
+any directory you like, though this example allows for compatibility with
+``virtualenvwrapper``)::
+
+    mkdir ~/.virtualenvs
+    virtualenv ~/.virtualenvs/boto
+
+You can now activate the virtual environment::
+
+    source ~/.virtualenvs/boto/bin/activate
+
+Now, any usage of ``python`` or ``pip`` (within the current shell) will default
+to the new, isolated version within your virtualenv.
+
+You can now install ``boto`` into this virtual environment::
+
+    pip install boto
+
+When you are done using ``boto``, you can deactivate your virtual environment::
+
+    deactivate
+
+If you are creating a lot of virtual environments, `virtualenvwrapper`_
+is an excellent tool that lets you easily manage your virtual environments.
+
+.. _`virtualenvwrapper`: http://virtualenvwrapper.readthedocs.org/en/latest/
+
+
+Configuring Boto Credentials
+----------------------------
+
+You have a few options for configuring ``boto`` (see :doc:`boto_config_tut`).
+For this tutorial, we'll be using a configuration file. First, create a
+``~/.boto`` file with these contents::
+
+    [Credentials]
+    aws_access_key_id = YOURACCESSKEY
+    aws_secret_access_key = YOURSECRETKEY
+
+``boto`` supports a number of configuration values. For more information,
+see :doc:`boto_config_tut`. The above file, however, is all we need for now.
+You're now ready to use ``boto``.
+
+
+Making Connections
+------------------
+
+``boto`` provides a number of convenience functions to simplify connecting to a
+service. For example, to work with S3, you can run::
+
+    >>> import boto
+    >>> s3 = boto.connect_s3()
+
+If you want to connect to a different region, you can import the service module
+and use the ``connect_to_region`` functions. For example, to create an EC2
+client in 'us-west-2' region, you'd run the following::
+
+    >>> import boto.ec2
+    >>> ec2 = boto.ec2.connect_to_region('us-west-2')
+
+
+Troubleshooting Connections
+---------------------------
+
+When calling the various ``connect_*`` functions, you might run into an error
+like this::
+
+    >>> import boto
+    >>> s3 = boto.connect_s3()
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+      File "boto/__init__.py", line 121, in connect_s3
+        return S3Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
+      File "boto/s3/connection.py", line 171, in __init__
+        validate_certs=validate_certs)
+      File "boto/connection.py", line 548, in __init__
+        host, config, self.provider, self._required_auth_capability())
+      File "boto/auth.py", line 668, in get_auth_handler
+        'Check your credentials' % (len(names), str(names)))
+    boto.exception.NoAuthHandlerFound: No handler was ready to authenticate. 1 handlers were checked. ['HmacAuthV1Handler'] Check your credentials
+
+This is because ``boto`` cannot find credentials to use. Verify that you have
+created a ``~/.boto`` file as shown above. You can also turn on debug logging
+to verify where your credentials are coming from::
+
+    >>> import boto
+    >>> boto.set_stream_logger('boto')
+    >>> s3 = boto.connect_s3()
+    2012-12-10 17:15:03,799 boto [DEBUG]:Using access key found in config file.
+    2012-12-10 17:15:03,799 boto [DEBUG]:Using secret key found in config file.
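+
+If you would rather not keep a ``~/.boto`` file around, the same credentials
+can also come from the standard ``AWS_ACCESS_KEY_ID`` &
+``AWS_SECRET_ACCESS_KEY`` environment variables. A minimal sketch, setting
+them from within Python before connecting (the key values are placeholders)::
+
+    >>> import os
+    >>> os.environ['AWS_ACCESS_KEY_ID'] = 'YOURACCESSKEY'
+    >>> os.environ['AWS_SECRET_ACCESS_KEY'] = 'YOURSECRETKEY'
+    >>> import boto
+    >>> s3 = boto.connect_s3()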
+
+
+Interacting with AWS Services
+-----------------------------
+
+Once you have a client for the specific service you want, there are methods on
+that object that will invoke API operations for that service. The following
+code demonstrates how to create a bucket and put an object in that bucket::
+
+    >>> import boto
+    >>> import time
+    >>> s3 = boto.connect_s3()
+
+    # Create a new bucket. Buckets must have a globally unique name (not just
+    # unique to your account).
+    >>> bucket = s3.create_bucket('boto-demo-%s' % int(time.time()))
+
+    # Create a new key/value pair.
+    >>> key = bucket.new_key('mykey')
+    >>> key.set_contents_from_string("Hello World!")
+
+    # Sleep to ensure the data is eventually there.
+    >>> time.sleep(2)
+
+    # Retrieve the contents of ``mykey``.
+    >>> print key.get_contents_as_string()
+    'Hello World!'
+
+    # Delete the key.
+    >>> key.delete()
+    # Delete the bucket.
+    >>> bucket.delete()
+
+Each service supports a different set of commands. You'll want to refer to the
+other guides & API references in this documentation, as well as referring to
+the `official AWS API`_ documentation.
+
+.. _`official AWS API`: https://aws.amazon.com/documentation/
+
+Next Steps
+----------
+
+For many of the services that ``boto`` supports, there are tutorials as
+well as detailed API documentation. If you are interested in a specific
+service, the tutorial for the service is a good starting point. For instance,
+if you'd like more information on S3, check out the :ref:`S3 Tutorial <s3_tut>`
+and the :doc:`S3 API reference <ref/s3>`.
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/index.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a5aa0b00c05a8a27e980e02763e960c48f5e0353
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/index.rst
@@ -0,0 +1,253 @@
+.. _index:
+
+===============================================
+boto: A Python interface to Amazon Web Services
+===============================================
+
+An integrated interface to current and future infrastructural services
+offered by `Amazon Web Services`_.
+
+Currently, all features work with Python 2.6 and 2.7. Work is under way to
+support Python 3.3+ in the same codebase. Modules are being ported one at
+a time with the help of the open source community, so please check below
+for compatibility with Python 3.3+.
+
+To port a module to Python 3.3+, please view our
+:doc:`Contributing Guidelines <contributing>` and the
+:doc:`Porting Guide <porting_guide>`. If you would like, you can open an
+issue to let others know about your work in progress. Tests **must** pass
+on Python 2.6, 2.7, 3.3, and 3.4 for pull requests to be accepted.
+
+.. _Amazon Web Services: http://aws.amazon.com/
+
+Getting Started
+---------------
+
+If you've never used ``boto`` before, you should read the
+:doc:`Getting Started with Boto <getting_started>` guide to get familiar
+with ``boto`` & its usage.
+ +Currently Supported Services +---------------------------- + +* **Compute** + + * :doc:`Elastic Compute Cloud (EC2) ` -- (:doc:`API Reference `) (Python 3) + * :doc:`Elastic MapReduce (EMR) ` -- (:doc:`API Reference `) (Python 3) + * :doc:`Auto Scaling ` -- (:doc:`API Reference `) (Python 3) + * Kinesis -- (:doc:`API Reference `) (Python 3) + * Lambda -- (:doc:`API Reference `) (Python 3) + * EC2 Container Service (ECS) -- (:doc:`API Reference `) (Python 3) + +* **Content Delivery** + + * :doc:`CloudFront ` -- (:doc:`API Reference `) (Python 3) + +* **Database** + + * :doc:`DynamoDB2 ` -- (:doc:`API Reference `) -- (:doc:`Migration Guide from v1 `) + * :doc:`DynamoDB ` -- (:doc:`API Reference `) (Python 3) + * Relational Data Services 2 (RDS) -- (:doc:`API Reference `) -- (:doc:`Migration Guide from v1 `) + * :doc:`Relational Data Services (RDS) ` -- (:doc:`API Reference `) + * ElastiCache -- (:doc:`API Reference `) (Python 3) + * Redshift -- (:doc:`API Reference `) (Python 3) + * :doc:`SimpleDB ` -- (:doc:`API Reference `) (Python 3) + +* **Deployment and Management** + + * CloudFormation -- (:doc:`API Reference `) (Python 3) + * Elastic Beanstalk -- (:doc:`API Reference `) (Python 3) + * Data Pipeline -- (:doc:`API Reference `) (Python 3) + * Opsworks -- (:doc:`API Reference `) (Python 3) + * CloudTrail -- (:doc:`API Reference `) (Python 3) + * CodeDeploy -- (:doc:`API Reference `) (Python 3) + +* **Administration & Security** + + * Identity and Access Management (IAM) -- (:doc:`API Reference `) (Python 3) + * Security Token Service (STS) -- (:doc:`API Reference `) (Python 3) + * Key Management Service (KMS) -- (:doc:`API Reference `) (Python 3) + * Config -- (:doc:`API Reference `) (Python 3) + * CloudHSM -- (:doc:`API Reference `) (Python 3) + +* **Application Services** + + * Cloudsearch 2 -- (:doc:`API Reference `) (Python 3) + * :doc:`Cloudsearch ` -- (:doc:`API Reference `) (Python 3) + * CloudSearch Domain --(:doc:`API Reference `) (Python 3) + * Elastic Transcoder -- (:doc:`API Reference `) (Python 3) + * :doc:`Simple Workflow Service (SWF) ` -- (:doc:`API Reference `) (Python 3) + * :doc:`Simple Queue Service (SQS) ` -- (:doc:`API Reference `) (Python 3) + * Simple Notification Service (SNS) -- (:doc:`API Reference `) (Python 3) + * :doc:`Simple Email Service (SES) ` -- (:doc:`API Reference `) (Python 3) + * Amazon Cognito Identity -- (:doc:`API Reference `) (Python 3) + * Amazon Cognito Sync -- (:doc:`API Reference `) (Python 3) + * Amazon Machine Learning -- (:doc:`API Reference `) (Python 3) + +* **Monitoring** + + * :doc:`CloudWatch ` -- (:doc:`API Reference `) (Python 3) + * CloudWatch Logs -- (:doc:`API Reference `) (Python 3) + +* **Networking** + + * :doc:`Route 53 ` -- (:doc:`API Reference `) (Python 3) + * Route 53 Domains -- (:doc:`API Reference `) (Python 3) + * :doc:`Virtual Private Cloud (VPC) ` -- (:doc:`API Reference `) (Python 3) + * :doc:`Elastic Load Balancing (ELB) ` -- (:doc:`API Reference `) (Python 3) + * AWS Direct Connect (Python 3) + +* **Payments & Billing** + + * Flexible Payments Service (FPS) -- (:doc:`API Reference `) + +* **Storage** + + * :doc:`Simple Storage Service (S3) ` -- (:doc:`API Reference `) (Python 3) + * Amazon Glacier -- (:doc:`API Reference `) (Python 3) + * Google Cloud Storage -- (:doc:`API Reference `) + +* **Workforce** + + * Mechanical Turk -- (:doc:`API Reference `) + +* **Other** + + * Marketplace Web Services -- (:doc:`API Reference `) (Python 3) + * :doc:`Support ` -- (:doc:`API Reference `) (Python 3) + +Additional 
Resources +-------------------- + +* :doc:`Applications Built On Boto ` +* :doc:`Command Line Utilities ` +* :doc:`Boto Config Tutorial ` +* :doc:`Contributing to Boto ` +* :doc:`Evaluating Application performance with Boto logging ` +* `Boto Source Repository`_ +* `Boto Issue Tracker`_ +* `Boto Twitter`_ +* `Follow Mitch on Twitter`_ +* Join our `IRC channel`_ (#boto on FreeNode). + +.. _Boto Issue Tracker: https://github.com/boto/boto/issues +.. _Boto Source Repository: https://github.com/boto/boto +.. _Boto Twitter: http://twitter.com/pythonboto +.. _IRC channel: http://webchat.freenode.net/?channels=boto +.. _Follow Mitch on Twitter: http://twitter.com/garnaat + + +Release Notes +------------- + +.. toctree:: + :titlesonly: + + releasenotes/v2.37.0 + releasenotes/v2.36.0 + releasenotes/v2.35.2 + releasenotes/v2.35.1 + releasenotes/v2.35.0 + releasenotes/v2.34.0 + releasenotes/v2.33.0 + releasenotes/v2.32.1 + releasenotes/v2.32.0 + releasenotes/v2.31.1 + releasenotes/v2.31.0 + releasenotes/v2.30.0 + releasenotes/v2.29.1 + releasenotes/v2.29.0 + releasenotes/v2.28.0 + releasenotes/v2.27.0 + releasenotes/v2.26.1 + releasenotes/v2.26.0 + releasenotes/v2.25.0 + releasenotes/v2.24.0 + releasenotes/v2.23.0 + releasenotes/v2.22.1 + releasenotes/v2.22.0 + releasenotes/v2.21.2 + releasenotes/v2.21.1 + releasenotes/v2.21.0 + releasenotes/v2.20.1 + releasenotes/v2.20.0 + releasenotes/v2.19.0 + releasenotes/v2.18.0 + releasenotes/v2.17.0 + releasenotes/v2.16.0 + releasenotes/v2.15.0 + releasenotes/v2.14.0 + releasenotes/v2.13.3 + releasenotes/v2.13.2 + releasenotes/v2.13.0 + releasenotes/v2.12.0 + releasenotes/v2.11.0 + releasenotes/v2.10.0 + releasenotes/v2.9.9 + releasenotes/v2.9.8 + releasenotes/v2.9.7 + releasenotes/v2.9.6 + releasenotes/v2.9.5 + releasenotes/v2.9.4 + releasenotes/v2.9.3 + releasenotes/v2.9.2 + releasenotes/v2.9.1 + releasenotes/v2.9.0 + releasenotes/v2.8.0 + releasenotes/v2.7.0 + releasenotes/v2.6.0 + releasenotes/v2.5.2 + releasenotes/v2.5.1 + releasenotes/v2.5.0 + releasenotes/v2.4.0 + releasenotes/v2.3.0 + releasenotes/v2.2.2 + releasenotes/v2.2.1 + releasenotes/v2.2.0 + releasenotes/v2.1.1 + releasenotes/v2.1.0 + releasenotes/v2.0.0 + releasenotes/v2.0b1 + + +.. toctree:: + :hidden: + :glob: + + getting_started + ec2_tut + security_groups + emr_tut + autoscale_tut + cloudfront_tut + simpledb_tut + dynamodb_tut + rds_tut + sqs_tut + ses_tut + swf_tut + cloudsearch_tut + cloudwatch_tut + vpc_tut + elb_tut + s3_tut + route53_tut + boto_config_tut + documentation + contributing + commandline + support_tut + dynamodb2_tut + migrations/dynamodb_v1_to_v2 + migrations/rds_v1_to_v2 + apps_built_on_boto + ref/* + releasenotes/* + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/migrations/dynamodb_v1_to_v2.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/migrations/dynamodb_v1_to_v2.rst new file mode 100644 index 0000000000000000000000000000000000000000..d90c3d64b6c89ec3ded8dbfe025214a06ccc5ce9 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/migrations/dynamodb_v1_to_v2.rst @@ -0,0 +1,366 @@ +.. dynamodb_v1_to_v2: + +========================================= +Migrating from DynamoDB v1 to DynamoDB v2 +========================================= + +For the v2 release of AWS' DynamoDB_, the high-level API for interacting via +``boto`` was rewritten. 
+Since there were several new features added in v2,
+people using the v1 API may wish to transition their code to the new API.
+This guide covers the high-level APIs.
+
+.. _DynamoDB: http://aws.amazon.com/dynamodb/
+
+
+Creating New Tables
+===================
+
+DynamoDB v1::
+
+    >>> import boto.dynamodb
+    >>> conn = boto.dynamodb.connect_to_region()
+    >>> message_table_schema = conn.create_schema(
+    ...     hash_key_name='forum_name',
+    ...     hash_key_proto_value=str,
+    ...     range_key_name='subject',
+    ...     range_key_proto_value=str
+    ... )
+    >>> table = conn.create_table(
+    ...     name='messages',
+    ...     schema=message_table_schema,
+    ...     read_units=10,
+    ...     write_units=10
+    ... )
+
+DynamoDB v2::
+
+    >>> from boto.dynamodb2.fields import HashKey
+    >>> from boto.dynamodb2.fields import RangeKey
+    >>> from boto.dynamodb2.table import Table
+
+    >>> table = Table.create('messages', schema=[
+    ...     HashKey('forum_name'),
+    ...     RangeKey('subject'),
+    ... ], throughput={
+    ...     'read': 10,
+    ...     'write': 10,
+    ... })
+
+
+Using an Existing Table
+=======================
+
+DynamoDB v1::
+
+    >>> import boto.dynamodb
+    >>> conn = boto.dynamodb.connect_to_region()
+    # With API calls.
+    >>> table = conn.get_table('messages')
+
+    # Without API calls.
+    >>> message_table_schema = conn.create_schema(
+    ...     hash_key_name='forum_name',
+    ...     hash_key_proto_value=str,
+    ...     range_key_name='subject',
+    ...     range_key_proto_value=str
+    ... )
+    >>> table = conn.table_from_schema(
+    ...     name='messages',
+    ...     schema=message_table_schema)
+
+
+DynamoDB v2::
+
+    >>> from boto.dynamodb2.table import Table
+    # With API calls.
+    >>> table = Table('messages')
+
+    # Without API calls.
+    >>> from boto.dynamodb2.fields import HashKey
+    >>> from boto.dynamodb2.fields import RangeKey
+    >>> from boto.dynamodb2.table import Table
+    >>> table = Table('messages', schema=[
+    ...     HashKey('forum_name'),
+    ...     RangeKey('subject'),
+    ... ])
+
+
+Updating Throughput
+===================
+
+DynamoDB v1::
+
+    >>> import boto.dynamodb
+    >>> conn = boto.dynamodb.connect_to_region()
+    >>> table = conn.get_table('messages')
+    >>> conn.update_throughput(table, read_units=5, write_units=15)
+
+DynamoDB v2::
+
+    >>> from boto.dynamodb2.table import Table
+    >>> table = Table('messages')
+    >>> table.update(throughput={
+    ...     'read': 5,
+    ...     'write': 15,
+    ... })
+
+
+Deleting a Table
+================
+
+DynamoDB v1::
+
+    >>> import boto.dynamodb
+    >>> conn = boto.dynamodb.connect_to_region()
+    >>> table = conn.get_table('messages')
+    >>> conn.delete_table(table)
+
+DynamoDB v2::
+
+    >>> from boto.dynamodb2.table import Table
+    >>> table = Table('messages')
+    >>> table.delete()
+
+
+Creating an Item
+================
+
+DynamoDB v1::
+
+    >>> import boto.dynamodb
+    >>> conn = boto.dynamodb.connect_to_region()
+    >>> table = conn.get_table('messages')
+    >>> item_data = {
+    ...     'Body': 'http://url_to_lolcat.gif',
+    ...     'SentBy': 'User A',
+    ...     'ReceivedTime': '12/9/2011 11:36:03 PM',
+    ... }
+    >>> item = table.new_item(
+    ...     # Our hash key is 'forum'
+    ...     hash_key='LOLCat Forum',
+    ...     # Our range key is 'subject'
+    ...     range_key='Check this out!',
+    ...     # This has the remaining attributes.
+    ...     attrs=item_data
+    ... )
+
+DynamoDB v2::
+
+    >>> from boto.dynamodb2.table import Table
+    >>> table = Table('messages')
+    >>> item = table.put_item(data={
+    ...     'forum_name': 'LOLCat Forum',
+    ...     'subject': 'Check this out!',
+    ...     'Body': 'http://url_to_lolcat.gif',
+    ...     'SentBy': 'User A',
+    ...     'ReceivedTime': '12/9/2011 11:36:03 PM',
+    ... })
+
+
+Getting an Existing Item
+========================
+
+DynamoDB v1::
+
+    >>> table = conn.get_table('messages')
+    >>> item = table.get_item(
+    ...     hash_key='LOLCat Forum',
+    ...     range_key='Check this out!'
+    ... )
+
+DynamoDB v2::
+
+    >>> table = Table('messages')
+    >>> item = table.get_item(
+    ...     forum_name='LOLCat Forum',
+    ...     subject='Check this out!'
+    ... )
+
+
+Updating an Item
+================
+
+DynamoDB v1::
+
+    >>> item['a_new_key'] = 'testing'
+    >>> del item['a_new_key']
+    >>> item.put()
+
+DynamoDB v2::
+
+    >>> item['a_new_key'] = 'testing'
+    >>> del item['a_new_key']
+
+    # Conditional save, only if data hasn't changed.
+    >>> item.save()
+
+    # Forced full overwrite.
+    >>> item.save(overwrite=True)
+
+    # Partial update (only changed fields).
+    >>> item.partial_save()
+
+
+Deleting an Item
+================
+
+DynamoDB v1::
+
+    >>> item.delete()
+
+DynamoDB v2::
+
+    >>> item.delete()
+
+
+Querying
+========
+
+DynamoDB v1::
+
+    >>> import boto.dynamodb
+    >>> conn = boto.dynamodb.connect_to_region()
+    >>> table = conn.get_table('messages')
+    >>> from boto.dynamodb.condition import BEGINS_WITH
+    >>> items = table.query('Amazon DynamoDB',
+    ...     range_key_condition=BEGINS_WITH('DynamoDB'),
+    ...     request_limit=1, max_results=1)
+    >>> for item in items:
+    ...     print item['Body']
+
+DynamoDB v2::
+
+    >>> from boto.dynamodb2.table import Table
+    >>> table = Table('messages')
+    >>> items = table.query_2(
+    ...     forum_name__eq='Amazon DynamoDB',
+    ...     subject__beginswith='DynamoDB',
+    ...     limit=1
+    ... )
+    >>> for item in items:
+    ...     print item['Body']
+
+
+Scans
+=====
+
+DynamoDB v1::
+
+    >>> import boto.dynamodb
+    >>> conn = boto.dynamodb.connect_to_region()
+    >>> table = conn.get_table('messages')
+    >>> from boto.dynamodb.condition import GT
+
+    # All items.
+    >>> items = table.scan()
+
+    # With a filter.
+    >>> items = table.scan(scan_filter={'Replies': GT(0)})
+
+DynamoDB v2::
+
+    >>> from boto.dynamodb2.table import Table
+    >>> table = Table('messages')
+
+    # All items.
+    >>> items = table.scan()
+
+    # With a filter.
+    >>> items = table.scan(replies__gt=0)
+
+
+Batch Gets
+==========
+
+DynamoDB v1::
+
+    >>> import boto.dynamodb
+    >>> conn = boto.dynamodb.connect_to_region()
+    >>> table = conn.get_table('messages')
+    >>> from boto.dynamodb.batch import BatchList
+    >>> the_batch = BatchList(conn)
+    >>> the_batch.add_batch(table, keys=[
+    ...     ('LOLCat Forum', 'Check this out!'),
+    ...     ('LOLCat Forum', 'I can haz docs?'),
+    ...     ('LOLCat Forum', 'Maru'),
+    ... ])
+    >>> results = conn.batch_get_item(the_batch)
+
+    # (Largely) Raw dictionaries back from DynamoDB.
+    >>> for item_dict in results['Responses'][table.name]['Items']:
+    ...     print item_dict['Body']
+
+DynamoDB v2::
+
+    >>> from boto.dynamodb2.table import Table
+    >>> table = Table('messages')
+    >>> results = table.batch_get(keys=[
+    ...     {'forum_name': 'LOLCat Forum', 'subject': 'Check this out!'},
+    ...     {'forum_name': 'LOLCat Forum', 'subject': 'I can haz docs?'},
+    ...     {'forum_name': 'LOLCat Forum', 'subject': 'Maru'},
+    ... ])
+
+    # Lazy requests across pages, if paginated.
+    >>> for res in results:
+    ...     # You get back actual ``Item`` instances.
+    ...     print res['Body']
+
+
+Batch Writes
+============
+
+DynamoDB v1::
+
+    >>> import boto.dynamodb
+    >>> conn = boto.dynamodb.connect_to_region()
+    >>> table = conn.get_table('messages')
+    >>> from boto.dynamodb.batch import BatchWriteList
+    >>> from boto.dynamodb.item import Item
+
+    # You must manually manage this so that your total ``puts/deletes`` don't
+    # exceed 25.
+
+
+Scans
+=====
+
+DynamoDB v1::
+
+    >>> import boto.dynamodb
+    >>> conn = boto.dynamodb.connect_to_region('us-west-2')
+    >>> table = conn.get_table('messages')
+    >>> from boto.dynamodb.condition import GT
+
+    # All items.
+    >>> items = table.scan()
+
+    # With a filter.
+    >>> items = table.scan(scan_filter={'Replies': GT(0)})
+
+DynamoDB v2::
+
+    >>> from boto.dynamodb2.table import Table
+    >>> table = Table('messages')
+
+    # All items.
+    >>> items = table.scan()
+
+    # With a filter.
+    >>> items = table.scan(replies__gt=0)
+
+
+Batch Gets
+==========
+
+DynamoDB v1::
+
+    >>> import boto.dynamodb
+    >>> conn = boto.dynamodb.connect_to_region('us-west-2')
+    >>> table = conn.get_table('messages')
+    >>> from boto.dynamodb.batch import BatchList
+    >>> the_batch = BatchList(conn)
+    >>> the_batch.add_batch(table, keys=[
+    ...     ('LOLCat Forum', 'Check this out!'),
+    ...     ('LOLCat Forum', 'I can haz docs?'),
+    ...     ('LOLCat Forum', 'Maru'),
+    ... ])
+    >>> results = conn.batch_get_item(the_batch)
+
+    # (Largely) Raw dictionaries back from DynamoDB.
+    >>> for item_dict in results['Responses'][table.name]['Items']:
+    ...     print item_dict['Body']
+
+DynamoDB v2::
+
+    >>> from boto.dynamodb2.table import Table
+    >>> table = Table('messages')
+    >>> results = table.batch_get(keys=[
+    ...     {'forum_name': 'LOLCat Forum', 'subject': 'Check this out!'},
+    ...     {'forum_name': 'LOLCat Forum', 'subject': 'I can haz docs?'},
+    ...     {'forum_name': 'LOLCat Forum', 'subject': 'Maru'},
+    ... ])
+
+    # Lazy requests across pages, if paginated.
+    >>> for res in results:
+    ...     # You get back actual ``Item`` instances.
+    ...     print res['Body']
+
+
+Batch Writes
+============
+
+DynamoDB v1::
+
+    >>> import boto.dynamodb
+    >>> conn = boto.dynamodb.connect_to_region('us-west-2')
+    >>> table = conn.get_table('messages')
+    >>> from boto.dynamodb.batch import BatchWriteList
+    >>> from boto.dynamodb.item import Item
+
+    # You must manually manage this so that your total ``puts/deletes`` don't
+    # exceed 25.
+    >>> the_batch = BatchWriteList(conn)
+    >>> the_batch.add_batch(table, puts=[
+    ...     Item(table, 'Corgi Fanciers', 'Sploots!', {
+    ...         'Body': 'Post your favorite corgi-on-the-floor shots!',
+    ...         'SentBy': 'User B',
+    ...         'ReceivedTime': '2013/05/02 10:56:45 AM',
+    ...     }),
+    ...     Item(table, 'Corgi Fanciers', 'Maximum FRAPS', {
+    ...         'Body': 'http://internetvideosite/watch?v=1247869',
+    ...         'SentBy': 'User C',
+    ...         'ReceivedTime': '2013/05/01 09:15:25 PM',
+    ...     }),
+    ... ], deletes=[
+    ...     ('LOLCat Forum', 'Off-topic post'),
+    ...     ('LOLCat Forum', 'They be stealin mah bukket!'),
+    ... ])
+    >>> conn.batch_write_item(the_batch)
+
+DynamoDB v2::
+
+    >>> from boto.dynamodb2.table import Table
+    >>> table = Table('messages')
+
+    # Uses a context manager, which also automatically handles batch sizes.
+    >>> with table.batch_write() as batch:
+    ...     batch.delete_item(
+    ...         forum_name='LOLCat Forum',
+    ...         subject='Off-topic post'
+    ...     )
+    ...     batch.put_item(data={
+    ...         'forum_name': 'Corgi Fanciers',
+    ...         'subject': 'Sploots!',
+    ...         'Body': 'Post your favorite corgi-on-the-floor shots!',
+    ...         'SentBy': 'User B',
+    ...         'ReceivedTime': '2013/05/02 10:56:45 AM',
+    ...     })
+    ...     batch.put_item(data={
+    ...         'forum_name': 'Corgi Fanciers',
+    ...         'subject': 'Maximum FRAPS',
+    ...         'Body': 'http://internetvideosite/watch?v=1247869',
+    ...         'SentBy': 'User C',
+    ...         'ReceivedTime': '2013/05/01 09:15:25 PM',
+    ...     })
+    ...     batch.delete_item(
+    ...         forum_name='LOLCat Forum',
+    ...         subject='They be stealin mah bukket!'
+    ...     )
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/migrations/rds_v1_to_v2.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/migrations/rds_v1_to_v2.rst
new file mode 100644
index 0000000000000000000000000000000000000000..944288cbd35c6b9ff9da5d247b680a21b3000dd8
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/migrations/rds_v1_to_v2.rst
@@ -0,0 +1,91 @@
+.. _rds_v1_to_v2:
+
+===============================
+Migrating from RDS v1 to RDS v2
+===============================
+
+The original ``boto.rds`` module has historically lagged quite far behind the
+service (at the time of writing, almost 50% of the API calls are
+missing/out-of-date). To address this, the Boto core team has switched to
+a generated client for RDS (``boto.rds2.layer1.RDSConnection``).
+
+However, this generated variant is not backward-compatible with the older
+``boto.rds.RDSConnection``. This document will help you update your code
+(as desired) to take advantage of the latest API calls.
+
+Throughout this document, **RDS2Connection** refers to
+``boto.rds2.layer1.RDSConnection``, while **RDSConnection** refers to
+``boto.rds.RDSConnection``.
+
+
+Prominent Differences
+=====================
+
+* The new **RDS2Connection** maps very closely to the `official API operations`_,
+  whereas the old **RDSConnection** had non-standard & inconsistent method names.
+* **RDS2Connection** almost always returns a Python dictionary that maps
+  closely to the API output. **RDSConnection** returned Python objects.
+* **RDS2Connection** is much more verbose in terms of output. Tools like
+  `jmespath`_ or `jsonq`_ can make handling these sometimes complex dictionaries
+  more manageable; see the sketch below.
+
+.. _`official API operations`: http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/Welcome.html
+.. _`jmespath`: https://github.com/boto/jmespath
+.. _`jsonq`: https://github.com/edmund-huber/jsonq
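+
+For example, a minimal sketch (using `jmespath`_, the ``rds2_conn`` from the
+examples below, and assuming the nested response shape shown under *Return
+Values* below) that pulls every database name out of a
+``describe_db_instances`` response::
+
+    >>> import jmespath
+    >>> response = rds2_conn.describe_db_instances()
+    >>> jmespath.search(
+    ...     'DescribeDBInstancesResponse.DescribeDBInstancesResult'
+    ...     '.DBInstances[*].DBName', response)
+    [u'test-db']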
+
+
+Method Renames
+==============
+
+Format is ``old_method_name`` -> ``new_method_name``:
+
+* ``authorize_dbsecurity_group`` -> ``authorize_db_security_group_ingress``
+* ``create_dbinstance`` -> ``create_db_instance``
+* ``create_dbinstance_read_replica`` -> ``create_db_instance_read_replica``
+* ``create_parameter_group`` -> ``create_db_parameter_group``
+* ``get_all_dbsnapshots`` -> ``describe_db_snapshots``
+* ``get_all_events`` -> ``describe_events``
+* ``modify_dbinstance`` -> ``modify_db_instance``
+* ``reboot_dbinstance`` -> ``reboot_db_instance``
+* ``restore_dbinstance_from_dbsnapshot`` -> ``restore_db_instance_from_db_snapshot``
+* ``restore_dbinstance_from_point_in_time`` -> ``restore_db_instance_to_point_in_time``
+* ``revoke_dbsecurity_group`` -> ``revoke_db_security_group_ingress``
+
+
+Parameter Changes
+=================
+
+Many parameter names have changed between **RDSConnection** &
+**RDS2Connection**. For instance, the old name for the instance identifier was
+``id``, while the new name is ``db_instance_identifier``. These changes ensure
+things map more closely to the API.
+
+In addition, in some cases, the ordering & required-ness of parameters have
+changed as well. For instance, in ``create_db_instance``, the
+``engine`` parameter is now required (previously it defaulted to ``MySQL5.1``) &
+its position in the call has changed to come before ``master_username``.
+
+As such, when updating your API calls, you should check the
+API Reference documentation to ensure you're passing the
+correct parameters.
+
+
+Return Values
+=============
+
+**RDSConnection** frequently returned higher-level Python objects. In contrast,
+**RDS2Connection** returns Python dictionaries of the data. This will require
+a bit more work to extract the necessary values. For example::
+
+    # Old
+    >>> instances = rds1_conn.get_all_dbinstances()
+    >>> inst = instances[0]
+    >>> inst.name
+    'test-db'
+
+    # New
+    >>> instances = rds2_conn.describe_db_instances()
+    >>> inst = instances['DescribeDBInstancesResponse']\
+    ...     ['DescribeDBInstancesResult']['DBInstances'][0]
+    >>> inst['DBName']
+    'test-db'
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/porting_guide.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/porting_guide.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1fd77f73540e1d11cb1e005c00305d54a7869ffc
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/porting_guide.rst
@@ -0,0 +1,67 @@
+Porting Guide
+=============
+Boto supports Python versions 2.6, 2.7, 3.3, and 3.4. Python 3 support
+is on a per-module basis. This guide will help you get started porting
+a Boto module to Python 3.
+
+Please read the :doc:`Contributing Guide <contributing>` before getting
+started.
+
+Compat Module
+-------------
+Boto ships with a ``boto.compat`` module that helps abstract the
+differences between Python versions. A vendored version of the ``six``
+module is exposed through ``boto.compat.six``, as well as a handful of
+moved functions used throughout the codebase::
+
+    # Import the six module
+    from boto.compat import six
+
+    # Other useful imports
+    from boto.compat import BytesIO, StringIO
+    from boto.compat import http_client
+    from boto.compat import urlparse
+
+Please check the ``boto.compat`` module before writing your own logic
+around specialized behavior for different Python versions. Feel free
+to add new functionality here, too.
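+
+For instance, a small py2/py3-safe snippet built only from the imports
+above (a sketch)::
+
+    >>> from boto.compat import six, StringIO
+    >>> isinstance(u'some text', six.string_types)
+    True
+    >>> buf = StringIO()
+    >>> _ = buf.write('works the same on 2.x and 3.x')
+    >>> buf.getvalue()
+    'works the same on 2.x and 3.x'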
+
+Porting Steps
+-------------
+Please follow these steps when porting a module:
+
+* Install the supported Python versions and run ``pip install tox``
+* Port your module to support Python 3. These help:
+
+  * `Six documentation`_
+  * `Porting to Python 3 An in-depth guide`_
+  * `Porting to Python 3 Redux`_
+
+* Whitelist your module's unit tests in ``tests/test.py``
+* Make sure unit tests pass by running ``tox``
+* Try running integration tests::
+
+      tox tests/integration/yourmodule
+
+      # You can also run against a specific Python version:
+      tox -e py26 tests/integration/yourmodule
+
+* Fix any failing tests. This is the fun part!
+* If code you modified is not covered by tests, try to cover it with
+  existing tests or write new tests. Here is how you can generate a
+  coverage report in ``cover/index.html``::
+
+      # Run a test with coverage
+      tox -e py33 -- default --with-coverage --cover-html --cover-package boto
+
+* Update ``README.rst`` and ``docs/source/index.rst`` to label your module
+  as supporting Python 3
+* Submit a pull request!
+
+Note: We try our best to clean up resources after a test runs, but you should
+double check that no resources are left after integration tests run. If they
+are, then you will be charged for them!
+
+.. _Six documentation: http://pythonhosted.org/six/
+.. _Porting to Python 3 An in-depth guide: http://python3porting.com/
+.. _Porting to Python 3 Redux: http://lucumr.pocoo.org/2013/5/21/porting-to-python-3-redux/
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/rds_tut.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/rds_tut.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e648528d1b290d92093528566f84ae4c8670b6b3
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/rds_tut.rst
@@ -0,0 +1,117 @@
+.. _rds_tut:
+
+=======================================
+An Introduction to boto's RDS interface
+=======================================
+
+This tutorial focuses on the boto interface to the Relational Database Service
+from Amazon Web Services. This tutorial assumes that you have boto already
+downloaded and installed, and that you wish to set up a MySQL instance in RDS.
+
+.. warning::
+
+    This tutorial covers the **ORIGINAL** module for RDS.
+    It has since been supplanted by a second major version & an
+    updated API complete with all service operations. The documentation for the
+    new version of boto's support for RDS is at
+    :doc:`RDS v2 <ref/rds2>`.
+
+
+Creating a Connection
+---------------------
+The first step in accessing RDS is to create a connection to the service.
+The recommended method of doing this is as follows::
+
+    >>> import boto.rds
+    >>> conn = boto.rds.connect_to_region(
+    ...     "us-west-2",
+    ...     aws_access_key_id='<aws access key>',
+    ...     aws_secret_access_key='<aws secret key>')
+
+At this point the variable ``conn`` will point to an RDSConnection object in
+the US-WEST-2 region. Bear in mind that, just as with any other AWS service,
+RDS is region-specific. In this example, the AWS access key and AWS secret key
+are passed in to the method explicitly. Alternatively, you can set the
+environment variables:
+
+* ``AWS_ACCESS_KEY_ID`` - Your AWS Access Key ID
+* ``AWS_SECRET_ACCESS_KEY`` - Your AWS Secret Access Key
+
+and then simply call::
+
+    >>> import boto.rds
+    >>> conn = boto.rds.connect_to_region("us-west-2")
+
+In either case, ``conn`` will point to an RDSConnection object which we will
+use throughout the remainder of this tutorial.
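+
+If you are unsure which region names are valid, ``boto.rds.regions()`` returns
+the ``RegionInfo`` objects boto knows about (a quick sketch; the exact list
+depends on your boto version)::
+
+    >>> import boto.rds
+    >>> [r.name for r in boto.rds.regions()]
+    ['us-east-1', 'us-west-2', ...]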
+
+Starting an RDS Instance
+------------------------
+
+Creating a DB instance is easy. You can do so as follows::
+
+    >>> db = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
+
+This example would create a DB instance identified as ``db-master-1`` with
+10GB of storage, running on a ``db.m1.small`` instance type, with login name
+``root`` and password ``hunter2``.
+
+To check on the status of your RDS instance, you will have to query the RDS
+connection again::
+
+    >>> instances = conn.get_all_dbinstances("db-master-1")
+    >>> instances
+    [DBInstance:db-master-1]
+    >>> db = instances[0]
+    >>> db.status
+    u'available'
+    >>> db.endpoint
+    (u'db-master-1.aaaaaaaaaa.us-west-2.rds.amazonaws.com', 3306)
+
+Creating a Security Group
+-------------------------
+
+Before you can actually connect to this RDS service, you must first
+create a security group. You can add a CIDR range or an :py:class:`EC2 security
+group <boto.ec2.securitygroup.SecurityGroup>` to your :py:class:`DB security
+group <boto.rds.dbsecuritygroup.DBSecurityGroup>`::
+
+    >>> sg = conn.create_dbsecurity_group('web_servers', 'Web front-ends')
+    >>> sg.authorize(cidr_ip='10.3.2.45/32')
+    True
+
+You can then associate this security group with your RDS instance::
+
+    >>> db.modify(security_groups=[sg])
+
+
+Connecting to your New Database
+-------------------------------
+
+Once you have reached this step, you can connect to your RDS instance as you
+would with any other MySQL instance::
+
+    >>> db.endpoint
+    (u'db-master-1.aaaaaaaaaa.us-west-2.rds.amazonaws.com', 3306)
+
+    % mysql -h db-master-1.aaaaaaaaaa.us-west-2.rds.amazonaws.com -u root -phunter2
+    mysql>
+
+
+Making a Backup
+---------------
+
+You can also create snapshots of your database very easily::
+
+    >>> db.snapshot('db-master-1-2013-02-05')
+    DBSnapshot:db-master-1-2013-02-05
+
+
+Once this snapshot is complete, you can create a new database instance from
+it::
+
+    >>> db2 = conn.restore_dbinstance_from_dbsnapshot(
+    ...     'db-master-1-2013-02-05',
+    ...     'db-restored-1',
+    ...     'db.m1.small',
+    ...     availability_zone='us-west-2a')
+
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/autoscale.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/autoscale.rst
new file mode 100644
index 0000000000000000000000000000000000000000..aed3d5269c4fbcdddaa18f2926f070928994e8da
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/autoscale.rst
@@ -0,0 +1,70 @@
+.. ref-autoscale
+
+======================
+Auto Scaling Reference
+======================
+
+boto.ec2.autoscale
+------------------
+
+.. automodule:: boto.ec2.autoscale
+   :members:
+   :undoc-members:
+
+boto.ec2.autoscale.activity
+---------------------------
+
+.. automodule:: boto.ec2.autoscale.activity
+   :members:
+   :undoc-members:
+
+boto.ec2.autoscale.group
+------------------------
+
+.. automodule:: boto.ec2.autoscale.group
+   :members:
+   :undoc-members:
+
+
+boto.ec2.autoscale.instance
+---------------------------
+
+.. automodule:: boto.ec2.autoscale.instance
+   :members:
+   :undoc-members:
+
+boto.ec2.autoscale.launchconfig
+-------------------------------
+
+.. automodule:: boto.ec2.autoscale.launchconfig
+   :members:
+   :undoc-members:
+
+boto.ec2.autoscale.policy
+-------------------------
+
+.. automodule:: boto.ec2.autoscale.policy
+   :members:
+   :undoc-members:
+
+boto.ec2.autoscale.request
+--------------------------
+
+.. automodule:: boto.ec2.autoscale.request
+   :members:
+   :undoc-members:
+
+boto.ec2.autoscale.scheduled
+----------------------------
+
+.. automodule:: boto.ec2.autoscale.scheduled
+   :members:
+   :undoc-members:
+
+
+boto.ec2.autoscale.tag
+----------------------
+
+..
automodule:: boto.ec2.autoscale.tag + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/awslamba.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/awslamba.rst new file mode 100644 index 0000000000000000000000000000000000000000..68db54678d7b2bb563a0b9b68a399b67bf888a0f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/awslamba.rst @@ -0,0 +1,26 @@ +.. ref-awslambda + +========== +AWS Lambda +========== + +boto.awslambda +-------------- + +.. automodule:: boto.awslambda + :members: + :undoc-members: + +boto.awslambda.layer1 +--------------------- + +.. automodule:: boto.awslambda.layer1 + :members: + :undoc-members: + +boto.awslambda.exceptions +------------------------- + +.. automodule:: boto.awslambda.exceptions + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/beanstalk.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/beanstalk.rst new file mode 100644 index 0000000000000000000000000000000000000000..e65a468c0809d5408534bdb28ff0919a9a19d441 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/beanstalk.rst @@ -0,0 +1,26 @@ +.. ref-beanstalk + +================= +Elastic Beanstalk +================= + +boto.beanstalk +-------------- + +.. automodule:: boto.beanstalk + :members: + :undoc-members: + +boto.beanstalk.layer1 +--------------------- + +.. automodule:: boto.beanstalk.layer1 + :members: + :undoc-members: + +boto.beanstalk.response +----------------------- + +.. automodule:: boto.beanstalk.response + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/boto.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/boto.rst new file mode 100644 index 0000000000000000000000000000000000000000..5a241b34601d17196b82bde5857dfd8614c34132 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/boto.rst @@ -0,0 +1,47 @@ +.. _ref-boto: + +==== +boto +==== + +boto +---- + +.. automodule:: boto + :members: + :undoc-members: + +boto.connection +--------------- + +.. automodule:: boto.connection + :members: + :undoc-members: + +boto.exception +-------------- + +.. automodule:: boto.exception + :members: + :undoc-members: + +boto.handler +------------ + +.. automodule:: boto.handler + :members: + :undoc-members: + +boto.resultset +-------------- + +.. automodule:: boto.resultset + :members: + :undoc-members: + +boto.utils +---------- + +.. automodule:: boto.utils + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudformation.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudformation.rst new file mode 100644 index 0000000000000000000000000000000000000000..3e0ab41fbb56c699036642241784a4ec7525b1b7 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudformation.rst @@ -0,0 +1,34 @@ +.. ref-cloudformation + +============== +cloudformation +============== + +boto.cloudformation +------------------- + +.. automodule:: boto.cloudformation + :members: + :undoc-members: + +boto.cloudformation.connection +------------------------------ + +.. automodule:: boto.cloudformation.connection + :members: + :undoc-members: + +boto.cloudformation.stack +------------------------- + +.. automodule:: boto.cloudformation.stack + :members: + :undoc-members: + +boto.cloudformation.template +---------------------------- + +.. 
automodule:: boto.cloudformation.template + :members: + :undoc-members: + diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudfront.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudfront.rst new file mode 100644 index 0000000000000000000000000000000000000000..5b8df140a1d204de3fc7664c421739f7cd09c20b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudfront.rst @@ -0,0 +1,68 @@ +.. ref-cloudfront + +========== +CloudFront +========== + +boto.cloudfront +--------------- + +.. automodule:: boto.cloudfront + :members: + :undoc-members: + +boto.cloudfront.distribution +---------------------------- + +.. automodule:: boto.cloudfront.distribution + :members: + :undoc-members: + +boto.cloudfront.origin +---------------------- + +.. automodule:: boto.cloudfront.origin + :members: + :undoc-members: + +boto.cloudfront.identity +------------------------ + +.. automodule:: boto.cloudfront.identity + :members: + :undoc-members: + +boto.cloudfront.signers +----------------------- + +.. automodule:: boto.cloudfront.signers + :members: + :undoc-members: + +boto.cloudfront.invalidation +---------------------------- + +.. automodule:: boto.cloudfront.invalidation + :members: + :undoc-members: + +boto.cloudfront.object +---------------------- + +.. automodule:: boto.cloudfront.object + :members: + :undoc-members: + +boto.cloudfront.logging +----------------------- + +.. automodule:: boto.cloudfront.logging + :members: + :undoc-members: + +boto.cloudfront.exception +------------------------- + +.. automodule:: boto.cloudfront.exception + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudhsm.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudhsm.rst new file mode 100644 index 0000000000000000000000000000000000000000..591c5594583a531bd9d47527982c19b506d2df93 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudhsm.rst @@ -0,0 +1,26 @@ +.. ref-cloudhsm + +======== +CloudHSM +======== + +boto.cloudhsm +------------- + +.. automodule:: boto.cloudhsm + :members: + :undoc-members: + +boto.cloudhsm.layer1 +-------------------- + +.. automodule:: boto.cloudhsm.layer1 + :members: + :undoc-members: + +boto.cloudhsm.exceptions +------------------------ + +.. automodule:: boto.cloudhsm.exceptions + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudsearch.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudsearch.rst new file mode 100644 index 0000000000000000000000000000000000000000..bac2d8666371b1b84440745aa8f1a73c09199053 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudsearch.rst @@ -0,0 +1,61 @@ +.. ref-cloudsearch + +=========== +Cloudsearch +=========== + +boto.cloudsearch +---------------- + +.. automodule:: boto.cloudsearch + :members: + :undoc-members: + +boto.cloudsearch.domain +----------------------- + +.. automodule:: boto.cloudsearch.domain + :members: + :undoc-members: + +boto.cloudsearch.exceptions +----------------------- + +.. automodule:: boto.cloudsearch.exceptions + :members: + :undoc-members: + +boto.cloudsearch.layer1 +----------------------- + +.. automodule:: boto.cloudsearch.layer1 + :members: + :undoc-members: + +boto.cloudsearch.layer2 +----------------------- + +.. automodule:: boto.cloudsearch.layer2 + :members: + :undoc-members: + +boto.cloudsearch.optionstatus +----------------------------- + +.. 
automodule:: boto.cloudsearch.optionstatus + :members: + :undoc-members: + +boto.cloudsearch.search +----------------------- + +.. automodule:: boto.cloudsearch.search + :members: + :undoc-members: + +boto.cloudsearch.document +------------------------- + +.. automodule:: boto.cloudsearch.document + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudsearch2.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudsearch2.rst new file mode 100644 index 0000000000000000000000000000000000000000..64edff19b71bfadfe251685f476e3cd68d2a9bd3 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudsearch2.rst @@ -0,0 +1,54 @@ +.. ref-cloudsearch2 + +=========== +Cloudsearch +=========== + +boto.cloudsearch2 +----------------- + +.. automodule:: boto.cloudsearch2 + :members: + :undoc-members: + +boto.cloudsearch2.domain +------------------------ + +.. automodule:: boto.cloudsearch2.domain + :members: + :undoc-members: + +boto.cloudsearch2.layer1 +------------------------ + +.. automodule:: boto.cloudsearch2.layer1 + :members: + :undoc-members: + +boto.cloudsearch2.layer2 +------------------------ + +.. automodule:: boto.cloudsearch2.layer2 + :members: + :undoc-members: + +boto.cloudsearch2.optionstatus +------------------------------ + +.. automodule:: boto.cloudsearch2.optionstatus + :members: + :undoc-members: + +boto.cloudsearch2.search +------------------------ + +.. automodule:: boto.cloudsearch2.search + :members: + :undoc-members: + +boto.cloudsearch2.document +-------------------------- + +.. automodule:: boto.cloudsearch2.document + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudsearchdomain.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudsearchdomain.rst new file mode 100644 index 0000000000000000000000000000000000000000..72c0f0ad2d3eb9a8713c00fa53501677af009a16 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudsearchdomain.rst @@ -0,0 +1,26 @@ +.. ref-cloudsearchdomain + +================== +CloudSearch Domain +================== + +boto.cloudsearchdomain +---------------------- + +.. automodule:: boto.cloudsearchdomain + :members: + :undoc-members: + +boto.cloudsearchdomain.layer1 +----------------------------- + +.. automodule:: boto.cloudsearchdomain.layer1 + :members: + :undoc-members: + +boto.cloudsearchdomain.exceptions +--------------------------------- + +.. automodule:: boto.cloudsearchdomain.exceptions + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudtrail.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudtrail.rst new file mode 100644 index 0000000000000000000000000000000000000000..a2ae61222b208aaa4e5f61ed676c9eabdfe6ae31 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudtrail.rst @@ -0,0 +1,26 @@ +.. _ref-cloudtrail: + +========== +CloudTrail +========== + +boto.cloudtrail +--------------- + +.. automodule:: boto.cloudtrail + :members: + :undoc-members: + +boto.cloudtrail.layer1 +---------------------- + +.. automodule:: boto.cloudtrail.layer1 + :members: + :undoc-members: + +boto.cloudtrail.exceptions +-------------------------- + +.. 
automodule:: boto.cloudtrail.exceptions + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudwatch.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudwatch.rst new file mode 100644 index 0000000000000000000000000000000000000000..ae38d89bb385a008b6b6fed60a753298cfdb6702 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cloudwatch.rst @@ -0,0 +1,34 @@ +.. ref-cloudwatch + +==================== +CloudWatch Reference +==================== + +boto.ec2.cloudwatch +------------------- + +.. automodule:: boto.ec2.cloudwatch + :members: + :undoc-members: + +boto.ec2.cloudwatch.datapoint +----------------------------- + +.. automodule:: boto.ec2.cloudwatch.datapoint + :members: + :undoc-members: + +boto.ec2.cloudwatch.metric +-------------------------- + +.. automodule:: boto.ec2.cloudwatch.metric + :members: + :undoc-members: + +boto.ec2.cloudwatch.alarm +-------------------------- + +.. automodule:: boto.ec2.cloudwatch.alarm + :members: + :undoc-members: + diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/codedeploy.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/codedeploy.rst new file mode 100644 index 0000000000000000000000000000000000000000..3e3998a4589b230851cd9082a2a59407e52606df --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/codedeploy.rst @@ -0,0 +1,26 @@ +.. ref-codedeploy + +========== +CodeDeploy +========== + +boto.codedeploy +--------------- + +.. automodule:: boto.codedeploy + :members: + :undoc-members: + +boto.codedeploy.layer1 +------------------- + +.. automodule:: boto.codedeploy.layer1 + :members: + :undoc-members: + +boto.codedeploy.exceptions +----------------------- + +.. automodule:: boto.codedeploy.exceptions + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cognito-identity.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cognito-identity.rst new file mode 100644 index 0000000000000000000000000000000000000000..65ec98ff9cd238d74de4ef85eec7b040471c9f89 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cognito-identity.rst @@ -0,0 +1,26 @@ +.. ref-cognito-identity + +================ +Cognito Identity +================ + +boto.cognito.identity +--------------------- + +.. automodule:: boto.cognito.identity + :members: + :undoc-members: + +boto.cognito.identity.layer1 +---------------------------- + +.. automodule:: boto.cognito.identity.layer1 + :members: + :undoc-members: + +boto.cognito.identity.exceptions +-------------------------------- + +.. automodule:: boto.cognito.identity.exceptions + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cognito-sync.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cognito-sync.rst new file mode 100644 index 0000000000000000000000000000000000000000..a8a6dd406d51868b14286a6b6c441f98f78718fc --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/cognito-sync.rst @@ -0,0 +1,26 @@ +.. ref-cognito-sync + +============ +Cognito Sync +============ + +boto.cognito.sync +----------------- + +.. automodule:: boto.cognito.sync + :members: + :undoc-members: + +boto.cognito.sync.layer1 +------------------------ + +.. automodule:: boto.cognito.sync.layer1 + :members: + :undoc-members: + +boto.cognito.sync.exceptions +---------------------------- + +.. 
automodule:: boto.cognito.sync.exceptions + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/configservice.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/configservice.rst new file mode 100644 index 0000000000000000000000000000000000000000..d5e43af90ce9ee101ae60a5174de7a9fc7e8d3e1 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/configservice.rst @@ -0,0 +1,26 @@ +.. ref-configservice + +====== +Config +====== + +boto.configservice +------------------ + +.. automodule:: boto.configservice + :members: + :undoc-members: + +boto.configservice.layer1 +------------------------- + +.. automodule:: boto.configservice.layer1 + :members: + :undoc-members: + +boto.configservice.exceptions +----------------------------- + +.. automodule:: boto.configservice.exceptions + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/contrib.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/contrib.rst new file mode 100644 index 0000000000000000000000000000000000000000..39ef54f8c837d41f1e625c4d87c6ab92b32e67c8 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/contrib.rst @@ -0,0 +1,19 @@ +.. ref-contrib + +======= +contrib +======= + +boto.contrib +------------ + +.. automodule:: boto.contrib + :members: + :undoc-members: + +boto.contrib.ymlmessage +----------------------- + +.. automodule:: boto.contrib.ymlmessage + :members: + :undoc-members: \ No newline at end of file diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/datapipeline.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/datapipeline.rst new file mode 100644 index 0000000000000000000000000000000000000000..316147cd177165eda0c8ec412874080ca9595c40 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/datapipeline.rst @@ -0,0 +1,26 @@ +.. _ref-datapipeline: + +============= +Data Pipeline +============= + +boto.datapipeline +----------------- + +.. automodule:: boto.datapipeline + :members: + :undoc-members: + +boto.datapipeline.layer1 +------------------------ + +.. automodule:: boto.datapipeline.layer1 + :members: + :undoc-members: + +boto.datapipeline.exceptions +---------------------------- + +.. automodule:: boto.datapipeline.exceptions + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/dynamodb.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/dynamodb.rst new file mode 100644 index 0000000000000000000000000000000000000000..00a1375690c385ae05c6279bab6f22dc792c977d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/dynamodb.rst @@ -0,0 +1,61 @@ +.. ref-dynamodb + +======== +DynamoDB +======== + +boto.dynamodb +------------- + +.. automodule:: boto.dynamodb + :members: + :undoc-members: + +boto.dynamodb.layer1 +-------------------- + +.. automodule:: boto.dynamodb.layer1 + :members: + :undoc-members: + +boto.dynamodb.layer2 +-------------------- + +.. automodule:: boto.dynamodb.layer2 + :members: + :undoc-members: + +boto.dynamodb.table +------------------- + +.. automodule:: boto.dynamodb.table + :members: + :undoc-members: + +boto.dynamodb.schema +-------------------- + +.. automodule:: boto.dynamodb.schema + :members: + :undoc-members: + +boto.dynamodb.item +------------------ + +.. automodule:: boto.dynamodb.item + :members: + :undoc-members: + +boto.dynamodb.batch +------------------- + +.. automodule:: boto.dynamodb.batch + :members: + :undoc-members: + +boto.dynamodb.types +------------------- + +.. 
automodule:: boto.dynamodb.types + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/dynamodb2.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/dynamodb2.rst new file mode 100644 index 0000000000000000000000000000000000000000..97db6b023e184659de2224c7e2b77a009190f530 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/dynamodb2.rst @@ -0,0 +1,61 @@ +.. ref-dynamodb2 + +========= +DynamoDB2 +========= + +High-Level API +============== + +boto.dynamodb2.fields +--------------------- + +.. automodule:: boto.dynamodb2.fields + :members: + :undoc-members: + +boto.dynamodb2.items +-------------------- + +.. automodule:: boto.dynamodb2.items + :members: + :undoc-members: + +boto.dynamodb2.results +---------------------- + +.. automodule:: boto.dynamodb2.results + :members: + :undoc-members: + +boto.dynamodb2.table +-------------------- + +.. automodule:: boto.dynamodb2.table + :members: + :undoc-members: + + +Low-Level API +============= + +boto.dynamodb2 +-------------- + +.. automodule:: boto.dynamodb2 + :members: + :undoc-members: + +boto.dynamodb2.layer1 +--------------------- + +.. automodule:: boto.dynamodb2.layer1 + :members: + :undoc-members: + +boto.dynamodb2.exceptions +------------------------- + +.. automodule:: boto.dynamodb2.exceptions + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/ec2.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/ec2.rst new file mode 100644 index 0000000000000000000000000000000000000000..3d06fa8c26b42bc60f0e2230ee20d44011d14278 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/ec2.rst @@ -0,0 +1,195 @@ +.. ref-ec2 + +=== +EC2 +=== + +boto.ec2 +-------- + +.. automodule:: boto.ec2 + :members: + :undoc-members: + +boto.ec2.address +---------------- + +.. automodule:: boto.ec2.address + :members: + :undoc-members: + +boto.ec2.autoscale +------------------- + +See the :doc:`Auto Scaling Reference `. + +boto.ec2.blockdevicemapping +--------------------------- + +.. automodule:: boto.ec2.blockdevicemapping + :members: + :undoc-members: + +boto.ec2.buyreservation +----------------------- + +.. automodule:: boto.ec2.buyreservation + :members: + :undoc-members: + +boto.ec2.cloudwatch +------------------- + +See the :doc:`CloudWatch Reference `. + +boto.ec2.connection +------------------- + +.. automodule:: boto.ec2.connection + :members: + :undoc-members: + +boto.ec2.ec2object +------------------ + +.. automodule:: boto.ec2.ec2object + :members: + :undoc-members: + +boto.ec2.elb +------------ + +See the :doc:`ELB Reference `. + +boto.ec2.group +-------------- + +.. automodule:: boto.ec2.group + :members: + :undoc-members: + +boto.ec2.image +-------------- + +.. automodule:: boto.ec2.image + :members: + :undoc-members: + +boto.ec2.instance +----------------- + +.. automodule:: boto.ec2.instance + :members: + :undoc-members: + +boto.ec2.instanceinfo +--------------------- + +.. automodule:: boto.ec2.instanceinfo + :members: + :undoc-members: + +boto.ec2.instancestatus +----------------------- + +.. automodule:: boto.ec2.instancestatus + :members: + :undoc-members: + +boto.ec2.keypair +---------------- + +.. automodule:: boto.ec2.keypair + :members: + :undoc-members: + +boto.ec2.launchspecification +---------------------------- + +.. automodule:: boto.ec2.launchspecification + :members: + :undoc-members: + +boto.ec2.networkinterface +------------------------- + +.. 
automodule:: boto.ec2.networkinterface + :members: + :undoc-members: + +boto.ec2.placementgroup +----------------------- + +.. automodule:: boto.ec2.placementgroup + :members: + :undoc-members: + +boto.ec2.regioninfo +------------------- + +.. automodule:: boto.ec2.regioninfo + :members: + :undoc-members: + +boto.ec2.reservedinstance +------------------------- + +.. automodule:: boto.ec2.reservedinstance + :members: + :undoc-members: + +boto.ec2.securitygroup +---------------------- + +.. automodule:: boto.ec2.securitygroup + :members: + :undoc-members: + +boto.ec2.snapshot +----------------- + +.. automodule:: boto.ec2.snapshot + :members: + :undoc-members: + +boto.ec2.spotinstancerequest +---------------------------- + +.. automodule:: boto.ec2.spotinstancerequest + :members: + :undoc-members: + +boto.ec2.tag +------------ + +.. automodule:: boto.ec2.tag + :members: + :undoc-members: + +boto.ec2.vmtype +--------------- + +.. automodule:: boto.ec2.vmtype + :members: + :undoc-members: + +boto.ec2.volume +--------------- + +.. automodule:: boto.ec2.volume + :members: + :undoc-members: + +boto.ec2.volumestatus +--------------------- + +.. automodule:: boto.ec2.volumestatus + :members: + :undoc-members: + +boto.ec2.zone +------------- + +.. automodule:: boto.ec2.zone + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/ec2containerservice.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/ec2containerservice.rst new file mode 100644 index 0000000000000000000000000000000000000000..96ca539a0db8e75ee9d07e767f6669c03e45202a --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/ec2containerservice.rst @@ -0,0 +1,26 @@ +.. ref-ec2containerservice + +===================== +EC2 Container Service +===================== + +boto.ec2containerservice +------------------------ + +.. automodule:: boto.ec2containerservice + :members: + :undoc-members: + +boto.ec2containerservice.layer1 +------------------------------- + +.. automodule:: boto.ec2containerservice.layer1 + :members: + :undoc-members: + +boto.ec2containerservice.exceptions +----------------------------------- + +.. automodule:: boto.ec2containerservice.exceptions + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/ecs.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/ecs.rst new file mode 100644 index 0000000000000000000000000000000000000000..97613b46b5461fcca15fbfa158351f4cfec69f29 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/ecs.rst @@ -0,0 +1,19 @@ +.. ref-ecs + +=== +ECS +=== + +boto.ecs +-------- + +.. automodule:: boto.ecs + :members: + :undoc-members: + +boto.ecs.item +---------------------------- + +.. automodule:: boto.ecs.item + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/elasticache.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/elasticache.rst new file mode 100644 index 0000000000000000000000000000000000000000..9d08a17b26d3220f98030c8f48e699047aa93240 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/elasticache.rst @@ -0,0 +1,19 @@ +.. ref-elasticache + +================== +Amazon ElastiCache +================== + +boto.elasticache +---------------- + +.. automodule:: boto.elasticache + :members: + :undoc-members: + +boto.elasticache.layer1 +----------------------- + +.. 
automodule:: boto.elasticache.layer1 + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/elastictranscoder.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/elastictranscoder.rst new file mode 100644 index 0000000000000000000000000000000000000000..b59eeac7f87385028dd6fa9ff3855cabde7666c0 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/elastictranscoder.rst @@ -0,0 +1,26 @@ +.. _ref-elastictranscoder: + +================== +Elastic Transcoder +================== + +boto.elastictranscoder +---------------------- + +.. automodule:: boto.elastictranscoder + :members: + :undoc-members: + +boto.elastictranscoder.layer1 +----------------------------- + +.. automodule:: boto.elastictranscoder.layer1 + :members: + :undoc-members: + +boto.elastictranscoder.exceptions +--------------------------------- + +.. automodule:: boto.elastictranscoder.exceptions + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/elb.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/elb.rst new file mode 100644 index 0000000000000000000000000000000000000000..f64517e399f143b78642cdc39f288b60cfb3ed70 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/elb.rst @@ -0,0 +1,67 @@ +.. ref-elb + +============= +ELB Reference +============= + +boto.ec2.elb +------------ + +.. automodule:: boto.ec2.elb + :members: + :undoc-members: + +boto.ec2.elb.healthcheck +------------------------ + +.. automodule:: boto.ec2.elb.healthcheck + :members: + :undoc-members: + +boto.ec2.elb.instancestate +-------------------------- + +.. automodule:: boto.ec2.elb.instancestate + :members: + :undoc-members: + +boto.ec2.elb.listelement +------------------------ + +.. automodule:: boto.ec2.elb.listelement + :members: + :undoc-members: + +boto.ec2.elb.listener +--------------------- + +.. automodule:: boto.ec2.elb.listener + :members: + :undoc-members: + +boto.ec2.elb.loadbalancer +------------------------- + +.. automodule:: boto.ec2.elb.loadbalancer + :members: + :undoc-members: + +boto.ec2.elb.policies +--------------------- + +.. automodule:: boto.ec2.elb.policies + :members: + :undoc-members: + +boto.ec2.elb.securitygroup +-------------------------- + +.. automodule:: boto.ec2.elb.securitygroup + :members: + :undoc-members: + +boto.ec2.elb.attributes +----------------------- +.. automodule:: boto.ec2.elb.attributes + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/emr.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/emr.rst new file mode 100644 index 0000000000000000000000000000000000000000..4392d24298001b04833acefcda837eb1a1148f30 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/emr.rst @@ -0,0 +1,34 @@ +.. _ref-emr: + +=== +EMR +=== + +boto.emr +-------- + +.. automodule:: boto.emr + :members: + :undoc-members: + +boto.emr.connection +------------------- + +.. automodule:: boto.emr.connection + :members: + :undoc-members: + +boto.emr.step +------------- + +.. automodule:: boto.emr.step + :members: + :undoc-members: + +boto.emr.emrobject +------------------ + +.. automodule:: boto.emr.emrobject + :members: + :undoc-members: + diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/file.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/file.rst new file mode 100644 index 0000000000000000000000000000000000000000..f12847706af90b62027754638602f93b98a3ed86 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/file.rst @@ -0,0 +1,34 @@ +.. 
ref-s3: + +==== +file +==== + +boto.file.bucket +---------------- + +.. automodule:: boto.file.bucket + :members: + :undoc-members: + +boto.file.simpleresultset +------------------------- + +.. automodule:: boto.file.simpleresultset + :members: + :undoc-members: + +boto.file.connection +-------------------- + +.. automodule:: boto.file.connection + :members: + :undoc-members: + +boto.file.key +------------- + +.. automodule:: boto.file.key + :members: + :undoc-members: + diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/fps.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/fps.rst new file mode 100644 index 0000000000000000000000000000000000000000..c160eee0594ff76380d7b5f3c3b33483dd9c2a08 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/fps.rst @@ -0,0 +1,19 @@ +.. ref-fps + +=== +fps +=== + +boto.fps +-------- + +.. automodule:: boto.fps + :members: + :undoc-members: + +boto.fps.connection +------------------- + +.. automodule:: boto.fps.connection + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/glacier.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/glacier.rst new file mode 100644 index 0000000000000000000000000000000000000000..94edd53cac195b20dc2fe7276093b30562fb3df6 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/glacier.rst @@ -0,0 +1,63 @@ +.. ref-glacier + +======= +Glacier +======= + +boto.glacier +------------ + +.. automodule:: boto.glacier + :members: + :undoc-members: + +boto.glacier.layer1 +------------------- + +.. automodule:: boto.glacier.layer1 + :members: + :undoc-members: + +boto.glacier.layer2 +------------------- + +.. automodule:: boto.glacier.layer2 + :members: + :undoc-members: + +boto.glacier.vault +------------------ + +.. automodule:: boto.glacier.vault + :members: + :undoc-members: + +boto.glacier.job +---------------- + +.. automodule:: boto.glacier.job + :members: + :undoc-members: + +boto.glacier.writer +------------------- + +.. automodule:: boto.glacier.writer + :members: + :undoc-members: + +boto.glacier.concurrent +----------------------- + +.. automodule:: boto.glacier.concurrent + :members: + :undoc-members: + +boto.glacier.exceptions +----------------------- + +.. automodule:: boto.glacier.exceptions + :members: + :undoc-members: + + diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/gs.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/gs.rst new file mode 100644 index 0000000000000000000000000000000000000000..e411dee3c486aebc394c56a20cdb1fdc3194ef91 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/gs.rst @@ -0,0 +1,72 @@ +.. ref-gs: + +== +GS +== + +boto.gs.acl +----------- + +.. automodule:: boto.gs.acl + :members: + :inherited-members: + :undoc-members: + +boto.gs.bucket +-------------- + +.. automodule:: boto.gs.bucket + :members: + :inherited-members: + :undoc-members: + :exclude-members: BucketPaymentBody, LoggingGroup, MFADeleteRE, VersionRE, + VersioningBody, WebsiteBody, WebsiteErrorFragment, + WebsiteMainPageFragment, startElement, endElement + +boto.gs.bucketlistresultset +--------------------------- + +.. automodule:: boto.gs.bucketlistresultset + :members: + :inherited-members: + :undoc-members: + +boto.gs.connection +------------------ + +.. automodule:: boto.gs.connection + :members: + :inherited-members: + :undoc-members: + +boto.gs.cors +------------ + +.. automodule:: boto.gs.cors + :members: + :undoc-members: + +boto.gs.key +----------- + +.. 
automodule:: boto.gs.key + :members: + :inherited-members: + :undoc-members: + +boto.gs.user +------------ + +.. automodule:: boto.gs.user + :members: + :inherited-members: + :undoc-members: + +boto.gs.resumable_upload_handler +-------------------------------- + +.. automodule:: boto.gs.resumable_upload_handler + :members: + :inherited-members: + :undoc-members: + diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/iam.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/iam.rst new file mode 100644 index 0000000000000000000000000000000000000000..81f7b67e06f9019a5efaf5d5e5ea604b027ffb70 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/iam.rst @@ -0,0 +1,27 @@ +.. ref-iam + +=== +IAM +=== + +boto.iam +-------- + +.. automodule:: boto.iam + :members: + :undoc-members: + +boto.iam.connection +------------------- + +.. automodule:: boto.iam.connection + :members: + :undoc-members: + +boto.iam.summarymap +------------------- + +.. automodule:: boto.iam.summarymap + :members: + :undoc-members: + diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/index.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..a477ac06c35c123ffdfd2d876078e79faf37a757 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/index.rst @@ -0,0 +1,42 @@ +.. _ref-index: + +============= +API Reference +============= + +.. toctree:: + :maxdepth: 4 + + boto + beanstalk + cloudformation + cloudfront + cloudsearch + contrib + dynamodb + ec2 + ecs + emr + file + fps + glacier + gs + iam + kinesis + manage + mturk + mws + pyami + rds + redshift + route53 + s3 + sdb + services + ses + sns + sqs + sts + swf + vpc + diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/kinesis.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/kinesis.rst new file mode 100644 index 0000000000000000000000000000000000000000..288ee62eed17af06e9790e0da5aacea59478a498 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/kinesis.rst @@ -0,0 +1,26 @@ +.. ref-kinesis + +======= +Kinesis +======= + +boto.kinesis +------------ + +.. automodule:: boto.kinesis + :members: + :undoc-members: + +boto.kinesis.layer1 +------------------- + +.. automodule:: boto.kinesis.layer1 + :members: + :undoc-members: + +boto.kinesis.exceptions +----------------------- + +.. automodule:: boto.kinesis.exceptions + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/kms.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/kms.rst new file mode 100644 index 0000000000000000000000000000000000000000..85f9e210474f26a366352281234ab9714ca96cf8 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/kms.rst @@ -0,0 +1,26 @@ +.. ref-kms + +=== +KMS +=== + +boto.kms +-------- + +.. automodule:: boto.kms + :members: + :undoc-members: + +boto.kms.layer1 +--------------- + +.. automodule:: boto.kms.layer1 + :members: + :undoc-members: + +boto.kms.exceptions +----------------------- + +.. automodule:: boto.kms.exceptions + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/logs.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/logs.rst new file mode 100644 index 0000000000000000000000000000000000000000..3a0fc34c8c0092835ae9bf8fc9f60ac20e71040e --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/logs.rst @@ -0,0 +1,26 @@ +.. _ref-logs: + +=============== +CloudWatch Logs +=============== + +boto.logs +--------------- + +.. 
automodule:: boto.logs + :members: + :undoc-members: + +boto.logs.layer1 +---------------------- + +.. automodule:: boto.logs.layer1 + :members: + :undoc-members: + +boto.logs.exceptions +-------------------------- + +.. automodule:: boto.logs.exceptions + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/machinelearning.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/machinelearning.rst new file mode 100644 index 0000000000000000000000000000000000000000..cfa7946747406a20f9050afa4792d92c493064df --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/machinelearning.rst @@ -0,0 +1,26 @@ +.. ref-machinelearning + +================ +Machine Learning +================ + +boto.machinelearning +-------------------- + +.. automodule:: boto.machinelearning + :members: + :undoc-members: + +boto.machinelearning.layer1 +--------------------------- + +.. automodule:: boto.machinelearning.layer1 + :members: + :undoc-members: + +boto.machinelearning.exceptions +------------------------------- + +.. automodule:: boto.machinelearning.exceptions + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/manage.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/manage.rst new file mode 100644 index 0000000000000000000000000000000000000000..a175d88b99273e56aed1c1495c238fca9ecd8dca --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/manage.rst @@ -0,0 +1,47 @@ +.. ref-manage + +====== +manage +====== + +boto.manage +----------- + +.. automodule:: boto.manage + :members: + :undoc-members: + +boto.manage.cmdshell +-------------------- + +.. automodule:: boto.manage.cmdshell + :members: + :undoc-members: + +boto.manage.propget +------------------- + +.. automodule:: boto.manage.propget + :members: + :undoc-members: + +boto.manage.server +------------------ + +.. automodule:: boto.manage.server + :members: + :undoc-members: + +boto.manage.task +---------------- + +.. automodule:: boto.manage.task + :members: + :undoc-members: + +boto.manage.volume +------------------ + +.. automodule:: boto.manage.volume + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/mturk.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/mturk.rst new file mode 100644 index 0000000000000000000000000000000000000000..b116d371d1a24c28461b77fb03bd33f39b2aeede --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/mturk.rst @@ -0,0 +1,54 @@ +.. ref-mturk + +===== +mturk +===== + +boto.mturk +------------ + +.. automodule:: boto.mturk + :members: + :undoc-members: + +boto.mturk.connection +--------------------- + +.. automodule:: boto.mturk.connection + :members: + :undoc-members: + +boto.mturk.layoutparam +---------------------- + +.. automodule:: boto.mturk.layoutparam + :members: + :undoc-members: + +boto.mturk.notification +----------------------- + +.. automodule:: boto.mturk.notification + :members: + :undoc-members: + +boto.mturk.price +---------------- + +.. automodule:: boto.mturk.price + :members: + :undoc-members: + +boto.mturk.qualification +------------------------ + +.. automodule:: boto.mturk.qualification + :members: + :undoc-members: + +boto.mturk.question +------------------- + +.. 
automodule:: boto.mturk.question + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/mws.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/mws.rst new file mode 100644 index 0000000000000000000000000000000000000000..df5cc22a22eee722eb7db1132958f86b9eae8d5a --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/mws.rst @@ -0,0 +1,33 @@ +.. ref-mws + +=== +mws +=== + +boto.mws +-------- + +.. automodule:: boto.mws + :members: + :undoc-members: + +boto.mws.connection +------------------- + +.. automodule:: boto.mws.connection + :members: + :undoc-members: + +boto.mws.exception +------------------- + +.. automodule:: boto.mws.exception + :members: + :undoc-members: + +boto.mws.response +------------------- + +.. automodule:: boto.mws.response + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/opsworks.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/opsworks.rst new file mode 100644 index 0000000000000000000000000000000000000000..7c761e72c620626f4463c04b52e69090e9017a93 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/opsworks.rst @@ -0,0 +1,28 @@ +.. ref-opsworks + +======== +Opsworks +======== + +boto.opsworks +------------ + +.. automodule:: boto.opsworks + :members: + :undoc-members: + +boto.opsworks.layer1 +------------------- + +.. automodule:: boto.opsworks.layer1 + :members: + :undoc-members: + +boto.opsworks.exceptions +----------------------- + +.. automodule:: boto.opsworks.exceptions + :members: + :undoc-members: + + diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/pyami.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/pyami.rst new file mode 100644 index 0000000000000000000000000000000000000000..e573b34dca7fdc0bf41bd4b39dc7df3177c7bcf2 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/pyami.rst @@ -0,0 +1,103 @@ +.. ref-pyami + +===== +pyami +===== + +boto.pyami +-------------- + +.. automodule:: boto.pyami + :members: + :undoc-members: + +boto.pyami.bootstrap +-------------------- + +.. automodule:: boto.pyami.bootstrap + :members: + :undoc-members: + +boto.pyami.config +----------------- + +.. automodule:: boto.pyami.config + :members: + :undoc-members: + +boto.pyami.copybot +------------------ + +.. automodule:: boto.pyami.copybot + :members: + :undoc-members: + +boto.pyami.installers +--------------------- + +.. automodule:: boto.pyami.installers + :members: + :undoc-members: + +boto.pyami.installers.ubuntu +---------------------------- + +.. automodule:: boto.pyami.installers.ubuntu + :members: + :undoc-members: + +boto.pyami.installers.ubuntu.apache +----------------------------------- + +.. automodule:: boto.pyami.installers.ubuntu.apache + :members: + :undoc-members: + +boto.pyami.installers.ubuntu.ebs +-------------------------------- + +.. automodule:: boto.pyami.installers.ubuntu.ebs + :members: + :undoc-members: + +boto.pyami.installers.ubuntu.installer +-------------------------------------- + +.. automodule:: boto.pyami.installers.ubuntu.installer + :members: + :undoc-members: + +boto.pyami.installers.ubuntu.mysql +---------------------------------- + +.. automodule:: boto.pyami.installers.ubuntu.mysql + :members: + :undoc-members: + +boto.pyami.installers.ubuntu.trac +--------------------------------- + +.. automodule:: boto.pyami.installers.ubuntu.trac + :members: + :undoc-members: + +boto.pyami.launch_ami +--------------------- + +.. 
automodule:: boto.pyami.launch_ami + :members: + :undoc-members: + +boto.pyami.scriptbase +--------------------- + +.. automodule:: boto.pyami.scriptbase + :members: + :undoc-members: + +boto.pyami.startup +------------------ + +.. automodule:: boto.pyami.startup + :members: + :undoc-members: \ No newline at end of file diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/rds.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/rds.rst new file mode 100644 index 0000000000000000000000000000000000000000..7f02d33254eb9e03ea2a6fc0fefc93f5c3edc8db --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/rds.rst @@ -0,0 +1,47 @@ +.. ref-rds + +=== +RDS +=== + +boto.rds +-------- + +.. automodule:: boto.rds + :members: + :undoc-members: + +boto.rds.dbinstance +------------------- + +.. automodule:: boto.rds.dbinstance + :members: + :undoc-members: + +boto.rds.dbsecuritygroup +------------------------ + +.. automodule:: boto.rds.dbsecuritygroup + :members: + :undoc-members: + +boto.rds.dbsnapshot +------------------- + +.. automodule:: boto.rds.dbsnapshot + :members: + :undoc-members: + +boto.rds.event +-------------- + +.. automodule:: boto.rds.event + :members: + :undoc-members: + +boto.rds.parametergroup +----------------------- + +.. automodule:: boto.rds.parametergroup + :members: + :undoc-members: \ No newline at end of file diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/rds2.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/rds2.rst new file mode 100644 index 0000000000000000000000000000000000000000..8c8121ecdef7bb1acccfec56a3a6e7b8098b159d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/rds2.rst @@ -0,0 +1,26 @@ +.. ref-rds2 + +==== +RDS2 +==== + +boto.rds2 +--------- + +.. automodule:: boto.rds2 + :members: + :undoc-members: + +boto.rds2.exceptions +-------------------- + +.. automodule:: boto.rds2.exceptions + :members: + :undoc-members: + +boto.rds2.layer1 +---------------- + +.. automodule:: boto.rds2.layer1 + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/redshift.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/redshift.rst new file mode 100644 index 0000000000000000000000000000000000000000..b3d8463630cffc783b32bab3c716f57d9896bdd6 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/redshift.rst @@ -0,0 +1,26 @@ +.. _ref-redshift: + +======== +Redshift +======== + +boto.redshift +------------- + +.. automodule:: boto.redshift + :members: + :undoc-members: + +boto.redshift.layer1 +-------------------- + +.. automodule:: boto.redshift.layer1 + :members: + :undoc-members: + +boto.redshift.exceptions +------------------------ + +.. automodule:: boto.redshift.exceptions + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/route53.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/route53.rst new file mode 100644 index 0000000000000000000000000000000000000000..1d4af2c6141a79f70ccc2584ca1a784509e7c193 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/route53.rst @@ -0,0 +1,55 @@ +.. ref-route53 + +======= +route53 +======= + + +boto.route53.connection +----------------------- + +.. automodule:: boto.route53.connection + :members: + :undoc-members: + +boto.route53.exception +---------------------- + +.. automodule:: boto.route53.exception + :members: + :undoc-members: + +boto.route53.healthcheck +------------------------ + +.. 
automodule:: boto.route53.healthcheck + :members: + :undoc-members: + +boto.route53.hostedzone +----------------------- + +.. automodule:: boto.route53.hostedzone + :members: + :undoc-members: + +boto.route53.record +------------------- + +.. automodule:: boto.route53.record + :members: + :undoc-members: + +boto.route53.status +------------------- + +.. automodule:: boto.route53.status + :members: + :undoc-members: + +boto.route53.zone +----------------- + +.. automodule:: boto.route53.zone + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/route53domains.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/route53domains.rst new file mode 100644 index 0000000000000000000000000000000000000000..64eca283ef2e2839d5a53913bdbc077571a2d23f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/route53domains.rst @@ -0,0 +1,26 @@ +.. ref-route53domains + +================ +Route 53 Domains +================ + +boto.route53.domains +-------------------- + +.. automodule:: boto.route53.domains + :members: + :undoc-members: + +boto.route53.domains.layer1 +------------------- + +.. automodule:: boto.route53.domains.layer1 + :members: + :undoc-members: + +boto.route53.domains.exceptions +----------------------- + +.. automodule:: boto.route53.domains.exceptions + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/s3.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/s3.rst new file mode 100644 index 0000000000000000000000000000000000000000..41916fc742eac493215326dffb87b0ca019cf908 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/s3.rst @@ -0,0 +1,111 @@ +.. ref-s3: + +=== +S3 +=== + +boto.s3 +-------- + +.. automodule:: boto.s3 + :members: + :undoc-members: + +boto.s3.acl +----------- + +.. automodule:: boto.s3.acl + :members: + :undoc-members: + +boto.s3.bucket +-------------- + +.. automodule:: boto.s3.bucket + :members: + :undoc-members: + +boto.s3.bucketlistresultset +--------------------------- + +.. automodule:: boto.s3.bucketlistresultset + :members: + :undoc-members: + +boto.s3.connection +------------------ + +.. automodule:: boto.s3.connection + :members: + :undoc-members: + +boto.s3.cors +-------------- + +.. automodule:: boto.s3.cors + :members: + :undoc-members: + +boto.s3.deletemarker +-------------------- + +.. automodule:: boto.s3.deletemarker + :members: + :undoc-members: + +boto.s3.key +----------- + +.. automodule:: boto.s3.key + :members: + :undoc-members: + +boto.s3.prefix +-------------- + +.. automodule:: boto.s3.prefix + :members: + :undoc-members: + +boto.s3.multipart +----------------- + +.. automodule:: boto.s3.multipart + :members: + :undoc-members: + +boto.s3.multidelete +------------------- + +.. automodule:: boto.s3.multidelete + :members: + :undoc-members: + +boto.s3.resumable_download_handler +---------------------------------- + +.. automodule:: boto.s3.resumable_download_handler + :members: + :undoc-members: + +boto.s3.lifecycle +-------------------- + +.. automodule:: boto.s3.lifecycle + :members: + :undoc-members: + +boto.s3.tagging +--------------- + +.. automodule:: boto.s3.tagging + :members: + :undoc-members: + +boto.s3.user +------------ + +.. 
automodule:: boto.s3.user + :members: + :undoc-members: + diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/sdb.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/sdb.rst new file mode 100644 index 0000000000000000000000000000000000000000..28946f828155c36b277ee0ceb7e52640173fff4d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/sdb.rst @@ -0,0 +1,45 @@ +.. ref-sdb + +============= +SDB Reference +============= + +In addition to what is seen below, boto includes an abstraction +layer for SimpleDB that may be used: + +* :doc:`SimpleDB DB <sdb_db>` (Maintained, but little documentation) + +boto.sdb +-------- + +.. automodule:: boto.sdb + :members: + :undoc-members: + +boto.sdb.connection +------------------- + +.. automodule:: boto.sdb.connection + :members: + :undoc-members: + +boto.sdb.domain +--------------- + +.. automodule:: boto.sdb.domain + :members: + :undoc-members: + +boto.sdb.item +------------- + +.. automodule:: boto.sdb.item + :members: + :undoc-members: + +boto.sdb.queryresultset +----------------------- + +.. automodule:: boto.sdb.queryresultset + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/sdb_db.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/sdb_db.rst new file mode 100644 index 0000000000000000000000000000000000000000..a7594db5be907895c3a05bfaa0a2f932e2eb975c --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/sdb_db.rst @@ -0,0 +1,70 @@ +.. ref-sdbdb + +================ +SDB DB Reference +================ + +This module offers an ORM-like layer on top of SimpleDB. + +boto.sdb.db +----------- + +.. automodule:: boto.sdb.db + :members: + :undoc-members: + +boto.sdb.db.blob +---------------- + +.. automodule:: boto.sdb.db.blob + :members: + :undoc-members: + +boto.sdb.db.key +--------------- + +.. automodule:: boto.sdb.db.key + :members: + :undoc-members: + +boto.sdb.db.manager +------------------- + +.. automodule:: boto.sdb.db.manager + :members: + :undoc-members: + +boto.sdb.db.manager.sdbmanager +------------------------------ + +.. automodule:: boto.sdb.db.manager.sdbmanager + :members: + :undoc-members: + +boto.sdb.db.manager.xmlmanager +------------------------------ + +.. automodule:: boto.sdb.db.manager.xmlmanager + :members: + :undoc-members: + +boto.sdb.db.model +----------------- + +.. automodule:: boto.sdb.db.model + :members: + :undoc-members: + +boto.sdb.db.property +-------------------- + +.. automodule:: boto.sdb.db.property + :members: + :undoc-members: + +boto.sdb.db.query +----------------- + +.. automodule:: boto.sdb.db.query + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/services.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/services.rst new file mode 100644 index 0000000000000000000000000000000000000000..aa73dcc2742807cee9e7fb3d93291fbd6bc1bf24 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/services.rst @@ -0,0 +1,61 @@ +.. ref-services + +======== +services +======== + +boto.services +------------- + +.. automodule:: boto.services + :members: + :undoc-members: + +boto.services.bs +---------------- + +.. automodule:: boto.services.bs + :members: + :undoc-members: + +boto.services.message +--------------------- + +.. automodule:: boto.services.message + :members: + :undoc-members: + +boto.services.result +-------------------- + +.. automodule:: boto.services.result + :members: + :undoc-members: + +boto.services.service +--------------------- + +.. 
automodule:: boto.services.service + :members: + :undoc-members: + +boto.services.servicedef +------------------------ + +.. automodule:: boto.services.servicedef + :members: + :undoc-members: + +boto.services.sonofmmm +---------------------- + +.. automodule:: boto.services.sonofmmm + :members: + :undoc-members: + +boto.services.submit +-------------------- + +.. automodule:: boto.services.submit + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/ses.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/ses.rst new file mode 100644 index 0000000000000000000000000000000000000000..d59126a33e7a9e3abc0df0437dab6de3a68151c6 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/ses.rst @@ -0,0 +1,21 @@ +.. ref-ses + +=== +SES +=== + + +boto.ses +------------ + +.. automodule:: boto.ses + :members: + :undoc-members: + +boto.ses.connection +--------------------- + +.. automodule:: boto.ses.connection + :members: + :undoc-members: + diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/sns.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/sns.rst new file mode 100644 index 0000000000000000000000000000000000000000..6f840f892dbc0b424507ace775ec27666ed159b8 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/sns.rst @@ -0,0 +1,17 @@ +.. ref-sns + +=== +SNS +=== + +boto.sns +-------- + +.. automodule:: boto.sns + :members: + :undoc-members: + +.. autoclass:: boto.sns.SNSConnection + :members: + :undoc-members: + diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/sqs.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/sqs.rst new file mode 100644 index 0000000000000000000000000000000000000000..88f1c16100248358422833df6b85277afd7a2a27 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/sqs.rst @@ -0,0 +1,61 @@ +.. ref-sqs + +==== +SQS +==== + +boto.sqs +-------- + +.. automodule:: boto.sqs + :members: + :undoc-members: + +boto.sqs.attributes +------------------- + +.. automodule:: boto.sqs.attributes + :members: + :undoc-members: + +boto.sqs.connection +------------------- + +.. automodule:: boto.sqs.connection + :members: + :undoc-members: + +boto.sqs.jsonmessage +-------------------- + +.. automodule:: boto.sqs.jsonmessage + :members: + :undoc-members: + +boto.sqs.message +---------------- + +.. automodule:: boto.sqs.message + :members: + :undoc-members: + +boto.sqs.queue +-------------- + +.. automodule:: boto.sqs.queue + :members: + :undoc-members: + +boto.sqs.regioninfo +------------------- + +.. automodule:: boto.sqs.regioninfo + :members: + :undoc-members: + +boto.sqs.batchresults +--------------------- + +.. automodule:: boto.sqs.batchresults + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/sts.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/sts.rst new file mode 100644 index 0000000000000000000000000000000000000000..e3ce58184d74ba1488da6a467bb60dacb7e2693e --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/sts.rst @@ -0,0 +1,25 @@ +.. ref-sts + +=== +STS +=== + +boto.sts +-------- + +.. automodule:: boto.sts + :members: + :undoc-members: + +.. autoclass:: boto.sts.STSConnection + :members: + :undoc-members: + +boto.sts.credentials +-------------------- + +.. 
automodule:: boto.sts.credentials + :members: + :undoc-members: + + diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/support.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/support.rst new file mode 100644 index 0000000000000000000000000000000000000000..d63d809457a6fdfc3f9faa6d336b7b249db77a36 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/support.rst @@ -0,0 +1,26 @@ +.. _ref-support: + +======= +Support +======= + +boto.support +------------ + +.. automodule:: boto.support + :members: + :undoc-members: + +boto.support.layer1 +------------------- + +.. automodule:: boto.support.layer1 + :members: + :undoc-members: + +boto.support.exceptions +----------------------- + +.. automodule:: boto.support.exceptions + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/swf.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/swf.rst new file mode 100644 index 0000000000000000000000000000000000000000..fbe1db0bb06b844ddd2beb580eecf15ff1364e04 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/swf.rst @@ -0,0 +1,32 @@ +.. ref-swf + +=== +SWF +=== + +boto.swf +-------- + +.. automodule:: boto.swf + :members: + :undoc-members: + +boto.swf.layer1 +-------------------- + +.. automodule:: boto.swf.layer1 + :members: + :undoc-members: + +boto.swf.layer1_decisions +------------------------- + +.. automodule:: boto.swf.layer1_decisions + :members: + :undoc-members: + +boto.swf.layer2 +-------------------- + +.. automodule:: boto.swf.layer2 + :members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ref/vpc.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/vpc.rst new file mode 100644 index 0000000000000000000000000000000000000000..9b231bdcbaf3b7fadd2aa2f7faa37103d2983e17 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ref/vpc.rst @@ -0,0 +1,68 @@ +.. _ref-vpc: + +==== +VPC +==== + +boto.vpc +-------- + +.. automodule:: boto.vpc + :members: + :undoc-members: + +boto.vpc.customergateway +------------------------ + +.. automodule:: boto.vpc.customergateway + :members: + :undoc-members: + +boto.vpc.dhcpoptions +-------------------- + +.. automodule:: boto.vpc.dhcpoptions + :members: + :undoc-members: + +boto.vpc.internetgateway +------------------------ + +.. automodule:: boto.vpc.internetgateway + :members: + :undoc-members: + +boto.vpc.routetable +------------------- + +.. automodule:: boto.vpc.routetable + :members: + :undoc-members: + +boto.vpc.subnet +--------------- + +.. automodule:: boto.vpc.subnet + :members: + :undoc-members: + +boto.vpc.vpc +------------ + +.. automodule:: boto.vpc.vpc + :members: + :undoc-members: + +boto.vpc.vpnconnection +---------------------- + +.. automodule:: boto.vpc.vpnconnection + :members: + :undoc-members: + +boto.vpc.vpngateway +------------------- + +.. automodule:: boto.vpc.vpngateway + :members: + :undoc-members: diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/dev.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/dev.rst new file mode 100644 index 0000000000000000000000000000000000000000..39b096ebd561c98f5539ab87e52c7037fc7f5d86 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/dev.rst @@ -0,0 +1,21 @@ +boto v2.xx.x +============ + +:date: 2013/xx/xx + +This release adds ____. + + +Features +-------- + +* . 
(:issue:``, :sha:``) + + +Bugfixes +-------- + +* (:issue:``, :sha:``) +* Several documentation improvements/fixes: + + * (:issue:``, :sha:``) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/releasenotes_template.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/releasenotes_template.rst new file mode 100644 index 0000000000000000000000000000000000000000..39b096ebd561c98f5539ab87e52c7037fc7f5d86 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/releasenotes_template.rst @@ -0,0 +1,21 @@ +boto v2.xx.x +============ + +:date: 2013/xx/xx + +This release adds ____. + + +Features +-------- + +* . (:issue:``, :sha:``) + + +Bugfixes +-------- + +* (:issue:``, :sha:``) +* Several documentation improvements/fixes: + + * (:issue:``, :sha:``) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.0.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.0.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..28d385363567ddcd9bf05f14f7a5f9e1a1784b87 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.0.0.rst @@ -0,0 +1,135 @@ +========================== +Release Notes for boto 2.0 +========================== + +Highlights +========== + +There have been many, many changes since the 2.0b4 release. This overview highlights some of those changes. + +* Fix connection pooling bug: don't close before reading. +* Added AddInstanceGroup and ModifyInstanceGroup to boto.emr +* Merge pull request #246 from chetan/multipart_s3put +* AddInstanceGroupsResponse class to boto.emr.emrobject. +* Removed extra print statement +* Merge pull request #244 from ryansb/master +* Added add_instance_groups function to boto.emr.connection. Built some helper methods for it, and added AddInstanceGroupsResponse class to boto.emr.emrobject. +* Added a new class, InstanceGroup, with just a __init__ and __repr__. +* Adding support for GetLoginProfile request to IAM. Removing commented lines in connection.py. Fixes GoogleCode issue 532. +* Fixed issue #195 +* Added correct sax reader for boto.emr.emrobject.BootstrapAction +* Fixed a typo bug in ConsoleOutput sax parsing and some PEP8 cleanup in connection.py. +* Added initial support for generating a registration url for the aws marketplace +* Fix add_record and del_record to support multiple values, like change_record does +* Add support to accept SecurityGroupId as a parameter for ec2 run instances. This is required to create EC2 instances under VPC security groups +* Added support for aliases to the add_change method of ResourceRecordSets. +* Resign each request in a retry situation. Some services are starting to incorporate replay detection algorithms and the boto approach of simply re-trying the original request triggers them. Also a small bug fix to roboto and added a delay in the ec2 test to wait for consistency. +* Fixed a problem with InstanceMonitoring parameter of LaunchConfigurations for autoscale module. +* Route 53 Alias Resource Record Sets +* Fixed App Engine support +* Fixed incorrect host on App Engine +* Fixed issue 199 on github. +* First pass at put_metric_data +* Changed boto.s3.Bucket.set_acl_xml() to ISO-8859-1 encode the Unicode ACL text before sending over HTTP connection. +* Added GetQualificationScore for mturk. +* Added UpdateQualificationScore for mturk +* import_key_pair base64 fix +* Fixes for ses send_email method better handling of exceptions +* Add optional support for SSL server certificate validation. 
+* Specify a reasonable socket timeout for httplib +* Support for ap-northeast-1 region +* Close issue #153 +* Close issue #154 +* we must POST autoscale user-data, not GET. otherwise a HTTP 505 error is returned from AWS. see: http://groups.google.com/group/boto-dev/browse_thread/thread/d5eb79c97ea8eecf?pli=1 +* autoscale userdata needs to be base64 encoded. +* Use the unversioned streaming jar symlink provided by EMR +* Updated lss3 to allow for prefix based listing (more like actual ls) +* Deal with the groupSet element that appears in the instanceSet element in the DescribeInstances response. +* Add a change_record command to bin/route53 +* Incorporating a patch from AWS to allow security groups to be tagged. +* Fixed an issue with extra headers in generated URLs. Fixes http://code.google.com/p/boto/issues/detail?id=499 +* Incorporating a patch to handle obscure bug in apache/fastcgi. See http://goo.gl/0Tdax. +* Reorganizing the existing test code. Part of a long-term project to completely revamp and improve boto tests. +* Fixed an invalid parameter bug (ECS) #102 +* Adding initial cut at s3 website support. + +Stats +===== + +* 465 commits since boto 2.0b4 +* 70 authors +* 111 Pull requests from 64 different authors + +Contributors (in order of last commits) +======================================= + +* Mitch Garnaat +* Chris Moyer +* Garrett Holmstrom +* Justin Riley +* Steve Johnson +* Sean Talts +* Brian Beach +* Ryan Brown +* Chetan Sarva +* spenczar +* Jonathan Drosdeck +* garnaat +* Nathaniel Moseley +* Bradley Ayers +* jibs +* Kenneth Falck +* chirag +* Sean O'Connor +* Scott Moser +* Vineeth Pillai +* Greg Taylor +* root +* darktable +* flipkin +* brimcfadden +* Samuel Lucidi +* Terence Honles +* Mike Schwartz +* Waldemar Kornewald +* Lucas Hrabovsky +* thaDude +* Vinicius Ruan Cainelli +* David Marin +* Stanislav Ievlev +* Victor Trac +* Dan Fairs +* David Pisoni +* Matt Robenolt +* Matt Billenstein +* rgrp +* vikalp +* Christoph Kern +* Gabriel Monroy +* Ben Burry +* Hinnerk +* Jann Kleen +* Louis R. 
Marascio +* Matt Singleton +* David Park +* Nick Tarleton +* Cory Mintz +* Robert Mela +* rlotun +* John Walsh +* Keith Fitzgerald +* Pierre Riteau +* ryancustommade +* Fabian Topfstedt +* Michael Thompson +* sanbornm +* Seth Golub +* Jon Colverson +* Steve Howard +* Roberto Gaiser +* James Downs +* Gleicon Moraes +* Blake Maltby +* Mac Morgan +* Rytis Sileika +* winhamwr diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.0b1.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.0b1.rst new file mode 100644 index 0000000000000000000000000000000000000000..aefd9023320c88d47214520a8a3ecf7c0afb0f44 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.0b1.rst @@ -0,0 +1,14 @@ +=============================== +Major changes for release 2.0b1 +=============================== + +* Support for versioning in S3 +* Support for MFA Delete in S3 +* Support for Elastic Map Reduce +* Support for Simple Notification Service +* Support for Google Storage +* Support for Consistent Reads and Conditional Puts in SimpleDB +* Significant updates and improvements to Mechanical Turk (mturk) module +* Support for Windows Bundle Tasks in EC2 +* Support for Reduced Redundancy Storage (RRS) in S3 +* Support for Cluster Computing instances and Placement Groups in EC2 \ No newline at end of file diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.1.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.1.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..8c294acae5b1acf6b60c36b0733386e1ad70c4c5 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.1.0.rst @@ -0,0 +1,115 @@ +=========== +boto v2.1.0 +=========== + +The 2.1.0 release of boto is now available on `PyPI`_ and `Google Code`_. + +.. _`PyPI`: http://pypi.python.org/pypi/boto +.. _`Google Code`: http://code.google.com/p/boto/downloads/ + +You can view a list of issues that have been closed in this release at +https://github.com/boto/boto/issues?milestone=4&state=closed) + +You can get a comprehensive list of all commits made between the 2.0 release +and the 2.1.0 release at https://github.com/boto/boto/compare/033457f30d...a0a1fd54ef. + +Some highlights of this release: + +* Server-side encryption now supported in S3. +* Better support for VPC in EC2. +* Support for combiner in StreamingStep for EMR. +* Support for CloudFormations. +* Support for streaming uploads to Google Storage. +* Support for generating signed URL's in CloudFront. +* MTurk connection now uses HTTPS by default, like all other Connection objects. +* You can now PUT multiple data points to CloudWatch in one call. +* CloudWatch Dimension object now correctly supports multiple values for same + dimension name. +* Lots of documentation fixes/additions + +There were 235 commits in this release from 35 different authors. 
The authors +are listed below, in no particular order: + +* Erick Fejta +* Joel Barciauskas +* Matthew Tai +* Hyunjung Park +* Mitch Garnaat +* Victor Trac +* Andy Grimm +* ZerothAngel +* Dan Lecocq +* jmallen +* Greg Taylor +* Brian Grossman +* Marc Brinkmann +* Hunter Blanks +* Steve Johnson +* Keith Fitzgerald +* Kamil Klimkiewicz +* Eddie Hebert +* garnaat +* Samuel Lucidi +* Kazuhiro Ogura +* David Arthur +* Michael Budde +* Vineeth Pillai +* Trevor Pounds +* Mike Schwartz +* Ryan Brown +* Mark +* Chetan Sarva +* Dan Callahan +* INADA Naoki +* Mitchell Hashimoto +* Chris Moyer +* Riobard +* Ted Romer +* Justin Riley +* Brian Beach +* Simon Ratner + +We processed 60 pull requests for this release from 40 different contributors. Here are the github user id's for all of the pull request authors: + +* jtriley +* mbr +* jbarciauskas +* hyunjung +* bugi +* ryansb +* gtaylor +* ehazlett +* secretmike +* riobard +* simonratner +* irskep +* sanbornm +* methane +* jumping +* mansam +* miGlanz +* dlecocq +* fdr +* mitchellh +* ehebert +* memory +* hblanks +* mbudde +* ZerothAngel +* goura +* natedub +* tpounds +* bwbeach +* mumrah +* chetan +* jmallen +* a13m +* mtai +* fejta +* jibs +* callahad +* vineethrp +* JDrosdeck +* gholms + +If you are trying to reconcile that data (i.e. 35 different authors and 40 users with pull requests), well so am I. I'm just reporting on the data that I get from the Github api 8^) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.1.1.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.1.1.rst new file mode 100644 index 0000000000000000000000000000000000000000..981d7261e89f0a4e482eed1952b6512950731e66 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.1.1.rst @@ -0,0 +1,7 @@ +=========== +boto v2.1.1 +=========== + +The 2.1.1 release fixes one serious issue with the RDS module. + +https://github.com/boto/boto/issues/382 \ No newline at end of file diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.10.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.10.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..fda15b424d92c012543e2a2dc2f068364ebb35bc --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.10.0.rst @@ -0,0 +1,54 @@ +boto v2.10.0 +============ + +:date: 2013/08/13 + +This release adds Mobile Push Notification support to Amazon Simple Notification +Service, better reporting for Amazon Redshift, SigV4 authorization for Amazon +Elastic MapReduce & lots of bugfixes. + + +Features +-------- + +* Added support for Mobile Push Notifications to SNS. This enables you to send + push notifications to mobile devices (such as iOS or Android) using SNS. + (:sha:`ccba574`) +* Added support for better reporting within Redshift. (:sha:`9d55dd3`) +* Switched Elastic MapReduce to use SigV4 for authorization. (:sha:`b80aa48`) + + +Bugfixes +-------- + +* Added the ``MinAdjustmentType`` parameter to EC2 Autoscaling. (:issue:`1562`, + :issue:`1619`, :sha:`1760284`, :sha:`2a11fd9`, :sha:`2d14006` & + :sha:`b7f1ae1`) +* Fixed how DynamoDB tracks changes to data in ``Item`` objects, fixing + failures with modified sets not being sent. (:issue:`1565`, + :sha:`b111fcf` & :sha:`812f9a6`) +* Updated the CA certificates Boto ships with. (:issue:`1578`, :sha:`4dfadc8`) +* Fixed how CloudSearch's ``Layer2`` object gets initialized. (:issue:`1629`, + :issue:`1630`, :sha:`40b3652` & :sha:`f797ff9`) +* Fixed the ``-w`` flag in ``s3put``. 
(:issue:`1637`, :sha:`0865004` & + :sha:`3fe70ca`) +* Added the ``ap-southeast-2`` endpoint for DynamoDB. (:issue:`1621`, + :sha:`501b637`) +* Fixed test suite to run faster. (:sha:`243a67e`) +* Fixed how non-JSON responses are caught from CloudSearch. (:issue:`1633`, + :issue:`1645`, :sha:`d5a5c01`, :sha:`954a50c`, :sha:`915d8ff` & + :sha:`4407fcb`) +* Fixed how ``DeviceIndex`` is parsed from EC2. (:issue:`1632`, :issue:`1646`, + :sha:`ff15e1f`, :sha:`8337a0b` & :sha:`27c9b04`) +* Fixed EC2's ``connect_to_region`` to respect the ``region`` parameter. ( + :issue:`1616`, :issue:`1654`, :sha:`9c37256`, :sha:`5950d12` & :sha:`b7eebe8`) +* Added ``modify_network_interface_atribute`` to EC2 connections. + (:issue:`1613`, :issue:`1656`, :sha:`e00b601`, :sha:`5b62f27`, :sha:`126f6e9`, + :sha:`bbfed1f` & :sha:`0c61293`) +* Added support for ``param_group`` within RDS. (:issue:`1639`, :sha:`c47baf0`) +* Added support for using ``Item.partial_save`` to create new records within + DynamoDBv2. (:issue:`1660`, :issue:`1521`, :sha:`bfa469f` & :sha:`58a13d7`) +* Several documentation improvements/fixes: + + * Updated guideline on how core should merge PRs. (:sha:`80a419c`) + * Fixed a typo in a CloudFront docstring. (:issue:`1657`, :sha:`1aa0621`) \ No newline at end of file diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.11.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.11.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..267d4a157ff6657734f5c2dc93b3cbc36fa5c927 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.11.0.rst @@ -0,0 +1,62 @@ +boto v2.11.0 +============ + +:date: 2013/08/29 + +This release adds Public IP address support for VPCs created by EC2. It also +makes the GovCloud region available for all services. Finally, this release +also fixes a number of bugs. + + +Features +-------- + +* Added Public IP address support within VPCs created by EC2. (:sha:`be132d1`) +* All services can now easily use GovCloud. (:issue:`1651`, :sha:`542a301`, + :sha:`3c56121`, :sha:`9167d89`) +* Added ``db_subnet_group`` to + ``RDSConnection.restore_dbinstance_from_point_in_time``. (:issue:`1640`, + :sha:`06592b9`) +* Added ``monthly_backups`` to EC2's ``trim_snapshots``. (:issue:`1688`, + :sha:`a2ad606`, :sha:`2998c11`, :sha:`e32d033`) +* Added ``get_all_reservations`` & ``get_only_instances`` methods to EC2. + (:issue:`1572`, :sha:`ffc6cc0`) + + +Bugfixes +-------- + +* Fixed the parsing of CloudFormation's ``LastUpdatedTime``. (:issue:`1667`, + :sha:` 70f363a`) +* Fixed STS' ``assume_role_with_web_identity`` to work correctly. + (:issue:`1671`, :sha:`ed1f403`, :sha:`ca794d5`, :sha:`ed7e563`, + :sha:`859762d`) +* Fixed how VPC security group filtering is done in EC2. (:issue:`1665`, + :issue:`1677`, :sha:`be00956`, :sha:`5e85dd1`, :sha:`e63aae8`) +* Fixed fetching more than 100 records with ``ResourceRecordSet``. + (:issue:`1647`, :issue:`1648`, :issue:`1680`, :sha:`b64dd4f`, :sha:`276df7e`, + :sha:`e57cab0`, :sha:`e62a58b`, :sha:`4c81bea`, :sha:`a3c635b`) +* Fixed how VPC Security Groups are referred to when working with RDS. + (:issue:`1602`, :issue:`1683`, :issue:`1685`, :issue:`1694`, :sha:`012aa0c`, + :sha:`d5c6dfa`, :sha:`7841230`, :sha:`0a90627`, :sha:`ed4fd8c`, + :sha:`61d394b`, :sha:`ebe84c9`, :sha:`a6b0f7e`) +* Google Storage ``Key`` now uses transcoding-invariant headers where possible. 
+ (:sha:`d36eac3`) +* Doing non-multipart uploads when using ``s3put`` no longer requires having + the ``ListBucket`` permission. (:issue:`1642`, :issue:`1693`, :sha:`f35e914`) +* Fixed the serialization of ``attributes`` in a variety of SNS methods. + (:issue:`1686`, :sha:`4afb3dd`, :sha:`a58af54`) +* Fixed SNS to be better behaved when constructing an mobile push notification. + (:issue:`1692`, :sha:`62fdf34`) +* Moved SWF to SigV4. (:sha:`ef7d255`) +* Several documentation improvements/fixes: + + * Updated the DynamoDB v2 docs to correct how the connection is built. + (:issue:`1662`, :sha:`047962d`) + * Fixed a typo in the DynamoDB v2 docstring for ``Table.create``. + (:sha:`be00956`) + * Fixed a typo in the DynamoDB v2 docstring for ``Table`` for custom + connections. (:issue:`1681`, :sha:`6a53020`) + * Fixed incorrect parameter names for ``DBParameterGroup`` in RDS. + (:issue:`1682`, :sha:`0d46aed`) + * Fixed a typo in the SQS tutorial. (:issue:`1684`, :sha:`38b7889`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.12.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.12.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..8b713c4b6f2d95043ebb7d44725c5590c6fc7898 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.12.0.rst @@ -0,0 +1,32 @@ +boto v2.12.0 +============ + +:date: 2013/09/04 + +This release adds support for Redis & replication groups to Elasticache as +well as several bug fixes. + + +Features +-------- + +* Added support for Redis & replication groups to Elasticache. (:sha:`f744ff6`) + + +Bugfixes +-------- + +* Boto's User-Agent string has changed. Mostly additive to include more + information. (:sha:`edb038a`) +* Headers that are part of S3's signing are now correctly coerced to the proper + case. (:issue:`1687`, :sha:`89eae8c`) +* Altered S3 so that it's possible to track what portions of a multipart upload + succeeded. (:issue:`1305`, :issue:`1675`, :sha:`e9a2c59`) +* Added ``create_lb_policy`` & ``set_lb_policies_of_backend_server`` to ELB. + (:issue:`1695`, :sha:`77a9458`) +* Fixed pagination when listing vaults in Glacier. (:issue:`1699`, + :sha:`9afecca`) +* Several documentation improvements/fixes: + + * Added some docs about what command-line utilities ship with boto. + (:sha:`5d7d54d`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.13.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.13.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..b28e9ba7844b8474123698db8a2c4d50fadeec21 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.13.0.rst @@ -0,0 +1,40 @@ +boto v2.13.0 +============ + +:date: 2013/09/12 + +This release adds support for VPC within AWS Opsworks, added dry-run support & +the ability to modify reserved instances in EC2 as well as several important +bugfixes for EC2, SNS & DynamoDBv2. + + +Features +-------- + +* Added support for VPC within Opsworks. (:sha:`56e1df3`) +* Added support for ``dry_run`` within EC2. (:sha:`dd7774c`) +* Added support for ``modify_reserved_instances`` & + ``describe_reserved_instances_modifications`` within EC2. (:sha:`7a08672`) + + +Bugfixes +-------- + +* Fixed EC2's ``associate_public_ip`` to work correctly. (:sha:`9db6101`) +* Fixed a bug with ``dynamodb_load`` when working with sets. (:issue:`1664`, + :sha:`ef2d28b`) +* Changed SNS ``publish`` to use POST. 
(:sha:`9c11772`) +* Fixed inability to create LaunchConfigurations when using Block Device + Mappings. (:issue:`1709`, :issue:`1710`, :sha:`5fd728e`) +* Fixed DynamoDBv2's ``batch_write`` to appropriately handle + ``UnprocessedItems``. (:issue:`1566`, :issue:`1679`, :issue:`1714`, + :sha:`2fc2369`) +* Several documentation improvements/fixes: + + * Added Opsworks docs to the index. (:sha:`5d48763`) + * Added docs on the correct string values for ``get_all_images``. + (:issue:`1674`, :sha:`1e4ed2e`) + * Removed a duplicate ``boto.s3.prefix`` entry from the docs. + (:issue:`1707`, :sha:`b42d34c`) + * Added an API reference for ``boto.swf.layer2``. (:issue:`1712`, + :sha:`9f7b15f`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.13.2.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.13.2.rst new file mode 100644 index 0000000000000000000000000000000000000000..4e3d6842b0b24eb54275c2670992bef2fa2d078b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.13.2.rst @@ -0,0 +1,39 @@ +boto v2.13.2 +============ + +:date: 2013/09/16 + +This release is a bugfix-only release, correcting several problems in EC2 as +well as S3, DynamoDB v2 & SWF. + +.. note:: + + There was no v2.13.1 release made public. There was a packaging error that + was discovered before it was published to PyPI. + + We apologise for the fault in the releases. Those responsible have been + sacked. + + +Bugfixes +-------- + +* Fixed test fallout from the EC2 dry-run change. (:sha:`2159456`) +* Added tests for more of SWF's ``layer2``. (:issue:`1718`, :sha:`35fb741`, + :sha:`a84d401`, :sha:`1cf1641`, :sha:`a36429c`) +* Changed EC2 to allow ``name`` to be optional in calls to ``copy_image``. + (:issue:`1672`, :sha:` 26285aa`) +* Added ``billingProducts`` support to EC2 ``Image``. (:issue:`1703`, + :sha:`cccadaf`, :sha:`3914e91`) +* Fixed a place where ``dry_run`` was handled in EC2. (:issue:`1722`, + :sha:`0a52c82`) +* Fixed ``run_instances`` with a block device mapping. (:issue:`1723`, + :sha:`974743f`, :sha:`9049f05`, :sha:`d7edafc`) +* Fixed ``s3put`` to accept headers with a ``=`` in them. (:issue:`1700`, + :sha:`7958c70`) +* Fixed a bug in DynamoDB v2 where scans with filters over large sets may not + return all values. (:issue:`1713`, :sha:`02893e1`) +* Cloudsearch now uses SigV4. (:sha:`b2bdbf5`) +* Several documentation improvements/fixes: + + * Added the "Apps Built On Boto" doc. (:sha:`3bd628c`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.13.3.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.13.3.rst new file mode 100644 index 0000000000000000000000000000000000000000..f145f75ae5b9d0bf0be1371a7f69e3ae06ab5f05 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.13.3.rst @@ -0,0 +1,11 @@ +boto v2.13.3 +============ + +:date: 2013/09/16 + +This release fixes a packaging error with the previous version of boto. +The version ``v2.13.2`` was provided instead of ``2.13.2``, causing things +like ``pip`` to incorrectly resolve the latest release. + +That release was only available for several minutes & was removed from PyPI +due to the way it would break installation for users. 
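The packaging slip described above is, at bottom, a version-string normalization problem: the stray leading ``v`` kept ``v2.13.2`` from comparing as a plain release number. A minimal sketch of the rule that later, PEP 440-aware tooling applies (this uses the third-party ``packaging`` library purely for illustration; it is not part of this patch, and pip did not yet behave this way in 2013)::

    from packaging.version import Version

    # PEP 440 normalizes away an optional leading "v",
    # so the two spellings compare as the same release.
    assert Version("v2.13.2") == Version("2.13.2")

    # With a consistent comparison key, the newest release resolves correctly.
    releases = ["2.13.0", "v2.13.2", "2.13.3"]
    print(max(releases, key=Version))  # 2.13.3

Pre-PEP 440 comparisons had no such rule, so the ``v``-prefixed string mis-ordered against plain release numbers, which is why ``2.13.3`` had to be cut.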
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.14.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.14.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..4f235f0489ee59c28cdf44d9ebc74141536f8d0e --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.14.0.rst @@ -0,0 +1,63 @@ +boto v2.14.0 +============ + +:date: 2013/10/09 + +This release makes ``s3put`` region-aware, adds some missing features to +EC2 and SNS, enables EPUB documentation output, and makes the HTTP(S) +connection pooling port-aware, which in turn enables connecting to +e.g. mock services running on ``localhost``. It also includes support +for the latest EC2 and OpsWorks features, as well as several +important bugfixes for EC2, DynamoDB, MWS, and Python 2.5 support. + + +Features +-------- + +* Add support for a ``--region`` argument to ``s3put`` and auto-detect bucket + regions if possible (:issue:`1731`, :sha:`d9c28f6`) +* Add ``delete_notification_configuration`` for EC2 autoscaling + (:issue:`1717`, :sha:`ebb7ace`) +* Add support for registering HVM instances (:issue:`1733`, :sha:`2afc68e`) +* Add support for ``ReplaceRouteTableAssociation`` for EC2 (:issue:`1736`, + :sha:`4296835`) +* Add ``sms`` as an option for SNS subscribe (:issue:`1744`, :sha:`8ff08e5`) +* Allow overriding ``has_google_credentials`` (:issue:`1752`, :sha:`052cc91`) +* Add EPUB output format for docs (:issue:`1759`, :sha:`def7c67`) +* Add handling of ``Connection: close`` HTTP headers in responses + (:issue:`1773`, :sha:`1a38f32`) +* Make connection pooling port-aware (:issue:`1764`, :issue:`1737`, + :sha:`b6c7330`) +* Add support for ``instance_type`` to ``modify_reserved_instances`` + (:sha:`bf07eee`) +* Add support for new OpsWorks features (:sha:`f512898`) + + +Bugfixes +-------- + +* Remove erroneous ``dry_run`` parameter (:issue:`1729`, :sha:`35a516e`) +* Fix task_list override in poll methods of SWF Deciders and Workers ( + :issue:`1724`, :sha:`fa8d871`) +* Remove Content-Encoding header from metadata test (:issue:`1735`, + :sha:`c8b0130`) +* Fix the ability to override DynamoDBv2 host and port when creating + connections (:issue:`1734`, :sha:`8d2b492`) +* Fix UnboundLocalError (:sha:`e0e6aeb`) +* ``self.rules`` is of type IPPermissionsList, remove takes no kwargs + (:sha:`3c56b3f`) +* Nicer error messages for 403s (:issue:`1753`, :sha:`d3d9eab`) +* Various documentation fixes (:issue:`1762`, :sha:`76aef10`) +* Various Python 2.5 fixes (:sha:`150aef6`, :sha:`67ae9ff`) +* Prevent certificate tests from failing for non-govcloud accounts + (:sha:`2d3d9f6`) +* Fix flaky resumable upload test (:issue:`1768`, :sha:`6aa8ae2`) +* Force the Host HTTP header to fix an issue with older httplibs + (:sha:`202c456`) +* Blacklist S3 from forced Host HTTP header (:sha:`9193226`) +* Fix ``propagate_at_launch`` spelling error (:issue:`1739`, :sha:`e78d88a`) +* Remove unused code that causes exceptions with bad response data + (:issue:`1771`, :sha:`bec5e70`) +* Fix ``detach_subnets`` typo (:issue:`1760`, :sha:`4424e1b`) +* Fix result list handling of ``GetMatchingProductForIdResponse`` for MWS + (:issue:`1751`, :sha:`977b7dc`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.15.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.15.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..648525fe807d4ef963ff66e236a84b213921ef01 --- /dev/null +++ 
b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.15.0.rst @@ -0,0 +1,40 @@ +boto v2.15.0 +============ + +:date: 2013/10/17 + +This release adds support for Amazon Elastic Transcoder audio transcoding, new +regions for Amazon Simple Storage Service (S3), Amazon Glacier, and Amazon +Redshift as well as new parameters in Amazon Simple Queue Service (SQS), Amazon +Elastic Compute Cloud (EC2), and the ``lss3`` utility. Also included are +documentation updates and fixes for S3, Amazon DynamoDB, Amazon Simple Workflow +Service (SWF) and Amazon Marketplace Web Service (MWS). + + +Features +-------- + +* Add SWF tutorial and code sample (:issue:`1769`, :sha:`36524f5`) +* Add ap-southeast-2 region to S3WebsiteEndpointTranslate (:issue:`1777`, + :sha:`e7b0b39`) +* Add support for ``owner_acct_id`` in SQS ``get_queue`` (:issue:`1786`, + :sha:`c1ad303`) +* Add ap-southeast-2 region to Glacier (:sha:`c316266`) +* Add ap-southeast-1 and ap-southeast-2 to Redshift (:sha:`3d67a03`) +* Add SSH timeout option (:issue:`1755`, :sha:`d8e70ef`, :sha:`653b82b`) +* Add support for markers in ``lss3`` (:issue:`1783`, :sha:`8ee4b1f`) +* Add ``block_device_mapping`` to EC2 ``create_image`` (:issue:`1794`, + :sha:`86afe2e`) +* Updated SWF tutorial (:issue:`1797`, :sha:`3804b16`) +* Support Elastic Transcoder audio transcoding (:sha:`03a5087`) + +Bugfixes +-------- + +* Fix VPC module docs, ELB docs, some formatting (:issue:`1770`, + :sha:`75de377`) +* Fix DynamoDB item ``attrs`` initialization (:issue:`1776`, :sha:`8454a2b`) +* Fix parsing of empty member lists for MWS (:issue:`1785`, :sha:`7b46ca5`) +* Fix link to release notes in docs (:sha:`a6bf794`) +* Do not validate bucket when copying a key (:issue:`1763`, :sha:`5505113`) +* Retry HTTP 502, 504 errors (:issue:`1798`, :sha:`c832e2d`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.16.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.16.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..afa4465aea272064256c39dd01872e57bdb7531c --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.16.0.rst @@ -0,0 +1,41 @@ +boto v2.16.0 +============ + +:date: 2013/11/08 + +This release adds new Amazon Elastic MapReduce functionality, provides updates and fixes for Amazon EC2, Amazon VPC, Amazon DynamoDB, Amazon SQS, Amazon Elastic MapReduce, and documentation updates for several services. + + +Features +-------- +* Added recipe for parallel execution of activities to SWF tutorial. (:issue:`1800`, :issue:`1800`, :sha:`52c5432`) +* Added launch_config's parameter associate_ip_address for VPC. (:issue:`1799`, :issue:`1799`, :sha:`6685adb`) +* Update elbadmin add/remove commands to support multiple instance arguments. (:issue:`1806`, :issue:`1806`, :sha:`4aad26d`) +* Added documentation for valid auto scaling event types and tags. (:issue:`1807`, :issue:`1807`, :sha:`664f6e8`) +* Support VPC tenancy restrictions and filters for DHCP options. (:issue:`1801`, :issue:`1801`, :sha:`8c5d8de`) +* Add VPC network ACL support. (:issue:`1809`, :issue:`1098`, :issue:`1809`, :sha:`9043d09`) +* Add convenience functions to make DynamoDB2 behave more like DynamoDB (:issue:`1780`, :sha:`2cecaca`) +* EC2 cancel_spot_instance_requests now returns a list of SpotInstanceRequest objects. (:issue:`1811`, :issue:`1811`, :issue:`1754`, :sha:`f3361b9`) +* Fix VPC DescribeVpnConnections call argument; Add support for static_routes_only when creating a new VPC. 
(:issue:`1816`, :issue:`1816`, :issue:`1481`, :sha:`b408637`) +* Add a section about DynamoDB Local to the DynamoDBv2 high level docs. (:issue:`1821`, :issue:`1821`, :issue:`1818`, :sha:`639505f`) +* Add support for new Elastic MapReduce APIs (:issue:`1836`, :sha:`5562264`) +* Modify EMR add_jobflow_steps to return a JobFlowStepList. (:issue:`1838`, :issue:`1838`, :sha:`ef9564f`) +* Generate docs for route53/zone, remove docs for route53/hostedzone. (:issue:`1837`, :issue:`1837`, :sha:`99e2e67`) + + +BugFixes +-------- +* Fix for MWS iterator handling (:sha:`7e6f98d`) +* Clarify documentation for MetricAlarm dimensions. (:issue:`1808`, :issue:`1808`, :issue:`1803`, :sha:`4233fbf`) +* Fixes for general connection behind proxy. (:issue:`1781`, :issue:`1781`, :sha:`dc8bbea`) +* Validate S3 method kwarg names to prevent misspelling. (:issue:`1810`, :issue:`1810`, :issue:`1782`, :sha:`947a14a`) +* Fix dependencies so they show up as optional in CheeseShop (:issue:`1617`, :sha:`54da8b6`) +* Route53 retry HTTP error 400s (:issue:`1618`, :sha:`6e355b3`) +* Fix typo in IAMConnection documentation (:issue:`1820`, :sha:`3fc335d`) +* Fix MWS MemberLists parsing. (:issue:`1815`, :issue:`1815`, :sha:`0f6f089`) +* Fix typo in SQS documentation (:issue:`1830`, :sha:`20532a6`) +* Update auto scaling documentation. (:issue:`1824`, :issue:`1824`, :issue:`1823`, :sha:`9a359ec`) +* Fixing region endpoints for EMR (:issue:`1831`, :sha:`ed669f7`) +* Raising an exception in SQS message decode() should not abort parsing. (:issue:`1835`, :issue:`1835`, :issue:`1833`, :sha:`2a00c92`) +* Replace correct VPC ACL association instead of just the first one. (:issue:`1844`, :issue:`1844`, :issue:`1843`, :sha:`c70b8d6`) +* Prevent swallowing CloudSearch errors (:issue:`1846`, :issue:`1842`, :sha:`c2f955b`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.17.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.17.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..b64ba5f59cc55976f2ad3fa52ef9444906084117 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.17.0.rst @@ -0,0 +1,21 @@ +boto v2.17.0 +============ + +:date: 2013/11/14 + +This release adds support for the new AWS CloudTrail service, support for +Amazon Redshift's new features related encryption, audit logging, data load +from external hosts, WLM configuration, database distribution styles and +functions, as well as cross region snapshot copying. + + +Features +-------- + +* Add support for AWS CloudTrail (:sha:`53ba0c9`) +* Add support for new Amazon Redshift features (:sha:`d94b48c`) + +Bugfixes +-------- + +* Add missing argument for Google Storage resumable uploads. (:sha:`b777b62`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.18.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.18.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..268a5625682ae2f29a25dcc7a3bd53dcb9158a99 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.18.0.rst @@ -0,0 +1,41 @@ +boto v2.18.0 +============ + +:date: 2013/11/22 + +This release adds support for new AWS Identity and Access Management (IAM), +AWS Security Token Service (STS), Elastic Load Balancing (ELB), Amazon Elastic +Compute Cloud (EC2), Amazon Relational Database Service (RDS), and Amazon +Elastic Transcoder APIs and parameters. Amazon Redshift SNS notifications are +now supported. 
CloudWatch is updated to use signature version four, issues +encoding HTTP headers are fixed and several services received documentation +fixes. + + +Features +-------- +* Add support for new STS and IAM calls related to SAML. (:issue:`1867`, + :issue:`1867`, :sha:`1c51d17`) +* Add SigV4 support to Cloudwatch (:sha:`ef43035`) +* Add support for ELB Attributes and Cross Zone Balancing. (:issue:`1852`, + :issue:`1852`, :sha:`76f8b7f`) +* Add RDS promote and rename support. (:issue:`1857`, :issue:`1857`, + :sha:`0b62c70`) +* Update EC2 ``get_all_snapshots`` and add support for registering an image + with a snapshot. (:issue:`1850`, :issue:`1850`, :sha:`3007956`) + + +Bugfixes +-------- +* Fix issues related to encoding of values in HTTP headers when using + unicode. (:issue:`1864`, :issue:`1864`, :issue:`1839`, :issue:`1829`, + :issue:`1828`, :issue:`702`, :sha:`5610dd7`) +* Fix order of Beanstalk documetation to match param order. (:issue:`1863`, + :issue:`1863`, :sha:`a3a29f8`) +* Make sure file is closed before attempting to delete it when downloading + an S3 key. (:issue:`1791`, :sha:`0e6dcbe`) +* Fix minor CloudTrail documentation typos. (:issue:`1861`, :issue:`1861`, + :sha:`256a115`) +* Fix DynamoDBv2 tutorial sentence with missing verb. (:issue:`1859`, + :issue:`1825`, :issue:`1859`, :sha:`0fd5300`) +* Fix parameter validation for gs (:issue:`1858`, :sha:`6b9a869`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.19.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.19.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..e67affbf493741500bb02e42c26530d4c7bb7396 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.19.0.rst @@ -0,0 +1,24 @@ +boto v2.19.0 +============ + +:date: 2013/11/27 + +This release adds support for max result limits for Amazon EC2 calls, adds +support for Amazon RDS database snapshot copies and fixes links to the +changelog. + + +Features +-------- +* Add max results parameters to EC2 describe instances and describe tags. + (:issue:`1873`, :issue:`1873`, :sha:`ad8a64a`) +* Add support for RDS CopyDBSnapshot. (:issue:`1872`, :issue:`1872`, + :issue:`1865`, :sha:`bffb758`) + + +Bugfixes +-------- +* Update README.rst to link to ReadTheDocs changelogs. (:issue:`1869`, + :sha:`26f3dbe`) +* Delete the old changelog in favor of the README link to ReadTheDocs + changelogs. (:issue:`1870`, :issue:`1870`, :sha:`32bc333`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.2.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.2.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..0fefd1718fb1b1e0d1a62d414a179eaee072e95b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.2.0.rst @@ -0,0 +1,89 @@ +=========== +boto v2.2.0 +=========== + +The 2.2.0 release of boto is now available on `PyPI`_. + +.. _`PyPI`: http://pypi.python.org/pypi/boto + +You can view a list of issues that have been closed in this release at +https://github.com/boto/boto/issues?milestone=5&state=closed. + +You can get a comprehensive list of all commits made between the 2.0 release +and the 2.1.0 release at +https://github.com/boto/boto/compare/fa0d6a1e49c8468abbe2c99cdc9f5fd8fd19f8f8...26c8eb108873bf8ce1b9d96d642eea2beef78c77. + +Some highlights of this release: + +* Support for Amazon DynamoDB service. +* Support for S3 Object Lifecycle (Expiration). +* Allow anonymous request for S3. 
+* Support for creating Load Balancers in VPC. +* Support for multi-dimension metrics in CloudWatch. +* Support for Elastic Network Interfaces in EC2. +* Support for Amazon S3 Multi-Delete capability. +* Support for new AMIversion and overriding of parameters in EMR. +* Support for SendMessageBatch request in SQS. +* Support for DescribeInstanceStatus request in EC2. +* Many, many improvements and additions to API documentation and Tutorials. + Special thanks to Greg Taylor for all of the Sphinx cleanups and new docs. + +There were 336 commits in this release from 40 different authors. The authors +are listed below, in no particular order: + +* Garrett Holmstrom +* mLewisLogic +* Warren Turkal +* Nathan Binkert +* Scott Moser +* Jeremy Edberg +* najeira +* Marc Cohen +* Jim Browne +* Mitch Garnaat +* David Ormsbee +* Blake Maltby +* Thomas O'Dowd +* Victor Trac +* David Marin +* Greg Taylor +* rdodev +* Jonathan Sabo +* rdoci +* Mike Schwartz +* l33twolf +* Keith Fitzgerald +* Oleksandr Gituliar +* Jason Allum +* Ilya Volodarsky +* Rajesh +* Felipe Reyes +* Andy Grimm +* Seth Davis +* Dave King +* andy +* Chris Moyer +* ruben +* Spike Gronim +* Daniel Norberg +* Justin Riley +* Milan Cermak timtebeek +* unknown +* Yotam Gingold +* Brian Oldfield + +We processed 21 pull requests for this release from 40 different contributors. +Here are the github user id's for all of the pull request authors: + +* milancermak +* jsabo +* gituliar +* rdodev +* marccohen +* tpodowd +* trun +* jallum +* binkert +* ormsbee +* timtebeek + diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.2.1.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.2.1.rst new file mode 100644 index 0000000000000000000000000000000000000000..5d122040e25803a4aaee306435794790c21cc348 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.2.1.rst @@ -0,0 +1,6 @@ +=========== +boto v2.2.1 +=========== + +The 2.2.1 release fixes a packaging problem that was causing problems when +installing via pip. \ No newline at end of file diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.2.2.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.2.2.rst new file mode 100644 index 0000000000000000000000000000000000000000..993dc4833dd7fc839054fff75fa226e992d5cc1b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.2.2.rst @@ -0,0 +1,31 @@ +=========== +boto v2.2.2 +=========== + +The 2.2.2 release of boto is now available on `PyPI`_. + +.. _`PyPI`: http://pypi.python.org/pypi/boto + +You can view a list of issues that have been closed in this release at +https://github.com/boto/boto/issues?milestone=8&state=closed. + +You can get a comprehensive list of all commits made between the 2.2.1 release +and the 2.2.2 release at https://github.com/boto/boto/compare/2.2.1...2.2.2. + +This is a bugfix release. + +There were 71 commits in this release from 11 different authors. 
The authors +are listed below, in no particular order: + +* aficionado +* jimbrowne +* rdodev +* milancermak +* garnaat +* kopertop +* samuraisam +* tpodowd +* psa +* mfschwartz +* gtaylor + diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.20.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.20.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..2700e1b61774807921f56f3702ad9a6a031c365f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.20.0.rst @@ -0,0 +1,31 @@ +boto v2.20.0 +============ + +:date: 2013/12/12 + +This release adds support for Amazon Kinesis and AWS Direct Connect. Amazon EC2 gets support for new i2 instance types and is more resilient against metadata failures, Amazon DynamoDB gets support for global secondary indexes and Amazon Relational Database Service (RDS) supports new DBInstance and DBSnapshot attributes. There are several other fixes for various services, including updated support for CloudStack and Eucalyptus. + + +Features +-------- +* Add support for Amazon Kinesis (:sha:`d0b684e`) +* Add support for i2 instance types to EC2. (:sha:`0f5371f`) +* Add support for DynamoDB Global Secondary Indexes (:sha:`297cacb`) +* Add support for AWS Direct Connect. (:issue:`1894`, :issue:`1894`, :sha:`3cbca26`) +* Add option for sorting SDB dumps to sdbadmin. (:issue:`1888`, :issue:`1888`, :sha:`070e4f6`) +* Add a retry when EC2 metadata is returned as corrupt JSON. (:issue:`1883`, :issue:`1883`, :issue:`1868`, :sha:`41470a0`) +* Added some missing attributes to DBInstance and DBSnapshot. (:issue:`1880`, :issue:`1880`, :sha:`2751dff`) + + +Bugfixes +-------- +* Implement nonzero for DynamoDB Item to consider empty items falsey (:issue:`1899`, :sha:`808e550`) +* Remove `dimensions` from Metric.query() docstring. (:issue:`1901`, :issue:`1901`, :sha:`ba6b8c7`) +* Make trailing slashes for EC2 metadata URLs explicit & remove them from userdata requests. This fixes using boto for CloudStack (:issue:`1900`, :issue:`1900`, :issue:`1897`, :issue:`1856`, :sha:`5f4506e`) +* Fix the DynamoDB 'scan in' filter to compare the same attribute types in a list rather than using an attribute set. (:issue:`1896`, :issue:`1896`, :sha:`5fc59d6`) +* Updating Amazon ElastiCache parameters to be optional when creating a new cache cluster. (:issue:`1876`, :issue:`1876`, :sha:`342b8df`) +* Fix honor cooldown AutoScaling parameter serialization to prevent an exception and bad request. (:issue:`1895`, :issue:`1895`, :issue:`1892`, :sha:`fc4674f`) +* Fix ignored RDS backup_retention_period when value was 0. (:issue:`1887`, :issue:`1887`, :issue:`1886`, :sha:`a19eb14`) +* Use auth_handler to specify host header value including custom ports if possible, which are used by Eucalyptus. (:issue:`1862`, :issue:`1862`, :sha:`ce6df03`) +* Fix documentation of launch config in Autoscaling Group. (:issue:`1881`, :issue:`1881`, :sha:`6f704d9`) +* typo: AIM -> IAM (:issue:`1882`, :sha:`7ea2d5c`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.20.1.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.20.1.rst new file mode 100644 index 0000000000000000000000000000000000000000..6793d17623c106b978ae538bd27704f36a1b7a71 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.20.1.rst @@ -0,0 +1,11 @@ +boto v2.20.1 +============ + +:date: 2013/12/13 + +This release fixes an important Amazon EC2 bug related to fetching security credentials via the meta-data service. 
It is recommended that users of boto-2.20.0 upgrade to boto-2.20.1. + + +Bugfixes +-------- +* Bug fix for IAM security credentials metadata URL. (:issue:`1912`, :issue:`1908`, :issue:`1907`, :sha:`f82e7a5`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.21.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.21.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..355a9ec071460a9b44639e0e877b45dcecb5b8bd --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.21.0.rst @@ -0,0 +1,43 @@ +boto v2.21.0 +============ + +:date: 2013/12/19 + +This release adds support for the latest AWS OpsWorks, AWS Elastic Beanstalk, +Amazon DynamoDB, Amazon Elastic MapReduce (EMR), Amazon Simple Storage Service +(S3), Amazon Elastic Transcoder, AWS CloudTrail, and AWS Support APIs. It also +includes documentation and other fixes. + +.. note:: + + Although Boto now includes support for the newly announced China (Beijing) + Region, the service endpoints will not be accessible until the Region’s + limited preview is launched in early 2014. To find out more about the new + Region and request a limited preview account, please visit + http://www.amazonaws.cn/. + + +Features +-------- +* Add support for Elastic Transcoder pagination and new codecs (:sha:`dcb1c5a`) +* Add support for new CloudTrail calling format (:sha:`aeafe9b`) +* Update to the latest Support API (:sha:`45e1884`) +* Add support for arbitrarily large SQS messages stored in S3 via BigMessage. (:issue:`1917`, :sha:`e6cd665`) +* Add support for ``encoding_type`` to S3 (:sha:`6b2d967`) +* Add support for Elastic MapReduce tags (:issue:`1928`, :issue:`1920`, :sha:`b9749c6`, :sha:`8e4c595`) +* Add high level support for global secondary indexes in DynamoDB (:issue:`1924`, :issue:`1913`, :sha:`32dac5b`) +* Add support for Elastic Beanstalk worker environments. (:issue:`1911`, :sha:`bbd4fbf`) +* Add support for OpsWorks IAM user permissions per stack (:sha:`ac6e4e7`) +* Add support for SigV4 to S3 (:sha:`deb9e18`) +* Add support for SigV4 to EC2 (:sha:`bdebfe0`) +* Add support for SigV4 to ElastiCache (:sha:`b892b45`) + + +Bugfixes +-------- +* Add documentation describing account usage for multipart uploads in S3 (:sha:`af03d8d`) +* Update DesiredCapacity if AutoScalingGroup.desired_capacity is not None. (:issue:`1906`, :issue:`1906`, :issue:`1757`, :sha:`b6670ce`) +* Documentation: add Kinesis API reference (:issue:`1921`, :sha:`c169836`) +* Documentation: sriovNetSupport instance attribute (:issue:`1915`, :sha:`e1bafcc`) +* Update RDS documentation for API version: 2013-09-09 (:issue:`1914`, :sha:`fcf702a`) +* Switch all classes to new style classes which results in memory use improvements (:sha:`ca36fa2`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.21.1.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.21.1.rst new file mode 100644 index 0000000000000000000000000000000000000000..000f6ce7304eb119e8ac19cf933aa9d241f9fb2b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.21.1.rst @@ -0,0 +1,21 @@ +boto v2.21.1 +============ + +:date: 2013/12/23 + +This release is a bugfix release which corrects how the Mechanical Turk objects +work & a threading issue when using ``datetime.strptime``. + + +Bugfixes +-------- + +* Added ``cn-north-1`` to regions. (:sha:`9c89de1`) +* Fixed threading issues related to ``datetime.strptime``. 
(:issue:`1898`, + :sha:`2ef66c9`) +* Updated all the old-style inheritance calls. (:issue:`1918`, :issue:`1936`, + :issue:`1937`, :sha:`39a997f` & :sha:`607624f`) +* Documentation: + + * Added missed notes about the cn-north-1 region. (:sha:`738c8cb`) + * Added the C3 family of EC2 instances. (:issue:`1938`, :sha:`05b7482`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.21.2.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.21.2.rst new file mode 100644 index 0000000000000000000000000000000000000000..ab359b65e5a9b0ecc505c6f7f8e8c77f2e7d8b50 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.21.2.rst @@ -0,0 +1,13 @@ +boto v2.21.2 +============ + +:date: 2013/12/24 + +This release is a bugfix release which corrects one more bug in the Mechanical +Turk objects. + + +Bugfixes +-------- + +* Fixed a missed inheritance bug in mturk. (:issue:`1936`, :sha:`0137f29`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.22.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.22.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..3c2b076f2bab107cb003da0015bf5c83a474889a --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.22.0.rst @@ -0,0 +1,29 @@ +boto v2.22.0 +============ + +:date: 2014/01/02 + +This release updates Auto Scaling to support the latest API, adds the ability +to control the response sizes in Amazon DynamoDB queries/scans & includes a +number of bugfixes as well. + + +Features +-------- + +* Updated Auto Scaling to support the latest API. (:sha:`9984c4f`) +* Added the ability to alter response sizes in DynamoDB queries/scans. + (:issue:`1949`, :sha:`6761b01`) + + +Bugfixes +-------- + +* Fix string instance tests. (:issue:`1959`, :sha:`ee203bf`) +* Add missing parameters to the ``get_spot_price_history`` method. (:issue:`1958`, + :sha:`f635474`) +* Fix unicode string parameter handling in S3Connection. (:issue:`1954`, + :issue:`1952`, :sha:`12e6b0c`) +* Fix typo in docstring for SSHClient.run. (:issue:`1953`, :sha:`5263b20`) +* Properly handle getopt long options in s3put. (:issue:`1950`, :issue:`1946`, + :sha:`cf693ff`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.22.1.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.22.1.rst new file mode 100644 index 0000000000000000000000000000000000000000..0fe7aafcb3eaa36b0eff66676f7c88828c514161 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.22.1.rst @@ -0,0 +1,22 @@ +boto v2.22.1 +============ + +:date: 2014/01/06 + +This release fixes handling of keys with special characters in them when using +Signature V4 with Amazon Simple Storage Service (S3). It also fixes a regression +in the ``ResultSet`` object, re-adding the ``nextToken`` attribute. This was +most visible from within Amazon Elastic Compute Cloud (EC2) when calling the +``get_spot_price_history`` method. + +Users in the cn-north-1 region or who make active use of +``get_spot_price_history`` are recommended to upgrade. + + +Bugfixes +-------- + +* Fixed key names with special characters in S3 when using SigV4. + (:sha:`8b37180`) +* Re-added the ``nextToken`` attribute to the EC2 result set object.
+ (:issue:`1968`, :sha:`6928928`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.23.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.23.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..f30473d5d7f3ca23c0ab3810e1783be33aecc500 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.23.0.rst @@ -0,0 +1,49 @@ +boto v2.23.0 +============ + +:date: 2014/01/10 + +This release adds new pagination & date range filtering to Amazon Glacier, more +support for selecting specific attributes within Amazon DynamoDB, security +tokens from environment/config variables & many bugfixes/small improvements. + + +Features +-------- + +* Added pagination & date range filtering to Glacier inventory options. + (:issue:`1977`, :sha:`402a305`) +* Added the ability to select the specific attributes to fetch in the ``scan`` + & ``get_item`` calls within DynamoDB v2. (:issue:`1945`, :issue:`1972`, + :sha:`f6451fb` & :sha:`96cd413`) +* Allow getting a security token from either an environment or configuration + variable. (:issue:``, :sha:``) +* Ported the ``has_item`` call from the original DynamoDB (v1) module to + DynamoDB v2. (:issue:`1973`, :issue:`1822`, :sha:`f96e9e3`) +* Added an ``associate_address_object`` method to EC2. (:issue:`1967`, + :issue:`1874`, :issue:`1893`, :sha:`dd6180c`) +* Added a ``download_to_fileobj`` method to Glacier, similar to the S3 call + of the same name. (:issue:`1960`, :issue:`1941`, :sha:`67266e5`) +* Added support for arbitrary ``dict`` inputs to MWS. (:issue:`1966`, + :sha:`46f193f`) + + +Bugfixes +-------- + +* Made the usage of ``is/is not`` more consistent. (:issue:`1930`, + :sha:`8597c54`) +* Imported ``with_statement`` for old Python versions (:issue:`1975`, + :sha:`a53a574`) +* Changed the ``Binary`` data object within DynamoDB to throw an error if an + invalid data type is used. (:issue:`1963`, :issue:`1956`, :sha:`e5d30c8`) +* Altered the integration tests to avoid connection errors to certain regions. + (:sha:`2555b8a`) +* Changed the GCS resumable upload handler to save tracker files with protection + 0600. (:sha:`7cb344c`) +* Documentation: + + * Clarified documentation around the ``list_metrics`` call in + CloudFormation. (:issue:`1962`, :sha:`c996a72`) + * Added ``Tag`` to the Autoscale API docs. (:issue:`1964`, :sha:`31118d9`) + * Updated the AWS Support documentation to the latest. (:sha:`29f9264`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.24.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.24.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..737a1f1a7b8f6f24e3b1cf761f7698f9541d4f35 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.24.0.rst @@ -0,0 +1,36 @@ +boto v2.24.0 +============ + +:date: 2014/01/29 + +This release adds M3 instance types to Amazon EC2, adds support for dead letter queues to Amazon Simple Queue Service (SQS), adds a single JSON file for all region and endpoint information and provides several fixes to a handful of services and documentation. Additionally, the SDK now supports using AWS Signature Version 4 with Amazon S3. + +Features +-------- +* Load region and endpoint information from a JSON file (:sha:`b9dbaad`) +* Return the x-amz-restore header with GET KEY and fix provider prefix.
(:issue:`1990`, :sha:`43e8e0a`) +* Make S3 key validation optional with the ``validate`` parameter (:issue:`2013`, :issue:`1996`, :sha:`fd6b632`) +* Adding new eu-west-1 and eu-west-2 endpoints for SES. (:issue:`2015`, :sha:`d5ef862`, :sha:`56ba3e5`) +* Google Storage now uses new-style Python classes (:issue:`1927`, :sha:`86c9f77`) +* Add support for step summary list to Elastic MapReduce (:issue:`2011`, :sha:`d3af158`) +* Added the M3 instance types. (:issue:`2012`, :sha:`7c82f57`) +* Add credential profile configuration (:issue:`1979`, :sha:`e3ab708`) +* Add support for dead letter queues to SQS (:sha:`93c7d05`) + +Bugfixes +-------- +* Make the Lifecycle Id optional and fix prefix=None in XML generation. (:issue:`2021`, :sha:`362a04a`) +* Fix DynamoDB query limit bug (:issue:`2014`, :sha:`7ecb3f7`) +* Add documentation about the version_id behavior of Key objects. (:issue:`2026`, :sha:`b6b242c`) +* Fixed typo in Table.create example (:issue:`2023`, :sha:`d81a660`) +* Adding a license/copyright header. (:issue:`2025`, :sha:`26ded39`) +* Update the docstring for the SNS subscribe method (:issue:`2017`, :sha:`4c806de`) +* Renamed unit test with duplicate name (:issue:`2016`, :sha:`c7bd0bd`) +* Use UTC instead of local time in ``test_refresh_credentials`` (:issue:`2020`, :sha:`b5a2eaf`) +* Fix missing ``security_token`` option in some connection classes (:issue:`1989`, :issue:`1942`, :sha:`2b72f32`) +* Fix listing S3 multipart uploads with some parameter combinations (:issue:`2000`, :sha:`49045bc`) +* Fix ``elbadmin`` crash because of non-extant instances in load balancer (:issue:`2001`, :sha:`d47cc14`) +* Fix anonymous S3 fetch test case (:issue:`1988`, :issue:`1992`, :sha:`8fb1666`) +* Fix ``elbadmin`` boto import (:issue:`2002`, :sha:`674c3a6`) +* Fixing SQS tutorial to correctly describe behavior of the write operation (:issue:`1986`, :sha:`6147d86`) +* Fix various grammar mistakes (:issue:`1980`, :sha:`ada40b5`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.25.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.25.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..c925d2cf7f2e98bac279f6225528e4aa382d5e61 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.25.0.rst @@ -0,0 +1,57 @@ +boto v2.25.0 +============ + +:date: 2014/02/07 + +This release includes Amazon Route53 service and documentation updates, +preliminary log file support for Amazon Relational Database Service (RDS), as +well as various other small fixes. Also included is an opt-in to use signature +version 4 with Amazon EC2. + +**IMPORTANT** - This release also includes a **SIGNIFICANT** underlying change +to the Amazon S3 ``get_bucket`` method, to address the blog post by AppNeta_. +We've altered the default behavior to now perform a ``HEAD`` on the bucket, in +place of the old ``GET`` behavior (which would fetch a zero-length list of +keys). + +This should reduce all users' costs & should also be *mostly* +backward-compatible. **HOWEVER**, if you were previously parsing the exception +message from ``S3Connection.get_bucket``, you *will* have to change your code +(see the S3 tutorial for details). ``HEAD`` does *not* return error messages +in as much detail & while we've attempted to patch over as many of the +differences as we can, there may still be edge-cases relative to the prior +behavior. + +.. _AppNeta: http://www.appneta.com/blog/s3-list-get-bucket-default/
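+
+As a rough illustration of the new behavior (the bucket name below is
+hypothetical), code that previously parsed the exception message can
+inspect the HTTP status on the exception instead::
+
+    import boto
+    from boto.exception import S3ResponseError
+
+    conn = boto.connect_s3()
+    try:
+        # get_bucket now issues a HEAD request rather than a GET
+        bucket = conn.get_bucket('my-example-bucket')
+    except S3ResponseError as err:
+        # HEAD responses carry no XML error body, so check the status
+        # code instead of parsing the error message text
+        if err.status == 404:
+            print('bucket does not exist')
+        else:
+            raise
+
+Passing ``validate=False`` to ``get_bucket`` skips the existence check (and
+the extra request) entirely.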
+ + +Features +-------- + +* Add support for Route53 API version 2013-04-01 (:issue:`2080`, :sha:`600dcd0`) +* Add option to opt-in for EC2 SigV4 (:issue:`2074`, :sha:`4d780bd`) +* Add Autoscale feature to get all adjustment types (:issue:`2058`, + :issue:`1538`, :sha:`b9c7e15`) +* Add Route53 unit tests (:issue:`2066`, :sha:`e859576`) +* Add a basic Route53 tutorial (:issue:`2060`, :sha:`f0ad46b`) +* Add Autoscale associated public IP to launch configuration (:issue:`2051`, + :issue:`2028`, :issue:`2029`, :sha:`c58bda6`) +* Add option to pass VPC zone identifiers as a Python list (:issue:`2047`, + :issue:`1772`, :sha:`07ef9e1`) +* Add RDS call to get all log files (:issue:`2040`, :issue:`1994`, + :sha:`925b8cb`) + + +Bugfixes +-------- + +* Changed S3 ``get_bucket`` to use ``HEAD`` in place of ``GET``. (:issue:`2078`, + :issue:`2082`, :sha:`016be83`) +* Fix EMR's describe_cluster_command. (:issue:`2034`, :sha:`1c5621e`) +* Tutorial small code fix (:issue:`2072`, :sha:`38e7db1`) +* Fix CloudFront string representation (:issue:`2069`, :sha:`885c397`) +* Route53 doc cleanup (:issue:`2059`, :sha:`d2fc38e`) +* Fix MWS parsing of GetProductCategoriesForASIN response. (:issue:`2024`, + :sha:`0af08ce`) +* Fix SQS docs for get_queue_attributes (:issue:`2061`, :sha:`1cdc326`) +* Don't insert a '?' in URLs unless there is a query string (:issue:`2042`, + :issue:`1943`, :sha:`c15ce60`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.26.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.26.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..a586459c1de07400780584cdc25e08b80dfc1150 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.26.0.rst @@ -0,0 +1,59 @@ +boto v2.26.0 +============ + +:date: 2014/02/27 + +This release adds support for MFA tokens in the AWS STS ``assume_role`` call & +introduces the ``boto.rds2`` module (which has full support for the entire RDS +API). It also adds request hooks & includes many bugfixes. + + +Changes +------- + +* Added support for MFA in STS AssumeRole. (:sha:`899810c`) +* Fixed how DynamoDB v2 works with Global Secondary Indexes. (:issue:`2122`, + :sha:`f602c95`) +* Add request hooks and request logger. (:issue:`2125`, :sha:`e8b20fe`) +* Don't pull the security token from the environment or config when a caller + supplies the access key and secret. (:issue:`2123`, :sha:`4df1694`) +* Read EvaluateTargetHealth from Route53 resource record set. (:issue:`2120`, + :sha:`0a97158`) +* Prevent implicit string decode in hmac-v4 handlers. (:issue:`2037`, + :issue:`2033`, :sha:`8e56a5f`) +* Updated Datapipeline to include all current regions. (:issue:`2121`, + :sha:`dff5e3e`) +* Bug fix for Google Storage generate_url authentication. (:issue:`2116`, + :issue:`2108`, :sha:`5a50932`) +* Handle JSON error responses in BotoServerError. (:issue:`2113`, :issue:`2077`, + :sha:`221085e`) +* Corrected a typo in SQS tutorial. (:issue:`2114`, :sha:`7ed41f7`) +* Add CloudFormation template capabilities support. (:issue:`2111`, + :issue:`2075`, :sha:`65a4323`) +* Add SWF layer1_decisions to docs. (:issue:`2110`, :issue:`2062`, + :sha:`6039cc9`) +* Add support for request intervals in health checks.
(:issue:`2109`, + :sha:`660b01a`) +* Added checks for invalid regions to the ``bin`` scripts (:issue:`2107`, + :sha:`bbb9f1e`) +* Better error output for unknown region - (:issue:`2041`, :issue:`1983`, + :sha:`cd63f92`) +* Added certificate tests for CloudTrail. (:issue:`2106`, :sha:`a7e9b4c`) +* Updated Kinesis endpoints. (:sha:`7bd4b6e`) +* Finished implementation of RDS's DescribeDBLogFiles. (:issue:`2084`, + :sha:`f3c706c`) +* Added support for RDS log file downloading. (:issue:`2086`, :issue:`1993`, + :sha:`4c51841`) +* Added some unit tests for CloudFront. (:issue:`2076`, :sha:`6c46b1d`) +* GS should ignore restore_headers as they are never set. (:issue:`2067`, + :sha:`f02aeb3`) +* Update CloudFormation to support the latest API. (:issue:`2101`, + :sha:`ea1b1b6`) +* Added Route53 health checks. (:issue:`2054`, :sha:`9028f7d`) +* Merge branch 'rds2' into develop Fixes #2097. (:issue:`2097`, :sha:`6843c16`) +* Fix Param class convert method (:issue:`2094`, :sha:`5cd4598`) +* Added support for Route53 aliasing. (:issue:`2096`, :sha:`df5fa40`) +* Removed the dependence on ``example.com`` within the Route53 tests. + (:issue:`2098`, :sha:`6ce9e0f`) +* Fixed ``has_item`` support in DynamoDB v2. (:issue:`2090`, :sha:`aada5d3`) +* Fix a little typo bug in the S3 tutorial. (:issue:`2088`, :sha:`c091d27`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.26.1.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.26.1.rst new file mode 100644 index 0000000000000000000000000000000000000000..5fe9f468f444ee8345891a12b10327fbe45934fc --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.26.1.rst @@ -0,0 +1,14 @@ +boto v2.26.1 +============ + +:date: 2014/03/03 + +This release fixes an issue with the newly-added ``boto.rds2`` module when +trying to use ``boto.connect_rds2``. Parameters were not being passed correctly, +which would cause an immediate error. + + +Changes +------- + +* Fixed ``boto.connect_rds2`` to use kwargs. (:sha:`3828ece`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.27.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.27.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..fb8deef668da15c7dd06404fa58385e07f8aac23 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.27.0.rst @@ -0,0 +1,33 @@ +boto v2.27.0 +============ + +:date: 2014/03/06 + +This release adds support for configuring access logs on Elastic Load Balancing +(including what Amazon Simple Storage Service (S3) bucket to use & how +frequently logs should be added to the bucket), adds request hook documentation +& a host of doc updates/bugfixes. + + +Changes +------- + +* Added support for ``AccessLog`` in ELB (:issue:`2150`, :sha:`7aa35ea`) +* Added better BlockDeviceType deserialization in Autoscaling. (:issue:`2149`, + :sha:`04d29a5`) +* Updated CloudFormation documentation (:issue:`2147`, :sha:`2535aca`) +* Updated Kinesis documentation (:issue:`2146`, :sha:`01425dc`) +* Add optional bucket tags to `lss3` output. (:issue:`2132`, :sha:`0f35924`) +* Fix getting instance types for Eucalyptus 4.0. 
(:issue:`2118`, :sha:`18dc07d`) +* Fixed how quoted strings are handled in SigV4 (:issue:`2142`, :sha:`2467547`) +* Use system supplied certs without a bundle file (:issue:`2139`, + :sha:`70d15b8`) +* Fixed incorrect test failures in EC2 ``trim_snapshots`` (:sha:`1fa9df7`) +* Raise any exceptions that are not 'tagSet not found' (:sha:`56d7d3e`) +* Added request hook docs (:issue:`2129`, :sha:`64eedce`) +* Fixed Route53 ``alias-healthcheck`` (:issue:`2126`, :sha:`141077f`) +* Fixed Elastic IP association in EC2 (:issue:`2131`, :issue:`1310`, + :sha:`d75fdfa`) +* Fixed builds on Travis for installing dependencies (:sha:`5e84e30`) +* Support printing tags on buckets when listing buckets (:sha:`c42a5dd`) +* PEP8/pyflakes/(some)pylint (:sha:`149175e`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.28.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.28.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..4297c555a6786112198ae9e83db5befb5d3871ab --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.28.0.rst @@ -0,0 +1,38 @@ +boto v2.28.0 +============ + +:date: 2014/05/08 + +This release adds support for Amazon SQS message attributes, Amazon DynamoDB query filters and enhanced conditional operators, adds support for the new Amazon CloudSearch 2013-01-01 API and includes various features and fixes for Amazon Route 53, Amazon EC2, Amazon Elastic Beanstalk, Amazon Glacier, AWS Identity and Access Management (IAM), Amazon S3, Mechanical Turk and MWS. + +Changes +------- +* Add support for SQS message attributes. (:issue:`2257`, :sha:`a04ca92`) +* Update DynamoDB to support query filters. (:issue:`2242`, :sha:`141eb71`) +* Implement new Cloudsearch API 2013-01-01 as cloudsearch2 module (:sha:`b0ababa`) +* Miscellaneous improvements to the MTurk CLI. (:issue:`2188`, :sha:`c213ff1`) +* Update MWS to latest API version and add missing API calls. (:issue:`2203`, :issue:`2201`, :sha:`8adf720`, :sha:`8d0a6a8`) +* Update EC2 `register_image` to expose an option which sets whether an instance store is deleted on termination. The default value is left as-is. (:sha:`d295ee9`) +* Correct typo "possile" --> "possible". (:issue:`2196`, :sha:`d228352`) +* Update Boto configuration tutorial (:issue:`2191`, :sha:`f2a7a08`) +* Clarify that MTurkConnection.get_assignments attributes are actually strings. (:issue:`2187`, :issue:`2176`, :sha:`075636b`) +* Fix EC2 documentation typo (:issue:`2178`, :sha:`2627843`) +* Add support for ELB Connection Draining attribute. (:issue:`2174`, :issue:`2173`, :sha:`78fa43c`) +* Add support for setting failure threshold for Route53 health checks. (:issue:`2171`, :issue:`2170`, :sha:`15b812f`) +* Fix specification of Elastic Beanstalk tier parameter. (:issue:`2168`, :sha:`4492e86`) +* Fixed part of roboto for euca2ools. (:issue:`2166`, :issue:`1730`, :sha:`63b7a34`) +* Fixed removing policies from listeners. (:issue:`2165`, :issue:`1708`, :sha:`e5a2d9b`) +* Reintroduced the ``reverse`` fix for DDB. (:issue:`2163`, :sha:`70ec722`) +* Several fixes to DynamoDB describe calls. (:issue:`2161`, :issue:`1649`, :issue:`1663`, :sha:`84fb748`) +* Fixed how ``reverse`` works in DynamoDBv2.
(:issue:`2160`, :issue:`2070`, :issue:`2115`, :sha:`afdd805`) +* Update Kinesis exceptions (:issue:`2159`, :issue:`2153`, :sha:`22c6751`) +* Fix ECS problem using new-style classes (:issue:`2103`, :sha:`dc466c7`) +* Add support for passing region info from SWF layer2 to layer1 (:issue:`2137`, :sha:`0dc8ce6`) +* Handle plus signs in S3 metadata (:issue:`2145`, :sha:`c2a0f95`) +* Fix Glacier vault date parsing (:issue:`2158`, :sha:`9e7b132`) +* Documentation fix. (:issue:`2156`, :sha:`7592a58`) +* Fix Route53 evaluate target health bug. (:issue:`2157`, :sha:`398bb62`) +* Removing obsolete core directory. (:issue:`1987`, :sha:`8e83292`) +* Improve IAM behavior in the cn-north-1 region. (:issue:`2152`, :sha:`4050e70`) +* Add SetIdentityFeedbackForwardingEnabled and SetIdentityNotificationTopic for SES. (:issue:`2130`, :issue:`2128`, :sha:`83002d5`) +* Altered Route53 bin script to use UPSERT rather than CREATE. (:issue:`2151`, :sha:`2cd20e7`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.29.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.29.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..5e58781a4d475467bc8a93ee4064bb15703dcf12 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.29.0.rst @@ -0,0 +1,25 @@ +boto v2.29.0 +============ + +:date: 2014/05/29 + +This release adds support for the AWS shared credentials file, adds support for Amazon Elastic Block Store (EBS) encryption, and contains a handful of fixes for Amazon EC2, AWS CloudFormation, AWS CloudWatch, AWS CloudTrail, Amazon DynamoDB and Amazon Relational Database Service (RDS). It also includes fixes for Python wheel support. + +A bug has been fixed such that a new exception is thrown when a profile name is explicitly passed either via code (``profile="foo"``) or an environment variable (``AWS_PROFILE=foo``) and that profile does not exist in any configuration file. Previously this was silently ignored, and the default credentials would be used without informing the user. + +Changes
------- +* Added support for shared credentials file. (:issue:`2292`, :sha:`d5ed49f`) +* Added support for EBS encryption. (:issue:`2282`, :sha:`d85a449`) +* Added GovCloud CloudFormation endpoint. (:issue:`2297`, :sha:`0f75fb9`) +* Added new CloudTrail endpoints to endpoints.json. (:issue:`2269`, :sha:`1168580`) +* Added 'name' param to documentation of ELB LoadBalancer. (:issue:`2291`, :sha:`86e1174`) +* Fix typo in ELB docs. (:issue:`2294`, :sha:`37aaa0f`) +* Fix typo in ELB tutorial. (:issue:`2290`, :sha:`40a758a`) +* Fix OpsWorks ``connect_to_region`` exception. (:issue:`2288`, :sha:`26729c7`) +* Fix timezones in CloudWatch date range example. (:issue:`2285`, :sha:`138a6d0`) +* Fix description of param tags in ``rds2.create_db_subnet_group``. (:issue:`2279`, :sha:`dc1037f`) +* Fix the incorrect name of a test case. (:issue:`2273`, :sha:`ee195a1`) +* Fix "consistent" argument to ``boto.dynamodb2.table.Table.batch_get``. (:issue:`2272`, :sha:`c432b09`) +* Update the wheel to be python 2 compatible only. (:issue:`2286`, :sha:`6ad0b75`) +* Crate.io is no longer a package index.
(:issue:`2289`, :sha:`7f23de0`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.29.1.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.29.1.rst new file mode 100644 index 0000000000000000000000000000000000000000..12122ce798a3da129a8c963a45f9214c3ad32161 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.29.1.rst @@ -0,0 +1,11 @@ +boto v2.29.1 +============ + +:date: 2014/05/30 + +This release fixes a critical bug when the provider is not set to ``aws``, e.g. for Google Storage. It also fixes a problem with connection pooling in Amazon CloudSearch. + +Changes +------- +* Fix crash when provider is google. (:issue:`2302`, :sha:`33329d5888`) +* Fix connection pooling issue with CloudSearch (:sha:`82e83be12a`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.3.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.3.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..1ec69cbd43808873a7f7a21a04e7dc8deab92b61 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.3.0.rst @@ -0,0 +1,47 @@ +=========== +boto v2.3.0 +=========== + +The 2.3.0 release of boto is now available on `PyPI`_. + +.. _`PyPI`: http://pypi.python.org/pypi/boto + +You can view a list of issues that have been closed in this release at +https://github.com/boto/boto/issues?milestone=6&state=closed. + +You can get a comprehensive list of all commits made between the 2.2.2 release +and the 2.3.0 release at https://github.com/boto/boto/compare/2.2.2...2.3.0. + +This release includes initial support for Amazon Simple Workflow Service. + +The API version of the FPS module was updated to 2010-08-28. + +This release also includes many bug fixes and improvements in the Amazon +DynamoDB module. One change of particular note is the behavior of the +``new_item`` method of the ``Table`` object. See http://readthedocs.org/docs/boto/en/2.3.0/ref/dynamodb.html#module-boto.dynamodb.table +for more details. + +There were 109 commits in this release from 21 different authors. +The authors are listed below, in no particular order: + +* theju +* garnaat +* rdodev +* mfschwartz +* kopertop +* tpodowd +* gtaylor +* kachok +* croach +* tmorgan +* Erick Fejta +* dherbst +* marccohen +* Arif Amirani +* yuzeh +* Roguelazer +* awblocker +* blinsay +* Peter Broadwell +* tierney +* georgekola diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.30.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.30.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..bf5ad6d004012a4396882428f823211004f5c03b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.30.0.rst @@ -0,0 +1,28 @@ +boto v2.30.0 +============ + +:date: 2014/07/01 + +This release adds new Amazon EC2 instance types, new regions for AWS CloudTrail and Amazon Kinesis, Amazon S3 presigning using signature version 4, and several documentation updates and bugfixes. + + +Changes +------- +* Add EC2 T2 instance types (:sha:`544f8925cb`) +* Add new regions for CloudTrail and Kinesis (:sha:`4d67e19914`) +* Fixed some code formatting and a typo in SQS tutorial docs. (:issue:`2332`, :sha:`08c8fed`) +* Documentation update -- Child workflows and poll API. (:issue:`2333`, :issue:`2063`, :issue:`2064`, :sha:`4835676`) +* DOC Tutorial update for metrics and use of dimensions property. (:issue:`2340`, :issue:`2336`, :sha:`45fda90`) +* Let people know only EC2 supported for cloudwatch.
(:issue:`2341`, :sha:`98f03e2`) +* Add namespace to AccessControlPolicy xml representation. (:issue:`2342`, :sha:`ce07446`) +* Make ip_addr optional in Route53 HealthCheck. (:issue:`2345`, :sha:`79c35ca`) +* Add S3 SigV4 Presigning. (:issue:`2349`, :sha:`125c4ce`) +* Add missing route53 autodoc. (:issue:`2343`, :sha:`6472811`) +* Adds scan_index_forward and limit to DynamoDB table query count. (:issue:`2184`, :sha:`4b6d222`) +* Add method TaggedEC2Object.add_tags(). (:issue:`2259`, :sha:`eea5467`) +* Add network interface lookup to EC2. Add update/attach/detach methods to NetworkInterface object. (:issue:`2311`, :sha:`4d44530`) +* Parse date/time in a locale independent manner. (:issue:`2317`, :issue:`2271`, :sha:`3b715e5`) +* Add documentation for delete_hosted_zone. (:issue:`2316`, :sha:`a0fdd39`) +* s/existance/existence/ (:issue:`2315`, :sha:`b8dfa1c`) +* Add multipart upload section to the S3 tutorial. (:issue:`2308`, :sha:`99953d4`) +* Only attempt shared creds load if path is a file. (:issue:`2305`, :sha:`0bffa3b`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.31.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.31.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..af140f0356d3ce15dc3384947eaecd817b9731f3 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.31.0.rst @@ -0,0 +1,11 @@ +boto v2.31.0 +============ + +:date: 2014/07/10 + +This release adds support for Amazon CloudWatch Logs. + + +Changes +------- +* Add support for Amazon CloudWatch Logs. (:sha:`125c94d`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.31.1.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.31.1.rst new file mode 100644 index 0000000000000000000000000000000000000000..b8ec0ca74ab96d8e568769d637d7a5d5dfd2b489 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.31.1.rst @@ -0,0 +1,6 @@ +boto v2.31.1 +============ + +:date: 2014/07/10 + +This release fixes an installation bug in the 2.31.0 release. diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.32.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.32.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..10ea30c37939362f064f6cacddb8b8d7330a2e5f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.32.0.rst @@ -0,0 +1,113 @@ +boto v2.32.0 +============ + +:date: 2014/07/30 + +This release includes backward-compatible support for Python 3.3 and 3.4, +support for IPv6, Amazon VPC connection peering, Amazon SNS message +attributes, new regions for Amazon Kinesis, and several fixes. 
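+
+As a minimal sketch of the new SNS message attributes (the topic ARN below
+is a placeholder; the attribute format is the dict-based mapping added in
+this release)::
+
+    import boto.sns
+
+    conn = boto.sns.connect_to_region('us-east-1')
+    conn.publish(
+        topic='arn:aws:sns:us-east-1:123456789012:example-topic',
+        message='deploy finished',
+        message_attributes={
+            # each attribute carries a data type plus a typed value
+            'environment': {'data_type': 'String',
+                            'string_value': 'production'},
+        },
+    )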
+ + +Python 3 Support +---------------- +* DynamoDB (:issue:`2441`, :sha:`0ef0466`, :issue:`2473`, :sha:`102c3b6`, + :issue:`2453`) +* CloudWatch Logs (:issue:`2448`, :sha:`23cbcd1`) +* Support (:issue:`2406`, :sha:`7b489a0`) +* Elastic Beanstalk (:issue:`2372`, :sha:`d45d00e`) +* CloudSearch (:issue:`2439`, :sha:`25416f9`, :issue:`2432`, :sha:`b17f2d9`) +* STS (:issue:`2435`, :sha:`1c1239b`) +* SimpleDB (:issue:`2403`, :sha:`604318d`) +* EC2 (:issue:`2424`, :sha:`5e5dc4c`) +* VPC (:issue:`2399`, :sha:`356da91`) +* OpsWorks (:issue:`2402`, :sha:`68d15a5`) +* CloudWatch (:issue:`2400`, :sha:`a4d0a7a`) +* SWF (:issue:`2397`, :sha:`6db918e`) +* MWS (:issue:`2385`, :sha:`5347fbd`) +* ELB (:issue:`2384`, :sha:`4dcc9be`) +* Elastic Transcoder (:issue:`2382`, :sha:`40c5e35`) +* EMR (:issue:`2381`, :sha:`edf4020`) +* Route53 (:issue:`2359`, :sha:`15514f7`) +* Glacier (:issue:`2357`, :sha:`a41042e`) +* RedShift (:issue:`2362`, :sha:`b8888cc`) +* CloudFront (:issue:`2355`, :sha:`f2f54b1`) +* ECS (:issue:`2364`, :sha:`ab84969`) +* Fix pylintrc to run with pylint/python 3. (:issue:`2366`, :sha:`6292ab2`) +* SNS (:issue:`2365`, :sha:`170f735`) +* AutoScaling (:issue:`2393`, :sha:`6a78057`) +* Direct Connect (:issue:`2361`, :sha:`8488d94`) +* CloudFormation (:issue:`2373`, :sha:`9872f27`) +* IAM (:issue:`2358`, :sha:`29ad3e3`) +* ElastiCache (:issue:`2356`, :sha:`2880f91`) +* SES (:issue:`2354`, :sha:`1db129e`) +* Fix S3 integration test on Py3. (:issue:`2466`, :sha:`f3eb4cd`) +* Use unittest.mock if exists. (:issue:`2451`, :sha:`cc58978`) +* Add tests/compat.py for test-only imports. (:issue:`2442`, :sha:`556f3cf`) +* Add backward-compatible support for Python 3.3+ (S3, SQS, Kinesis, + CloudTrail). (:issue:`2344`, :issue:`677`, :sha:`b503f4b`) + + +Features +-------- +* Add marker param to describe all ELBs. (:issue:`2433`, :sha:`49af8b6`) +* Update .travis.yml to add pypy. (:issue:`2440`, :sha:`4b8667c`) +* Add 'include_all_instances' support to 'get_all_instance_status'. + (:issue:`2446`, :issue:`2230`, :sha:`5949012`) +* Support security tokens in configuration file profiles. + (:issue:`2445`, :sha:`a16bcfd`) +* Singapore, Sydney and Tokyo are missing in Kinesis Region. + (:issue:`2434`, :sha:`723290d`) +* Add support for VPC connection peering. (:issue:`2438`, :sha:`63c78a8`) +* Add separate doc requirements. (:issue:`2412`, :sha:`2922d89`) +* Route53 support IP health checks (:issue:`2195`, :sha:`319d44e`) +* IPv6 support when making connections (:issue:`2380`, :sha:`1e70179`) +* Support SNS message attributes (:issue:`2360`, :sha:`ec106bd`) +* Add "attributes" argument to boto.dynamodb2.table.Table.batch_get. + (:issue:`2276`, :sha:`fe67f43`) +* Add documentation for top-level S3 module. (:issue:`2379`, :sha:`db77546`) + + +Fixes +----- +* Prevent an infinite loop. (:issue:`2465`, :sha:`71b795a`) +* Updated documentation for copy_image. (:issue:`2471`, :sha:`f9f683a`) +* Fixed #2464: added keyword "detailed" to docs. (:issue:`2467`, :issue:`2464`, + :sha:`eb26fdc`) +* Retry installation commands on Travis CI. (:issue:`2457`, :sha:`a9e8057`) +* Fix for run_instances() network_interfaces argument documentation. + (:issue:`2461`, :sha:`798fd70`) +* pyami module: tidy up to meet PEP8 better. (:issue:`2460`, :sha:`e5a23ed`) +* Updating documentation on cloudsearch regions. (:issue:`2455`, :sha:`de284a4`) +* Fixing lost errors bug in cloudsearch2 commit implementation. + (:issue:`2408`, :sha:`fedb937`) +* Import json from boto.compat for several modules.
+ (:issue:`2450`, :sha:`55e716b`) +* Relocate MWS requirements checks; closes #2304, #2314. + (:issue:`2314`, :issue:`2304`, :sha:`6a8f98b`) +* Added support for creating EMR clusters with a ServiceRole. + (:issue:`2389`, :sha:`7693956`) +* Doc fix: doc_service instead of service on Deleting. + (:issue:`2419`, :sha:`f7b7980`) +* Fix dummy value typo on aws_access_key_id. (:issue:`2418`, :sha:`fc2a212`) +* Fix typo; add test. (:issue:`2447`, :sha:`effa8a8`) +* Fix CloudWatch Logs docstring. (:issue:`2444`, :sha:`d4a2b02`) +* Fix S3 mock encoding bug (:issue:`2443`, :sha:`8dca89b`) +* Skip the ETag header check in the response while using SSE-C encryption of S3. + (:issue:`2368`, :sha:`907fc6d`) +* Fix Beanstalk exception handling. (:issue:`2431`, :sha:`40f4b5d`) +* EC2 UserData encoding fix (Full version of #1698). + (:issue:`2396`, :issue:`1698`, :sha:`78300f1`) +* Fetch S3 key storage class on-demand. (:issue:`2404`, :sha:`8c4cc67`) +* Added documentation for /manage/cmdshell.py. (:issue:`2395`, :sha:`5a28d1c`) +* Remove redundant lines in auth.py. (:issue:`2374`, :sha:`317e322`) +* Fix SWF continue_as_new_workflow_execution start_to_close_timeout. + (:issue:`2378`, :sha:`5101b06`) +* Fix StringIO imports and invocations. (:issue:`2390`, :sha:`03952c7`) +* Fixed wrong call of urlparse. (:issue:`2387`, :sha:`4935f67`) +* Update documentation on Valid Values for ses:SetIdentityNotificationTopic. + (:issue:`2367`, :sha:`3f5de0d`) +* Correct list_saml_providers to return all items. + (:issue:`2338`, :sha:`9e9427f`) +* Fixing ELB unit tests. Also did some PEP8 cleanup on ELB code. + (:issue:`2352`, :sha:`5220621`) +* Documentation updates. (:issue:`2353`, :sha:`c9233d4`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.32.1.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.32.1.rst new file mode 100644 index 0000000000000000000000000000000000000000..4b06d7976380846056abe615d8ec340fce4f7c70 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.32.1.rst @@ -0,0 +1,32 @@ +boto v2.32.1 +============ + +:date: 2014/08/04 + +This release fixes an incorrect Amazon VPC peering connection call, and fixes +several minor issues related to Python 3 support including a regression when +pickling authentication information. + + +Fixes +----- +* Fix bin scripts for Python 3. (:issue:`2502`, :issue:`2490`, :sha:`cb78c52`) +* Fix parsing of EMR step summary response. (:issue:`2456`, :sha:`2ffb00a`) +* Update wheel to be universal for py2/py3. (:issue:`2478`, :sha:`e872d94`) +* Add pypy to tox config. (:issue:`2458`, :sha:`16c6fbe`) +* Fix Glacier file object hash calculation. (:issue:`2489`, :issue:`2488`, + :sha:`a9463c5`) +* PEP8 fixes for Glacier. (:issue:`2469`, :sha:`0575a54`) +* Use ConfigParser for Python 3 and SafeConfigParser for Python 2. + (:issue:`2498`, :issue:`2497`, :sha:`f580f73`) +* Remove redundant __future__ imports. (:issue:`2496`, :sha:`e59e199`) +* Fix dynamodb.types.Binary non-ASCII handling. (:issue:`2492`, :issue:`2491`, + :sha:`16284ea`) +* Add missing dependency to requirements.txt. (:issue:`2494`, :sha:`33db71a`) +* Fix TypeError when getting instance metadata under Python 3. (:issue:`2486`, + :issue:`2485`, :sha:`6ff525e`) +* Handle Cloudsearch indexing errors. (:issue:`2370`, :sha:`494a091`) +* Remove obsolete md5 import routine. (:issue:`2468`, :sha:`9808a77`) +* Use encodebytes instead of encodestring. (:issue:`2484`, :issue:`2483`, + :sha:`984c5ff`) +* Fix an auth class pickling bug.
(:issue:`2479`, :sha:`07d6424`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.33.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.33.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..d3182bad7303969d90f456044f8ffbbbe45c9ce7 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.33.0.rst @@ -0,0 +1,61 @@ +boto v2.33.0 +============= + +:date: 2014/10/08 + +This release adds support for Amazon Route 53 Domains, Amazon Cognito Identity, +Amazon Cognito Sync, the DynamoDB document model feature, and fixes several +issues. + + +Changes +------- +* Added TaggedEC2Object.remove_tags. (:issue:`2610`, :issue:`2269`, :issue:`2414`, :sha:`bce8fcf`) +* Fixed 403 error from url encoded User-Agent header (:issue:`2621`, :sha:`2043a89`) +* Inserted break when iterating Route53 records. (:issue:`2631`, :sha:`2de8716`) +* Fix typo in ELB ConnectionSettings attribute (:issue:`2602`, :sha:`63bd53b`) +* PEP8 fixes to various common modules. (:issue:`2611`, :sha:`44d873d`) +* Route Tables: Update describe_route_tables to support additional route types (VPC peering connection, NIC). (:issue:`2598`, :issue:`2597`, :sha:`bbe8ce7`) +* Fix an error in Python 3 when creating launch configs. Enables AutoScaling unit tests to run by default. (:issue:`2591`, :sha:`fb4aeec`) +* Use svg instead of png to get better image quality. (:issue:`2588`, :sha:`1de6b41`) +* STS now signs using sigv4. (:issue:`2627`, :sha:`36b247f`) +* Added support for Amazon Cognito. (:issue:`2608`, :sha:`fa3a39e`) +* Fix bug where sigv4 custom metadata headers were presigned incorrectly. (:issue:`2604`, :sha:`8853e8e`) +* Add some regions to cloudsearch (:issue:`2593`, :sha:`8c6ea21`) +* fix typo in s3 tutorial (:issue:`2612`, :sha:`92dd581`) +* fix ELB ConnectionSettings values in documentation (:issue:`2620`, :sha:`d2231a2`) +* Fix a few typos in docstrings (:issue:`2590`, :sha:`0238747`) +* Add support for Amazon Route 53 Domains. (:issue:`2601`, :sha:`d149a87`) +* Support EBS encryption in BlockDeviceType. (:issue:`2587`, :issue:`2480`, :sha:`7a39741`) +* Fix a typo in auth.py: Bejing -> Beijing. (:issue:`2585`, :sha:`8525616`) +* Update boto/cacerts/cacerts.txt. (:issue:`2567`, :sha:`02b836c`) +* route53 module: tidy up to meet PEP8 better. (:issue:`2571`, :sha:`3a3e960`) +* Update count_slow documentation. (:issue:`2569`, :sha:`e926d2d`) +* iam module: tidy up to meet PEP8 better. (:issue:`2566`, :sha:`3c83da9`) +* Assigning ACL ID to network_acl_id instead of route_table_id. (:issue:`2548`, :sha:`c017b02`) +* Avoid infinite loop with bucket listing and encoding_type='url'. (:issue:`2562`, :issue:`2561`, :sha:`39cbcb5`) +* Use urllib timeout param instead of hacking socket global timeout. (:issue:`2560`, :issue:`1935`, :sha:`c1dd1fb`) +* Support non-ascii unicode strings in _get_all_query_args. Fixes: #2558, #2559. (:issue:`2559`, :issue:`2558`, :sha:`069d04b`) +* Truncated Response Handling in Route53 ListResourceRecordSets. (:issue:`2542`, :sha:`3ba380f`) +* Update to latest OpsWorks API. (:issue:`2547`, :sha:`ac2b311`) +* Better S3 key repr support for unicode. (:issue:`2525`, :issue:`2516`, :sha:`8198884`) +* Skip test when locale is missing. (:issue:`2554`, :issue:`2540`, :sha:`2b87583`) +* Add profile_name support to SQS. (:issue:`2459`, :sha:`3837951`) +* Include test_endpoints.json in source distribution. (:issue:`2550`, :sha:`7f907b7`) +* Pass along params in make_request for elastic transcoder api.
(:issue:`2537`, :sha:`964999e`) +* Document the not-found behavior of get_item(). (:issue:`2544`, :sha:`9b9c1c4`) +* Support auth when headers contain bytes. (:issue:`2521`, :issue:`2520`, :sha:`885348d`) +* PEP8 style fixes for ElastiCache. (:issue:`2539`, :sha:`bd0d6db`) +* PEP8 style fixes for SES. (:issue:`2538`, :sha:`c620c43`) +* Doc updates for CloudSearch. (:issue:`2546`, :sha:`9efebc2`) +* Update to latest Redshift API. (:issue:`2545`, :sha:`9151092`) +* Update to latest support API. (:issue:`2541`, :issue:`2426`, :sha:`8cf1b52`) +* Uses file name as archive description when uploading to glacier. (:issue:`2535`, :issue:`2528`, :sha:`38478c1`) +* Fix the ec2.elb.listener.Listener class's __getitem__ method. (:issue:`2533`, :sha:`7b67f98`) +* Add recognized HTTP headers for S3 metadata. (:issue:`2477`, :issue:`2050`, :sha:`c8c625a`) +* Fix class name for document. (:issue:`2530`, :sha:`2f0e689`) +* Copy CloudSearch proxy settings to endpoint services. (:issue:`2513`, :sha:`3cbbc21`) +* Merge branch 'develop' into cloudsearch2-proxy (:sha:`5b424db`) +* Add IAMer as an application built on boto. (:issue:`2515`, :sha:`1f35224`) + + diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.34.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.34.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..6dd28d7c92abf85abe86e4a72ce682ccc49f7761 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.34.0.rst @@ -0,0 +1,21 @@ +boto v2.34.0 +============ + +:date: 2014/10/23 + +This release adds region support for ``eu-central-1``, support for creating +virtual MFA devices for Identity and Access Management, and fixes several +sigv4 issues. + + +Changes +------- +* Calculate sha_256 correctly for s3 (:issue:`2691`, :sha:`c0a001f`) +* Fix MTurk typo. (:issue:`2429`, :issue:`2428`, :sha:`9bfff19`) +* Fix Amazon Cognito links in docs (:issue:`2674`, :sha:`7c28577`) +* Add the ability for IAM to create a virtual MFA device. (:issue:`2675`, :sha:`075d402`) +* PEP8 tidy up for several modules. (:issue:`2673`, :sha:`38abbd9`) +* Fix s3 create multipart upload for sigv4 (:issue:`2684`, :sha:`fc73641`) +* Updated endpoints.json for cloudwatch logs to support more regions. (:issue:`2685`, :sha:`5db2ea8`) + + diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.35.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.35.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..b81d4c4fee1a58efada0604e0c0ce4fbc2380f6f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.35.0.rst @@ -0,0 +1,55 @@ +boto v2.35.0 +============ + +:date: 2015/01/08 + +This release adds support for Amazon EC2 Classic Link, which allows users +to link classic instances to Classic Link-enabled VPCs, +adds support for Amazon CloudSearch Domain, adds sigv4 support +for Elastic Load Balancing, and fixes several other issues, including issues +with making anonymous AWS Security Token Service requests.
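+
+As one example from the changes below, the new SQS PurgeQueue operation can
+be called roughly like this (the queue name is hypothetical)::
+
+    import boto.sqs
+
+    conn = boto.sqs.connect_to_region('us-east-1')
+    queue = conn.get_queue('example-queue')
+    if queue is not None:
+        # deletes every message in the queue in a single call
+        conn.purge_queue(queue)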
+ + +Changes +------- +* Add Amazon EC2 Classic Link support (:sha:`5dbd2d7`) +* Add query string to body for anon STS POST (:issue:`2812`, :sha:`6513789`) +* Fix bug that prevented initializing a dynamo item from an existing item (:issue:`2764`, :sha:`743e814`) +* switchover-sigv4: Add integration tests for the sigv4 switchover & switch elb/ec2 over to signature version 4 (:sha:`0dadce8`) +* Return SetStackPolicyResponse. (:issue:`2822`, :issue:`2346`, :issue:`2639`, :sha:`c4defb4`) +* Added ELB Attributes to docs. (:issue:`2821`, :sha:`5dfeba9`) +* Fix bug by using correct string joining syntax. (:issue:`2817`, :sha:`8426148`) +* Fix SES get_identity_dkim_attributes when input length > 1. (:issue:`2810`, :sha:`cc4d42d`) +* DynamoDB table batch_get fails to process all remaining results if single batch result is empty. (:issue:`2809`, :sha:`a193bc0`) +* Added support for additional fields in EMR objects. (:issue:`2807`, :sha:`2936ac0`) +* Pass version_id in copy if key is versioned. (:issue:`2803`, :sha:`66b3604`) +* Add support for SQS PurgeQueue operation. (:issue:`2806`, :sha:`90a5d44`) +* Update documentation for launchconfig. (:issue:`2802`, :sha:`0dc8412`) +* Remove unimplemented config param. (:issue:`2801`, :issue:`2572`, :sha:`f1a5ebd`) +* Add support for private hosted zones. (:issue:`2785`, :sha:`2e7829b`) +* Fix Key.change_storage_class so that it obeys dst_bucket. (:issue:`2752`, :sha:`55ed184`) +* Fix for s3put host specification. (:issue:`2736`, :issue:`2522`, :sha:`1af31f2`) +* Improve handling of Glacier HTTP 204 responses. (:issue:`2726`, :sha:`c314298`) +* Fix raising exception syntax in Python 3. (:issue:`2735`, :issue:`2563`, :sha:`58f76f6`) +* Privatezone: Adding unit/integration test coverage (:issue:`1`, :sha:`d1ff14e`) +* Minor documentation/pep8 fixes. (:issue:`2753`, :sha:`6a853be`) +* Correct argument type in doc string. (:issue:`2728`, :sha:`1ddf6df`) +* Use exclusive start key to get all items from DynamoDB query. (:issue:`2676`, :issue:`2573`, :sha:`419d8a5`) +* Updated link to current config documentation. (:issue:`2755`, :sha:`9be3f85`) +* Fix the SQS certificate error for region cn-north-1. (:issue:`2766`, :sha:`1d5368a`) +* Adds support for getting health checker IP ranges from Route53. (:issue:`2792`, :sha:`ee14911`) +* fix: snap.create_volume documentation lists general purpose ssd. Fixes #2774. (:issue:`2774`, :sha:`36fae2b`) +* Fixed param type in get_contents_to_filename docstring. (:issue:`2783`, :sha:`478f66a`) +* Update DynamoDB local example to include fake access key id. (:issue:`2791`, :sha:`2c1f8d5`) +* Added 'end' attribute to ReservedInstance. (:issue:`2793`, :issue:`2757`, :sha:`28814d8`) +* Parse ClusterStatus’s StateChangeReason. (:issue:`2696`, :sha:`48c5d17`) +* Adds SupportedProducts field to EMR JobFlow objects. (:issue:`2775`, :sha:`6771d04`) +* Fix EMR endpoint. (:issue:`2750`, :sha:`8329e02`) +* Detect old-style S3 URL for auto-sigv4.
(:issue:`2773`, :sha:`f5be409`) +* Throw host warning for cloudsearch domain (:issue:`2765`, :sha:`9af6f41`) +* Fix CloudSearch2 to work with IAM-based search and upload requests (:issue:`2717`, :sha:`9f4fe8b`) +* iam: add support for Account Password Policy APIs (:issue:`2574`, :sha:`6c9bd53`) +* Handle sigv4 non-string header values properly (:issue:`2744`, :sha:`e043e4b`) +* Url encode query string for pure query (:issue:`2720`, :sha:`bbbf9d2`) + + diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.35.1.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.35.1.rst new file mode 100644 index 0000000000000000000000000000000000000000..ef6525163f9602580e2a43d68c0157e6dbc0e9d3 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.35.1.rst @@ -0,0 +1,14 @@ +boto v2.35.1 +============ + +:date: 2015/01/09 + +This release fixes a regression which results in an infinite while loop of +requests if you query an empty Amazon DynamoDB table. + + +Changes +------- +* Check for results left after computing self._keys_left (:issue:`2871`, :sha:`d3c2595`) + + diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.35.2.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.35.2.rst new file mode 100644 index 0000000000000000000000000000000000000000..ca7ed9abcf9be454fd53a3f1c26f2114c1cb5260 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.35.2.rst @@ -0,0 +1,16 @@ +boto v2.35.2 +============ + +:date: 2015/01/19 + +This release adds ClassicLink support for Auto Scaling and fixes a few issues. + + +Changes +------- +* Add support for new data types in DynamoDB. (:issue:`2667`, :sha:`68ad513`) +* Expose cloudformation `UsePreviousTemplate` parameter. (:issue:`2843`, :issue:`2628`, :sha:`873e89c`) +* Fix documentation around using custom connections for DynamoDB tables. (:issue:`2842`, :issue:`1585`, :sha:`71d677f`) +* Fixed a bug that made it impossible to call query_2 after calling the describe method in the dynamodb2 module. (:issue:`2829`, :sha:`66addce`) + + diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.36.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.36.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..7f4b608226c11e00c4e562a17d4b0ada44429b27 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.36.0.rst @@ -0,0 +1,27 @@ +boto v2.36.0 +============ + +:date: 2015/01/27 + +This release adds support for AWS Key Management Service (KMS), AWS Lambda, +AWS CodeDeploy, AWS Config, AWS CloudHSM, Amazon EC2 Container Service (ECS), +Amazon DynamoDB online indexing, and fixes a few issues. + + +Changes +------- +* Add Amazon DynamoDB online indexing support. +* Allow for binary to be passed to sqs message (:issue:`2913`, :sha:`8af9b42`) +* Kinesis update (:issue:`2891`, :sha:`4874e19`) +* Fixed spelling of boto.awslambda package. (:issue:`2914`, :sha:`de769ac`) +* Add support for Amazon EC2 Container Service (:issue:`2908`, :sha:`4480fb4`) +* Add support for CloudHSM (:issue:`2905`, :sha:`6055a35`) +* Add support for AWS Config (:issue:`2904`, :sha:`51e9221`) +* Add support for AWS CodeDeploy (:issue:`2899`, :sha:`d935356`) +* Add support for AWS Lambda (:issue:`2896`, :sha:`6748016`) +* Update both Cognito modules to the latest APIs (:issue:`2909`, :sha:`18c1251`) +* Add sts for eu-central-1.
(:issue:`2906`, :sha:`54714ff`) +* Update opsworks to latest API (:issue:`2892`, :sha:`aed3302`) +* Add AWS Key Management support (:issue:`2894`, :sha:`ef7d2cd`) + + diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.37.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.37.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..a49fcdd1211b8443a65f7b1b0861e4c7ce95f1fa --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.37.0.rst @@ -0,0 +1,39 @@ +boto v2.37.0 +============ + +:date: 2015/04/02 + +This release updates AWS CloudTrail to the latest API to support the +``LookupEvents`` operation, adds new regional service endpoints and fixes +bugs in several services. + +.. note:: + + The CloudTrail ``create_trail`` operation no longer supports the deprecated + ``trail`` parameter, which has been marked for removal by the service + since early 2014. Instead, you now pass each trail parameter as a keyword + argument. Please see the + `reference `__ + to help port over existing code. + + +Changes +------- +* Update AWS CloudTrail to the latest API. (:issue:`3074`, :sha:`bccc29a`) +* Add support for UsePreviousValue to CloudFormation UpdateStack. (:issue:`3029`, :sha:`8a8a22a`) +* Fix BOTO_PATH to work with Windows drives (:issue:`2823`, :sha:`7ba973e`) +* Fix division calculation in S3 docs. (:issue:`3018`, :sha:`4ffd9ba`) +* Add Boto 3 link in README. (:issue:`3013`, :sha:`561716c`) +* Add more regions for configservice (:issue:`3009`, :sha:`a82244f`) +* Add ``eu-central-1`` endpoints (Frankfurt region) for IAM and Route53 (:sha:`5ff4add`) +* Fix unit tests from hanging (:sha:`da9f9b7`) +* Fixed wording in dynamodb tutorial (:issue:`2993`, :sha:`36cadf4`) +* Update SWF objects to keep a consistent region name. (:issue:`2985`, :issue:`2980`, :issue:`2606`, :sha:`ce75a19`) +* Print archive ID in glacier upload script. (:issue:`2951`, :sha:`047c7d3`) +* Add some minor documentation for Route53 tutorial. (:issue:`2952`, :sha:`b855fb3`) +* Add Amazon DynamoDB online indexing support on High level API (:issue:`2925`, :sha:`0621c53`) +* Ensure Content-Length header is a string. (:issue:`2932`, :sha:`34a0f63`) +* Correct docs around overriding SGs on ELBs (:issue:`2937`, :sha:`84d0ff9`) +* Fix DynamoDB tests. (:sha:`616ee80`) +* Fix region bug. (:issue:`2927`, :sha:`b1cb61e`) +* Fix import for ``boto.cloudhsm.layer1.CloudHSMConnection``. (:issue:`2926`, :sha:`1944d35`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.38.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.38.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..94d90e5f0e7c79b60cef150ee2ec745ccc602d06 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.38.0.rst @@ -0,0 +1,13 @@ +boto v2.38.0 +============ + +:date: 2015/04/09 + +This release adds support for Amazon Machine Learning and fixes a couple of +issues.
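+
+A rough, hypothetical sketch of calling the new service is shown below (the
+module follows boto's usual ``connect_to_region``/layer1 conventions, and the
+method name should be checked against the generated API reference)::
+
+    import boto.machinelearning
+
+    # connect in a supported region
+    conn = boto.machinelearning.connect_to_region('us-east-1')
+    # list the ML models visible to this account
+    models = conn.describe_ml_models()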
+ + +Changes +------- +* Add support for Amazon Machine Learning (:sha:`ab32d572`) +* Fix issue with modify reserved instances for modifying instance type (:issue:`3085`, :sha:`b8ea7a04`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.4.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.4.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..5fb7d3dbf63e2ed0c73b798682f83cb957947e33 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.4.0.rst @@ -0,0 +1,60 @@ +=========== +boto v2.4.0 +=========== + +The 2.4.0 release of boto is now available on `PyPI`_. + +.. _`PyPI`: http://pypi.python.org/pypi/boto + +You can get a comprehensive list of all commits made between the 2.3.0 release +and the 2.4.0 release at https://github.com/boto/boto/compare/2.3.0...2.4.0. + +This release includes: + +* Initial support for Amazon Cloudsearch Service. +* Support for Amazon's Marketplace Web Service. +* Latency-based routing for Route53 +* Support for new domain verification features of SES. +* A full rewrite of the FPS module. +* Support for BatchWriteItem in DynamoDB. +* Additional EMR steps for installing and running Pig scripts. +* Support for additional batch operations in SQS. +* Better support for VPC group-ids. +* Many, many bugfixes from the community. Thanks for the reports and pull + requests! + +There were 175 commits in this release from 32 different authors. The authors +are listed below, in no particular order: + +* estebistec +* tpodowd +* Max Noel +* garnaat +* mfschwartz +* jtriley +* akoumjian +* jreese +* mulka +* Nuutti Kotivuori +* mboersma +* ryansb +* dampier +* crschmidt +* nithint +* sievlev +* eckamm +* imlucas +* disruptek +* trevorsummerssmith +* tmorgan +* evanworley +* iandanforth +* oozie +* aedeph +* alexanderdean +* abrinsmead +* dlecocq +* bsimpson63 +* jamesls +* cosmin +* gtaylor diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.5.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.5.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..5fb95c28caf1f2e255367aa4376944d7183de6f7 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.5.0.rst @@ -0,0 +1,39 @@ +=========== +boto v2.5.0 +=========== + +The 2.5.0 release of boto is now available on `PyPI`_. + +.. _`PyPI`: http://pypi.python.org/pypi/boto + +You can get a comprehensive list of all commits made between the 2.4.1 release +and the 2.5.0 release at https://github.com/boto/boto/compare/2.4.1...2.5.0. + +This release includes: + +* Support for IAM Roles for EC2 Instances +* Added support for Capabilities in CloudFormation +* Spot instances in autoscaling groups +* Internal ELBs +* Added tenancy option to run_instances + +There were 77 commits in this release from 18 different authors.
The authors +are listed below, in no particular order: + +* jimbrowne +* cosmin +* gtaylor +* garnaat +* brianjaystanley +* jamesls +* trevorsummerssmith +* Bryan Donlan +* davidmarble +* jtriley +* rdodev +* toby +* tpodowd +* srs81 +* mfschwartz +* rdegges +* gholms diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.5.1.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.5.1.rst new file mode 100644 index 0000000000000000000000000000000000000000..db747bd3ebeb2f52e1f3137760ca1e8254c94f6d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.5.1.rst @@ -0,0 +1,6 @@ +=========== +boto v2.5.1 +=========== + +Release 2.5.1 is a bugfix release. It fixes the following critical issues: + +* :issue:`819` diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.5.2.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.5.2.rst new file mode 100644 index 0000000000000000000000000000000000000000..66d6d718398b234e757aef1c7fe4f4c18be9c2d4 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.5.2.rst @@ -0,0 +1,9 @@ +=========== +boto v2.5.2 +=========== + +Release 2.5.2 is a bugfix release. It fixes the following critical issues: + +* :issue:`830` + +This issue only affects you if you are using DynamoDB on an EC2 instance with +IAM Roles. \ No newline at end of file diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.6.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.6.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..124da3dffe3861f1821de8889fcf7641c9bb4893 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.6.0.rst @@ -0,0 +1,101 @@ +=========== +boto v2.6.0 +=========== + +The 2.6.0 release of boto is now available on `PyPI`_. + +.. _`PyPI`: http://pypi.python.org/pypi/boto + +You can get a comprehensive list of all commits made between the 2.5.2 release +and the 2.6.0 release at https://github.com/boto/boto/compare/2.5.2...2.6.0. + +This release includes: + +* Support for Amazon Glacier +* Support for AWS Elastic Beanstalk +* CORS support for Amazon S3 +* Support for Reserved Instances Resale in Amazon EC2 +* Support for IAM Roles + +SSL Certificate Verification +============================ + +In addition, this release of boto changes the default behavior with respect to +SSL certificate verification. Our friends at Google contributed code to boto +well over a year ago that implemented SSL certificate verification. At the +time, we felt the most prudent course of action was to make this feature an +opt-in but we always felt that at some time in the future we would enable cert +verification as the default behavior. Well, that time is now! + +However, in implementing this change, we came across a bug in Python for all +versions prior to 2.7.3 (see http://bugs.python.org/issue13034 for details). +The net result of this bug is that Python is able to check only the commonName +in the SSL cert for verification purposes. Any subjectAltNames are ignored in +large SSL keys. So, in addition to enabling verification as the default +behavior we also changed some of the service endpoints in boto to match the +commonName in the SSL certificate.
+ +If you want to disable verification for any reason (not advised, btw) you can +still do so by editing your boto config file (see +https://gist.github.com/3762068) or you can override it by passing +`validate_certs=False` to the Connection class constructor or the `connect_*` +function. + +Commits +======= + +There were 440 commits in this release from 53 different authors. The authors are listed below, in alphabetical order: + +* acorley +* acrefoot +* aedeph +* allardhoeve +* almost +* awatts +* buzztroll +* cadams +* cbednarski +* cosmin +* dangra +* darjus-amzn +* disruptek +* djw +* garnaat +* gertjanol +* gimbel0893 +* gochist +* graphaelli +* gtaylor +* gz +* hardys +* jamesls +* jijojv +* jimbrowne +* jtlebigot +* jtriley +* kopertop +* kotnik +* marknca +* mark_nunnikhoven +* mfschwartz +* moliware +* NeilW +* nkvoll +* nsitarz +* ohe +* pasieronen +* patricklucas +* pfig +* rajivnavada +* reversefold +* robie +* scott +* shawnps +* smoser +* sopel +* staer +* tedder +* yamatt +* Yossi +* yovadia12 +* zachhuff386 \ No newline at end of file diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.7.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.7.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..633cacda42b457b34fbec8c099bb913fd28b022e --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.7.0.rst @@ -0,0 +1,91 @@ +=========== +boto v2.7.0 +=========== + +The 2.7.0 release of boto is now available on `PyPI`_. + +.. _`PyPI`: http://pypi.python.org/pypi/boto + +You can get a comprehensive list of all commits made between the 2.6.0 release +and the 2.7.0 release at https://github.com/boto/boto/compare/2.6.0...2.7.0. + +This release includes: + +* Added support for AWS Data Pipeline - :sha:`999902` +* Integrated Slick53 into Route53 module - :issue:`1186` +* Add ability to use Decimal for DynamoDB numeric types - :issue:`1183` +* Query/Scan Count/ScannedCount support and TableGenerator improvements - + :issue:`1181` +* Added support for keyring in config files - :issue:`1157` +* Add concurrent downloader to glacier - :issue:`1106` +* Add support for tagged RDS DBInstances - :issue:`1050` +* Updating RDS API Version to 2012-09-17 - :issue:`1033` +* Added support for provisioned IOPS for RDS - :issue:`1028` +* Add ability to set SQS Notifications in Mechanical Turk - :issue:`1018` + +Commits +======= + +There were 447 commits in this release from 60 different authors. 
The authors +are listed below, in alphabetical order: + +* acrefoot +* Alex Schoof +* Andy Davidoff +* anoopj +* Benoit Dubertret +* bobveznat +* dahlia +* dangra +* disruptek +* dmcritchie +* emtrane +* focus +* fsouza +* g2harris +* garnaat +* georgegoh +* georgesequeira +* GitsMcGee +* glance- +* gtaylor +* hashbackup +* hinnerk +* hoov +* isaacbowen +* jamesls +* JerryKwan +* jimfulton +* jimbrowne +* jorourke +* jterrace +* jtriley +* katzj +* kennu +* kevinburke +* khagler +* Kodiologist +* kopertop +* kotnik +* Leftium +* lpetc +* marknca +* matthewandrews +* mfschwartz +* mikek +* mkmt +* mleonhard +* mraposa +* oozie +* phunter +* potix2 +* Rafael Cunha de Almeida +* reinhillmann +* reversefold +* Robie Basak +* seandst +* siroken3 +* staer +* tpodowd +* vladimir-sol +* yovadia12 diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.8.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.8.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..6c91dedb06f37c33215ff7404209378488a60dab --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.8.0.rst @@ -0,0 +1,45 @@ +=========== +boto v2.8.0 +=========== + +The 2.8.0 release of boto is now available on `PyPI`_. + +.. _`PyPI`: http://pypi.python.org/pypi/boto + +You can get a comprehensive list of all commits made between the 2.7.0 release +and the 2.8.0 release at https://github.com/boto/boto/compare/2.7.0...2.8.0. + +This release includes: + +* Added support for Amazon Elasticache +* Added support for Amazon Elastic Transcoding Service + +As well as numerous bug fixes and improvements. + +Commits +======= + +There were 115 commits in this release from 21 different authors. The authors +are listed below, in alphabetical order: + +* conorbranagan +* dkavanagh +* gaige +* garnaat +* halfaleague +* jamesls +* jjhooper +* jordansissel +* jterrace +* Kodiologist +* kopertop +* mfschwartz +* nathan11g +* pasc +* phobologic +* schworer +* seandst +* SirAlvarex +* Yaniv Ovadia +* yig +* yovadia12 diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.0.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.0.rst new file mode 100644 index 0000000000000000000000000000000000000000..8550fc5d4a28bbd2d095ba5582f0d78ee9faba4a --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.0.rst @@ -0,0 +1,56 @@ +=========== +boto v2.9.0 +=========== + +The 2.9.0 release of boto is now available on `PyPI`_. + +.. _`PyPI`: http://pypi.python.org/pypi/boto + +You can get a comprehensive list of all commits made between the 2.8.0 release +and the 2.9.0 release at https://github.com/boto/boto/compare/2.8.0...2.9.0. + +This release includes: + +* Support for Amazon Redshift +* Support for Amazon DynamoDB's new API +* Support for AWS Opsworks +* Add `copy_image` to EC2 (AMI copy) +* Add `describe_account_attributes` and `describe_vpc_attribute`, and + `modify_vpc_attribute` operations to EC2. 
+ +There were 240 commits made by 34 different authors: + +* g2harris +* Michael Barrett +* Pascal Hakim +* James Saryerwinnie +* Mitch Garnaat +* ChangMin Jeon +* Mike Schwartz +* Jeremy Katz +* Alex Schoof +* reinhillmann +* Travis Hobrla +* Zach Wilt +* Daniel Lindsley +* ksacry +* Michael Wirth +* Eric Smalling +* pingwin +* Chris Moyer +* Olivier Hervieu +* Iuri de Silvio +* Joe Sondow +* Max Noel +* Nate +* Chris Moyer +* Lars Otten +* Nathan Grigg +* Rein Hillmann +* Øyvind Saltvik +* Rayson HO +* Martin Matusiak +* Royce Remer +* Jeff Terrace +* Yaniv Ovadia +* Eduardo S. Klein diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.1.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.1.rst new file mode 100644 index 0000000000000000000000000000000000000000..488730efe5d6e476ecb38c5c51d897b8d1a57dbe --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.1.rst @@ -0,0 +1,48 @@ +boto v2.9.1 +=========== + +:date: 2013/04/30 + +Primarily a bugfix release, this release also includes support for the new +AWS Support API. + + +Features +-------- + +* AWS Support API - A client was added to support the new AWS Support API. It + gives programmatic access to Support cases opened with AWS. A short example + might look like:: + + >>> from boto.support.layer1 import SupportConnection + >>> conn = SupportConnection() + >>> new_case = conn.create_case( + ... subject='Description of the issue', + ... service_code='amazon-cloudsearch', + ... category_code='performance', + ... communication_body="We're seeing some latency from one of our...", + ... severity_code='low' + ... ) + >>> new_case['caseId'] + u'case-...' + + The :ref:`Support Tutorial ` has more information on how to use + the new API. (:sha:`8c0451`) + + +Bugfixes +-------- + +* The reintroduction of ``ResumableUploadHandler.get_upload_id`` that was + accidentally removed in a previous commit. (:sha:`758322`) +* Added ``OrdinaryCallingFormat`` to support Google Storage's certificate + verification. (:sha:`4ca83b`) +* Added the ``eu-west-1`` region for Redshift. (:sha:`e98b95`) +* Added support for overriding the port any connection in ``boto`` uses. + (:sha:`08e893`) +* Added retry/checksumming support to the DynamoDB v2 client. (:sha:`969ae2`) +* Several documentation improvements/fixes: + + * Incorrect docs on EC2's ``import_key_pair``. (:sha:`6ada7d`) + * Clearer docs on the DynamoDB ``count`` parameter. (:sha:`dfa456`) + * Fixed a typo in the ``autoscale_tut``. (:sha:`6df1ae`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.2.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.2.rst new file mode 100644 index 0000000000000000000000000000000000000000..0e9994ae38405807660686c1109c2f434a0ab5e6 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.2.rst @@ -0,0 +1,18 @@ +boto v2.9.2 +=========== + +:date: 2013/04/30 + +A hotfix release that adds the ``boto.support`` into ``setup.py``. + + +Features +-------- + +* None. + + +Bugfixes +-------- + +* Fixed the missing ``boto.support`` in ``setup.py``. 
(:sha:`9ac196`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.3.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.3.rst new file mode 100644 index 0000000000000000000000000000000000000000..1835862ad7c927cb034adb18564e89a08e8a2a76 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.3.rst @@ -0,0 +1,53 @@ +boto v2.9.3 +=========== + +:date: 2013/05/15 + +This release adds ELB support to Opsworks, optimized EBS support in EC2 +AutoScale, Parallel Scan support to DynamoDB v2, a higher-level interface to +DynamoDB v2 and API updates to DataPipeline. + + +Features +-------- + +* ELB support in Opsworks - You can now attach & describe the Elastic Load + Balancers within the Opsworks client. (:sha:`ecda87`) +* Optimized EBS support in EC2 AutoScale - You can now specify whether an + AutoScale instance should be optimized for EBS I/O. (:sha:`f8acaa`) +* Parallel Scan support in DynamoDB v2 - If you have extra read capacity & + a large amount of data, you can scan over the records in parallel by + telling DynamoDB to split the table into segments, then spinning up + threads/processes to each run over their own segment. (:sha:`db7f7b` & :sha:`7ed73c`) +* Higher-level interface to DynamoDB v2 - A more convenient API for using + DynamoDB v2. The :ref:`DynamoDB v2 Tutorial ` has more + information on how to use the new API. (:sha:`0f7c8b`) + + +Backward-Incompatible Changes +----------------------------- + +* API Update for DataPipeline - The ``error_code`` (integer) argument to + ``set_task_status`` changed to ``error_id`` (string). Many documentation + updates were also added. (:sha:`a78572`) + + +Bugfixes +-------- + +* Bumped the AWS Support API version. (:sha:`0323f4`) +* Fixed the S3 ``ResumableDownloadHandler`` so that it no longer tries to use + a hashing algorithm when used outside of GCS. (:sha:`29b046`) +* Fixed a bug where Sig V4 URIs were improperly canonicalized. (:sha:`5269d8`) +* Fixed a bug where Sig V4 ports were not included. (:sha:`cfaba3`) +* Fixed a bug in CloudWatch's ``build_put_params`` that would overwrite + existing/necessary variables. (:sha:`550e00`) +* Several documentation improvements/fixes: + + * Added docs for RDS ``modify/modify_dbinstance``. (:sha:`777d73`) + * Fixed a typo in the ``README.rst``. (:sha:`181e0f`) + * Documentation fallout from the previous release. (:sha:`14a111`) + * Fixed a typo in the EC2 ``Image.run`` docs. (:sha:`5edd6a`) + * Added/improved docs for EC2 ``Image.run``. (:sha:`773ce5`) + * Added a CONTRIBUTING doc. (:sha:`cecbe8`) + * Fixed S3 ``create_bucket`` docs to specify "European Union". (:sha:`ddddfd`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.4.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.4.rst new file mode 100644 index 0000000000000000000000000000000000000000..675afd457aa3ec565a2282cfc45a7a48bfb17209 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.4.rst @@ -0,0 +1,30 @@ +boto v2.9.4 +=========== + +:date: 2013/05/20 + +This release adds updated Elastic Transcoder support & fixes several bugs +from recent releases & API updates. + + +Features +-------- + +* Updated Elastic Transcoder support - It now supports HLS, WebM, MPEG2-TS & a + host of `other features`_. (:sha:`89196a`) + + .. _`other features`: http://aws.typepad.com/aws/2013/05/new-features-for-the-amazon-elastic-transcoder.html + + +Bugfixes +-------- + +* Fixed a bug in the canonicalization of URLs on Windows. 
(:sha:`09ef8c`)
+* Fixed glacier part size bug (:issue:`1478`, :sha:`9e04171`)
+* Fixed a bug in the bucket regex for S3 involving capital letters.
+  (:sha:`950031`)
+* Fixed a bug where timestamps from Cloudformation would fail to be parsed.
+  (:sha:`b40542`)
+* Several documentation improvements/fixes:
+
+  * Added autodocs for many of the EC2 apis. (:sha:`79f939`)
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.5.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.5.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5df46bd83fa0552497d351b2055c6091d903ed64
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.5.rst
@@ -0,0 +1,32 @@
+boto v2.9.5
+===========
+
+:date: 2013/05/28
+
+This release adds support for `web identity federation`_ within the Secure
+Token Service (STS) & fixes several bugs.
+
+.. _`web identity federation`: http://docs.aws.amazon.com/STS/latest/UsingSTS/CreatingWIF.html
+
+Features
+--------
+
+* Added support for web identity federation - You can now delegate token access
+  via either an OAuth 2.0 or OpenID provider. (:sha:`9bd0a3`)
+
+
+Bugfixes
+--------
+
+* Altered the S3 key buffer to be a configurable value. (:issue:`1506`,
+  :sha:`8e3e36`)
+* Added Sphinx extension for better release notes. (:issue:`1511`,
+  :sha:`e2e32d` & :sha:`3d998b`)
+* Fixed a bug where DynamoDB v2 would only ever connect to the default
+  endpoint. (:issue:`1508`, :sha:`139912`)
+* Fixed an iteration/empty results bug & a ``between`` bug in DynamoDB v2.
+  (:issue:`1512`, :sha:`d109b6`)
+* Fixed an issue with ``EbsOptimized`` in EC2 Autoscale. (:issue:`1513`,
+  :sha:`424c41`)
+* Fixed a missing instance variable bug in DynamoDB v2. (:issue:`1516`,
+  :sha:`6fa8bf`)
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.6.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.6.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9e163fba1d1d7ace87c1b1e10be8928a4f5e4257
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.6.rst
@@ -0,0 +1,56 @@
+boto v2.9.6
+===========
+
+:date: 2013/06/18
+
+This release adds large payload support to Amazon SNS/SQS (from 32k to 256k
+bodies), several minor API additions, new regions for Redshift/Cloudsearch &
+a host of bugfixes.
+
+
+Features
+--------
+
+* Added large body support to SNS/SQS. There's nothing to change in your
+  application code, but you can now send payloads of up to 256k in size.
+  (:sha:`b64947`)
+* Added ``Vault.retrieve_inventory_job`` to Glacier. (:issue:`1532`, :sha:`33de29`)
+* Added ``Item.get(...)`` support to DynamoDB v2. (:sha:`938cb6`)
+* Added the ``ap-northeast-1`` region to Redshift. (:sha:`d3eb61`)
+* Added all the current regions to Cloudsearch. (:issue:`1465`, :sha:`22b3b7`)
+
+
+Bugfixes
+--------
+
+* Fixed a bug where ``date`` metadata couldn't be set on an S3 key.
+  (:issue:`1519`, :sha:`1efde8`)
+* Fixed Python 2.5/Jython support in ``NetworkInterfaceCollection``.
+  (:issue:`1518`, :sha:`0d6af2`)
+* Fixed an XML parsing error with ``InstanceStatusSet``. (:issue:`1493`,
+  :sha:`55d4f6`)
+* Added a test case to try to demonstrate :issue:`443`. (:sha:`084dd5`)
+* Exposed the current tree-hash & upload size on Glacier's ``Writer``.
+  (:issue:`1520`, :sha:`ade462`)
+* Updated EC2 Autoscale to incorporate new cron-like parameters. (:issue:`1433`,
+  :sha:`266e25`, :sha:`871588` & :sha:`473e42`)
+* Fixed ``AttributeError`` being thrown from ``LoadBalancerZones``.
+  (:issue:`1524`, :sha:`215ffa`)
+* Fixed a bug with empty facets in Cloudsearch. (:issue:`1366`, :sha:`7a108e`)
+* Fixed an S3 timeout/retry bug where HTTP 400s weren't being honored.
+  (:issue:`1528`, :sha:`efd9af` & :sha:`16ae74`)
+* Fixed ``get_path`` when ``suppress_consec_slashes=False``. (:issue:`1522`,
+  :sha:`c5dffc`)
+* Factored out how some of S3's ``query_args`` are constructed. (:sha:`9f73de`)
+* Added the ``generation`` query param to ``gs.Key.open_read``. (:sha:`cb4427`)
+* Fixed a bug with the canonicalization of URLs with trailing slashes in
+  the SigV4 signer. (:issue:`1541`, :sha:`dec541`, :sha:`3f2b33`)
+* Several documentation improvements/fixes:
+
+  * Updated the release notes slightly. (:sha:`7b6079`)
+  * Corrected the ``num_cb`` param on ``set_contents_from_filename``.
+    (:issue:`1523`, :sha:`44be69`)
+  * Fixed some example code in the DDB migration guide. (:issue:`1525`,
+    :sha:`6210ca`)
+  * Fixed a typo in one of the DynamoDB v2 examples. (:issue:`1551`,
+    :sha:`b0df3e`)
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.7.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.7.rst
new file mode 100644
index 0000000000000000000000000000000000000000..13e684a342e3dc704a5fc4a1ddfb43685195495a
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.7.rst
@@ -0,0 +1,40 @@
+boto v2.9.7
+===========
+
+:date: 2013/07/08
+
+This release is primarily a bugfix release, but also includes support for
+Elastic Transcoder updates (variable bit rate, max frame rate & watermark
+features).
+
+
+Features
+--------
+
+* Added support for selecting specific attributes in DynamoDB v2.
+  (:issue:`1567`, :sha:`d9e5c2`)
+* Added support for variable bit rate, max frame rate & watermark features in
+  Elastic Transcoder. (:sha:`3791c9`)
+
+
+Bugfixes
+--------
+
+* Altered RDS to now use SigV4. (:sha:`be1633`)
+* Removed parsing check in ``StorageUri``. (:sha:`21bc8f`)
+* More information returned about GS key generation. (:issue:`1571`,
+  :sha:`6d5e3a`)
+* Upload handling headers now case-insensitive. (:issue:`1575`, :sha:`60383d`)
+* Several CloudFormation timestamp updates. (:issue:`1582`, :issue:`1583`,
+  :issue:`1588`, :sha:`0a23d34`, :sha:`6d4209`)
+* Corrected a bug in how limits are handled in DynamoDB v2. (:issue:`1590`,
+  :sha:`710a62`)
+* Several documentation improvements/fixes:
+
+  * Typo in ``boto.connection`` fixed. (:issue:`1569`, :sha:`cf39fd`)
+  * All previous release notes added to the docs. (:sha:`165596`)
+  * Corrected error in ``get_all_tags`` docs. (:sha:`4bca5d`)
+  * Corrected a typo in the S3 tutorial. (:sha:`f0cef8`)
+  * Corrected several import errors in the DDBv2 tutorial. (:sha:`5401a3`)
+  * Fixed an error in the ``get_key_pair`` docstring. (:issue:`1590`,
+    :sha:`a9cb8d`)
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.8.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.8.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0398582dc36cdf2bfc4285ef686ca7cf13ab099b
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.8.rst
@@ -0,0 +1,35 @@
+boto v2.9.8
+===========
+
+:date: 2013/07/18
+
+This release adds new methods in AWS Security Token Service (STS) and AWS
+CloudFormation, and updates AWS Relational Database Service (RDS) & Google Storage.
+It also has several bugfixes & documentation improvements. + + +Features +-------- + +* Added support for the ``DecodeAuthorizationMessage`` in STS (:sha:`1ada5ac`). +* Added support for creating/deleting/describing ``OptionGroup`` in RDS. + (:sha:`d629228` & :sha:`d059a3b`) +* Added ``CancelUpdateStack`` to CloudFormation. (:issue:`1476`, :sha:`5bae130`) +* Added support for getting/setting lifecycle configurations on GS buckets. + (:issue:`1604`, :sha:`652fc81`) + + +Bugfixes +-------- + +* Added region support to ``bin/elbadmin``. (:issue:`1586`, + :sha:`2ffbc60`) +* Changed the mock storage to use case-insensitive headers. (:issue:`1594`, + :sha:`71849cb`) +* Added ``complex_listeners`` to ELB. (:issue:`1048`, :sha:`b782ce2`) +* Added tests for Route53's ``ResourceRecordSets``. (:sha:`fad5bde`) +* Several documentation improvements/fixes: + + * Updated CloudFront docs. (:issue:`1546`, :sha:`a811197`) + * Updated the URL explaining the use of base64 in SQS messages. + (:issue:`1596`, :sha:`00de3a2`) diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.9.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.9.rst new file mode 100644 index 0000000000000000000000000000000000000000..e9a0bfd2d7ee0bb36c7f58450fa81dadbd590637 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/docs/source/releasenotes/v2.9.9.rst @@ -0,0 +1,50 @@ +boto v2.9.9 +=========== + +:date: 2013/07/24 + +This release updates Opsworks to add AMI & Chef 11 support, DBSubnetGroup +support in RDS & many other bugfixes. + + +Features +-------- + +* Added AMI, configuration manager & Chef 11 support to Opsworks. + (:sha:`55725fc`). +* Added ``in`` support for SQS messages. (:issue:`1593`, :sha:`e5fe1ed`) +* Added support for the ``ap-southeast-2`` region in Elasticache. + (:issue:`1607`, :sha:`9986b61`) +* Added support for block device mappings in ELB. (:issue:`1343`, :issue:`753`, + :issue:`1357`, :sha:`974a23a`) +* Added support for DBSubnetGroup in RDS. (:issue:`1500`, :sha:`01eef87`, + :sha:`45c60a0`, :sha:`c4c859e`) + + +Bugfixes +-------- + +* Fixed the canonicalization of paths on Windows. (:issue:`1609`, + :sha:`a1fa98c`) +* Fixed how ``BotoServerException`` uses ``message``. (:issue:`1353`, + :sha:`b944f4b`) +* Fixed ``DisableRollback`` always being ``True`` in a CloudFormation ``Stack``. + (:issue:`1379`, :sha:`32b3150`) +* Changed EMR instance groups to no longer require a string price (can now be + a ``Decimal``). (:issue:`1396`, :sha:`dfc39ff`) +* Altered ``Distribution._sign_string`` to accept any file-like object as well + within CloudFront. (:issue:`1349`, :sha:`8df6c14`) +* Fixed the ``detach_lb_from_subnets`` call within ELB. (:issue:`1417`, + :issue:`1418` :sha:`4a397bd`, :sha:`c11d72b`, :sha:`9e595b5`, :sha:`634469d`, + :sha:`586dd54`) +* Altered boto to obey ``no_proxy`` environment variables. (:issue:`1600`, + :issue:`1603`, :sha:`aaef5a9`) +* Fixed ELB connections to use HTTPS by default. (:issue:`1587`, :sha:`fe158c4`) +* Updated S3 to be Python 2.5 compatible again. (:issue:`1598`, :sha:`066009f`) +* All calls within SES will now return *all* DKIMTokens, instead of just one. + (:issue:`1550`, :issue:`1610`, :sha:`1a079da`, :sha:`1e82f85`, :sha:`5c8b6b8`) +* Fixed the ``logging`` parameter within ``DistributionConfig`` in CloudFront + to respect whatever is provided to the constructor. (:issue:`1457`, + :sha:`e76180d`) +* Fixed CloudSearch to no longer raise an error if a non-JSON response is received. 
+  (:issue:`1555`, :issue:`1614`, :sha:`5e2c292`, :sha:`6510e1f`)
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/request_hook_tut.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/request_hook_tut.rst
new file mode 100644
index 0000000000000000000000000000000000000000..17054a38f7e879c076f6dbc1548d82eda1feb96b
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/request_hook_tut.rst
@@ -0,0 +1,61 @@
+.. _request_hook_tut.rst:
+
+======================================
+An Introduction to boto's request hook
+======================================
+
+This tutorial shows you how to use the request hook for data gathering.
+
+It is often important to measure things we do as developers to better
+understand application performance and the interactions between components
+of the system. Boto plays a key role in some of those interactions as any
+client library would.
+
+We'll go over how to use the request hook to do some simple request logging.
+
+Creating a connection
+---------------------
+
+For this example, let's use the EC2 interface. Any connection
+will work (IAM, SQS, etc.)::
+
+    >>> from boto import ec2
+    >>> conn = ec2.connect_to_region('us-west-2')
+
+You will be using this conn object for the remainder of the tutorial to send
+commands to EC2.
+
+Adding your own hook
+--------------------
+
+The hook interface is defined in ``boto.utils.RequestHook``.
+The method signature looks like::
+
+    def handle_request_data(self, request, response, error=False):
+
+In ``boto/requestlog.py``, there is an implementation of this interface that
+is written to handle multiple threads sending data to a single log-writing
+thread. Examining this file, you'll see a log file, queue and thread are
+created, then as requests are made, the handle_request_data() method is
+called. It extracts data from the request and response objects to create a
+log message. That's inserted into the queue and handled by the
+_request_log_worker thread.
+
+One thing to note is that the boto request object has an additional value
+"start_time", which is a datetime.now() as of the time right before the
+request was issued. This can be used along with the current time (after the
+request) to calculate the duration of the request.
+
+To add this logger to your connection::
+
+    >>> from boto.requestlog import RequestLogger
+    >>> conn.set_request_hook(RequestLogger())
+
+That's all you need to do! Now, if you make a request, like::
+
+    >>> conn.get_all_volumes()
+
+The log message produced might look something like this::
+
+    '2014-02-26 21:38:27', '200', '0.791542', '592', 'DescribeVolumes'
+
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/route53_tut.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/route53_tut.rst
new file mode 100644
index 0000000000000000000000000000000000000000..12e0d659325bf88d702ad1e5d144bdd2a3b078c9
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/route53_tut.rst
@@ -0,0 +1,103 @@
+.. _route53_tut.rst:
+
+===========================================
+An Introduction to boto's Route53 interface
+===========================================
+
+This tutorial focuses on the boto interface to Route53 from Amazon Web
+Services. This tutorial assumes that you have already downloaded and installed
+boto.
+
+Route53 is a Domain Name System (DNS) web service. It can be used to route
+requests to services running on AWS such as EC2 instances or load balancers,
+as well as to external services.
+Route53 also allows you to set up automated checks so that requests are sent
+where you require them.
+
+In this tutorial, we will be setting up our services for *example.com*.
+
+Creating a connection
+---------------------
+
+To start using Route53 you will need to create a connection to the service as
+normal:
+
+>>> import boto.route53
+>>> conn = boto.route53.connect_to_region('us-west-2')
+
+You will be using this conn object for the remainder of the tutorial to send
+commands to Route53.
+
+Working with domain names
+-------------------------
+
+You can manipulate domains through a zone object. For example, you can create
+a domain name:
+
+>>> zone = conn.create_zone("example.com.")
+
+Note that the trailing dot on that domain name is significant. This is known
+as a fully qualified domain name
+(`FQDN <http://en.wikipedia.org/wiki/Fully_qualified_domain_name>`_).
+
+>>> zone
+<Zone:example.com.>
+
+You can also retrieve all your domain names:
+
+>>> conn.get_zones()
+[<Zone:example.com.>]
+
+Or you can retrieve a single domain:
+
+>>> conn.get_zone("example.com.")
+<Zone:example.com.>
+
+Finally, you can retrieve the list of nameservers that AWS has setup for this
+domain name as follows:
+
+>>> zone.get_nameservers()
+[u'ns-1000.awsdns-42.org.', u'ns-1001.awsdns-30.com.', u'ns-1002.awsdns-59.net.', u'ns-1003.awsdns-09.co.uk.']
+
+Once you have finished configuring your domain name, you will need to change
+your nameservers at your registrar to point to those nameservers for Route53
+to work.
+
+Setting up dumb records
+-----------------------
+
+You can also add, update and delete records on a zone:
+
+>>> status = zone.add_record("MX", "example.com.", "10 mail.isp.com")
+
+When you send a change request through, the status of the update will be
+*PENDING*:
+
+>>> status
+<Status:PENDING>
+
+You can call the API again and ask for the current status as follows:
+
+>>> status.update()
+'INSYNC'
+
+>>> status
+<Status:INSYNC>
+
+When the status has changed to *INSYNC*, the change has been propagated to
+remote servers.
+
+Working with Change Sets
+------------------------
+
+You can also do bulk updates using ResourceRecordSets. For example, updating
+the TTL:
+
+>>> zone = conn.get_zone('example.com')
+>>> change_set = boto.route53.record.ResourceRecordSets(conn, zone.id)
+>>> for rrset in conn.get_all_rrsets(zone.id):
+...     u = change_set.add_change("UPSERT", rrset.name, rrset.type, ttl=3600)
+...     u.add_value(rrset.resource_records[0])
+...
+>>> results = change_set.commit()
+
+In this example we update the TTL to 1hr (3600 seconds) for all records
+retrieved from the example.com zone.
+Note: this will also change the SOA and NS records, which may not be ideal
+for many users.
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/s3_tut.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/s3_tut.rst
new file mode 100644
index 0000000000000000000000000000000000000000..23e0350dfe69aed3ce85723aa8cb7929a1df3768
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/s3_tut.rst
@@ -0,0 +1,544 @@
+.. _s3_tut:
+
+======================================
+An Introduction to boto's S3 interface
+======================================
+
+This tutorial focuses on the boto interface to the Simple Storage Service
+from Amazon Web Services. This tutorial assumes that you have already
+downloaded and installed boto.
+
+Creating a Connection
+---------------------
+The first step in accessing S3 is to create a connection to the service.
+There are two ways to do this in boto. The first is:
+
+>>> from boto.s3.connection import S3Connection
+>>> conn = S3Connection('<aws access key>', '<aws secret key>')
+
+At this point the variable conn will point to an S3Connection object. In
+this example, the AWS access key and AWS secret key are passed in to the
+method explicitly. Alternatively, you can set the environment variables:
+
+* `AWS_ACCESS_KEY_ID` - Your AWS Access Key ID
+* `AWS_SECRET_ACCESS_KEY` - Your AWS Secret Access Key
+
+and then call the constructor without any arguments, like this:
+
+>>> conn = S3Connection()
+
+There is also a shortcut function in the boto package, called connect_s3
+that may provide a slightly easier means of creating a connection::
+
+    >>> import boto
+    >>> conn = boto.connect_s3()
+
+In either case, conn will point to an S3Connection object which we will
+use throughout the remainder of this tutorial.
+
+Creating a Bucket
+-----------------
+
+Once you have a connection established with S3, you will probably want to
+create a bucket. A bucket is a container used to store key/value pairs
+in S3. A bucket can hold an unlimited amount of data so you could potentially
+have just one bucket in S3 for all of your information. Or, you could create
+separate buckets for different types of data. You can figure all of that out
+later; first, let's just create a bucket. That can be accomplished like
+this::

+    >>> bucket = conn.create_bucket('mybucket')
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in ?
+      File "boto/connection.py", line 285, in create_bucket
+        raise S3CreateError(response.status, response.reason)
+    boto.exception.S3CreateError: S3Error[409]: Conflict
+
+Whoa. What happened there? Well, the thing you have to know about
+buckets is that they are kind of like domain names. It's one flat name
+space that everyone who uses S3 shares. So, someone has already created
+a bucket called "mybucket" in S3 and that means no one else can grab that
+bucket name. So, you have to come up with a name that hasn't been taken yet.
+For example, something that uses a unique string as a prefix. Your
+AWS_ACCESS_KEY (NOT YOUR SECRET KEY!) could work but I'll leave it to
+your imagination to come up with something. I'll just assume that you
+found an acceptable name.
+
+The create_bucket method will create the requested bucket if it does not
+exist or will return the existing bucket if it does exist.
+
+Creating a Bucket In Another Location
+-------------------------------------
+
+The example above assumes that you want to create a bucket in the
+standard US region. However, it is possible to create buckets in
+other locations. To do so, first import the Location object from the
+boto.s3.connection module, like this::
+
+    >>> from boto.s3.connection import Location
+    >>> print '\n'.join(i for i in dir(Location) if i[0].isupper())
+    APNortheast
+    APSoutheast
+    APSoutheast2
+    DEFAULT
+    EU
+    SAEast
+    USWest
+    USWest2
+
+As you can see, the Location object defines a number of possible locations. By
+default, the location is the empty string which is interpreted as the US
+Classic Region, the original S3 region. However, by specifying another
+location at the time the bucket is created, you can instruct S3 to create the
+bucket in that location. For example::
+
+    >>> conn.create_bucket('mybucket', location=Location.EU)
+
+will create the bucket in the EU region (assuming the name is available).
+
+Storing Data
+----------------
+
+Once you have a bucket, presumably you will want to store some data
+in it. S3 doesn't care what kind of information you store in your objects
+or what format you use to store it. All you need is a key that is unique
+within your bucket.
+
+The Key object is used in boto to keep track of data stored in S3. To store
+new data in S3, start by creating a new Key object::
+
+    >>> from boto.s3.key import Key
+    >>> k = Key(bucket)
+    >>> k.key = 'foobar'
+    >>> k.set_contents_from_string('This is a test of S3')
+
+The net effect of these statements is to create a new object in S3 with a
+key of "foobar" and a value of "This is a test of S3". To validate that
+this worked, quit out of the interpreter and start it up again. Then::
+
+    >>> import boto
+    >>> c = boto.connect_s3()
+    >>> b = c.get_bucket('mybucket') # substitute your bucket name here
+    >>> from boto.s3.key import Key
+    >>> k = Key(b)
+    >>> k.key = 'foobar'
+    >>> k.get_contents_as_string()
+    'This is a test of S3'
+
+So, we can definitely store and retrieve strings. A more interesting
+example may be to store the contents of a local file in S3 and then retrieve
+the contents to another local file.
+
+::
+
+    >>> k = Key(b)
+    >>> k.key = 'myfile'
+    >>> k.set_contents_from_filename('foo.jpg')
+    >>> k.get_contents_to_filename('bar.jpg')
+
+There are a couple of things to note about this. When you send data to
+S3 from a file or filename, boto will attempt to determine the correct
+mime type for that file and send it as a Content-Type header. The boto
+package uses the standard mimetypes package in Python to do the mime type
+guessing. The other thing to note is that boto does stream the content
+to and from S3 so you should be able to send and receive large files without
+any problem.
+
+When fetching a key that already exists, you have two options. If you're
+uncertain whether a key exists (or if you need the metadata set on it), you
+can call ``Bucket.get_key(key_name_here)``. However, if you're sure a key
+already exists within a bucket, you can skip the check for a key on the
+server.
+
+::
+
+    >>> import boto
+    >>> c = boto.connect_s3()
+    >>> b = c.get_bucket('mybucket') # substitute your bucket name here
+
+    # Will hit the API to check if it exists.
+    >>> possible_key = b.get_key('mykey') # substitute your key name here
+
+    # Won't hit the API.
+    >>> key_we_know_is_there = b.get_key('mykey', validate=False)
+
+
+Storing Large Data
+------------------
+
+At times the data you may want to store will be hundreds of megabytes or
+more in size. S3 allows you to split such files into smaller components.
+You upload each component in turn and then S3 combines them into the final
+object. While this is fairly straightforward, it requires a few extra steps
+to be taken. The example below makes use of the FileChunkIO module, so
+``pip install FileChunkIO`` if it isn't already installed.
+
+::
+
+    >>> import math, os
+    >>> import boto
+    >>> from filechunkio import FileChunkIO
+
+    # Connect to S3
+    >>> c = boto.connect_s3()
+    >>> b = c.get_bucket('mybucket')
+
+    # Get file info
+    >>> source_path = 'path/to/your/file.ext'
+    >>> source_size = os.stat(source_path).st_size
+
+    # Create a multipart upload request
+    >>> mp = b.initiate_multipart_upload(os.path.basename(source_path))
+
+    # Use a chunk size of 50 MiB (feel free to change this)
+    >>> chunk_size = 52428800
+    >>> chunk_count = int(math.ceil(source_size / float(chunk_size)))
+
+    # Send the file parts, using FileChunkIO to create a file-like object
+    # that points to a certain byte range within the original file. We
+    # set bytes to never exceed the original file size.
+    >>> for i in range(chunk_count):
+    ...     offset = chunk_size * i
+    ...     bytes = min(chunk_size, source_size - offset)
+    ...     with FileChunkIO(source_path, 'r', offset=offset,
+    ...                      bytes=bytes) as fp:
+    ...         mp.upload_part_from_file(fp, part_num=i + 1)
+
+    # Finish the upload
+    >>> mp.complete_upload()
+
+It is also possible to upload the parts in parallel using threads. The
+``s3put`` script that ships with Boto provides an example of doing so
+using a thread pool.
+
+Note that if you forget to call either ``mp.complete_upload()`` or
+``mp.cancel_upload()`` you will be left with an incomplete upload and
+charged for the storage consumed by the uploaded parts. A call to
+``bucket.get_all_multipart_uploads()`` can help to show lost multipart
+upload parts.
+
+
+Accessing A Bucket
+------------------
+
+Once a bucket exists, you can access it by getting the bucket. For example::
+
+    >>> mybucket = conn.get_bucket('mybucket') # Substitute in your bucket name
+    >>> mybucket.list()
+    ...listing of keys in the bucket...
+
+By default, this method tries to validate the bucket's existence. You can
+override this behavior by passing ``validate=False``.::
+
+    >>> nonexistent = conn.get_bucket('i-dont-exist-at-all', validate=False)
+
+.. versionchanged:: 2.25.0
+.. warning::
+
+    If ``validate=False`` is passed, no request is made to the service (no
+    charge/communication delay). This is only safe to do if you are **sure**
+    the bucket exists.
+
+    If the default ``validate=True`` is passed, a request is made to the
+    service to ensure the bucket exists. Prior to Boto v2.25.0, this fetched
+    a list of keys (but with a max limit set to ``0``, always returning an empty
+    list) in the bucket (& included better error messages), at an
+    increased expense. As of Boto v2.25.0, this now performs a HEAD request
+    (less expensive but worse error messages).
+
+    If you were relying on parsing the error message before, you should call
+    something like::
+
+        bucket = conn.get_bucket('<bucket_name>', validate=False)
+        bucket.get_all_keys(maxkeys=0)
+
+If the bucket does not exist, an ``S3ResponseError`` will commonly be thrown.
+If you'd rather not deal with any exceptions, you can use the ``lookup``
+method.::
+
+    >>> nonexistent = conn.lookup('i-dont-exist-at-all')
+    >>> if nonexistent is None:
+    ...     print "No such bucket!"
+    ...
+    No such bucket!
+
+
+Deleting A Bucket
+-----------------
+
+Removing a bucket can be done using the ``delete_bucket`` method. For example::
+
+    >>> conn.delete_bucket('mybucket') # Substitute in your bucket name
+
+The bucket must be empty of keys or this call will fail & an exception will
+be raised. You can remove a non-empty bucket by doing something like::
+
+    >>> full_bucket = conn.get_bucket('bucket-to-delete')
+    # It's full of keys. Delete them all.
+    >>> for key in full_bucket.list():
+    ...     key.delete()
+    ...
+    # The bucket is empty now. Delete it.
+    >>> conn.delete_bucket('bucket-to-delete')
+
+.. warning::
+
+    This method can cause data loss! Be very careful when using it.
+
+    Additionally, be aware that using the above method for removing all keys
+    and deleting the bucket involves a request for each key. As such, it's
+    not particularly fast & is very chatty.
+
+Listing All Available Buckets
+-----------------------------
+In addition to accessing specific buckets via the create_bucket method
+you can also get a list of all available buckets that you have created.
+
+::
+
+    >>> rs = conn.get_all_buckets()
+
+This returns a ResultSet object (see the SQS Tutorial for more info on
+ResultSet objects). The ResultSet can be used as a sequence or list type
+object to retrieve Bucket objects.
+
+::
+
+    >>> len(rs)
+    11
+    >>> for b in rs:
+    ...     print b.name
+    ...
+
+    >>> b = rs[0]
+
+Setting / Getting the Access Control List for Buckets and Keys
+--------------------------------------------------------------
+The S3 service provides the ability to control access to buckets and keys
+within S3 via the Access Control List (ACL) associated with each object in
+S3. There are two ways to set the ACL for an object:
+
+1. Create a custom ACL that grants specific rights to specific users. At the
+   moment, the users that are specified within grants have to be registered
+   users of Amazon Web Services so this isn't as useful or as general as it
+   could be.
+
+2. Use a "canned" access control policy. There are four canned policies
+   defined:
+
+   a. private: Owner gets FULL_CONTROL. No one else has any access rights.
+   b. public-read: Owner gets FULL_CONTROL and the anonymous principal is granted READ access.
+   c. public-read-write: Owner gets FULL_CONTROL and the anonymous principal is granted READ and WRITE access.
+   d. authenticated-read: Owner gets FULL_CONTROL and any principal authenticated as a registered Amazon S3 user is granted READ access.
+
+To set a canned ACL for a bucket, use the set_acl method of the Bucket object.
+The argument passed to this method must be one of the four permissible
+canned policies named in the list CannedACLStrings contained in acl.py.
+For example, to make a bucket readable by anyone:
+
+>>> b.set_acl('public-read')
+
+You can also set the ACL for Key objects, either by passing an additional
+argument to the above method:
+
+>>> b.set_acl('public-read', 'foobar')
+
+where 'foobar' is the key of some object within the bucket b or you can
+call the set_acl method of the Key object:
+
+>>> k.set_acl('public-read')
+
+You can also retrieve the current ACL for a Bucket or Key object using the
+get_acl method. This method parses the AccessControlPolicy response sent
+by S3 and creates a set of Python objects that represent the ACL.
+
+::
+
+    >>> acp = b.get_acl()
+    >>> acp
+    <boto.s3.acl.Policy instance at 0x...>
+    >>> acp.acl
+    <boto.s3.acl.ACL instance at 0x...>
+    >>> acp.acl.grants
+    [<boto.s3.acl.Grant instance at 0x...>]
+    >>> for grant in acp.acl.grants:
+    ...     print grant.permission, grant.display_name, grant.email_address, grant.id
+    ...
+    FULL_CONTROL
+
+The Python objects representing the ACL can be found in the acl.py module
+of boto.
+
+Both the Bucket object and the Key object also provide shortcut
+methods to simplify the process of granting individuals specific
+access. For example, if you want to grant an individual user READ
+access to a particular object in S3 you could do the following::
+
+    >>> key = b.lookup('mykeytoshare')
+    >>> key.add_email_grant('READ', 'foo@bar.com')
+
+The email address provided should be the one associated with the user's
+AWS account. There is a similar method called add_user_grant that accepts the
+canonical id of the user rather than the email address.
+
+Setting/Getting Metadata Values on Key Objects
+----------------------------------------------
+S3 allows arbitrary user metadata to be assigned to objects within a bucket.
+To take advantage of this S3 feature, you should use the set_metadata and
+get_metadata methods of the Key object to set and retrieve metadata associated
+with an S3 object.
For example:: + + >>> k = Key(b) + >>> k.key = 'has_metadata' + >>> k.set_metadata('meta1', 'This is the first metadata value') + >>> k.set_metadata('meta2', 'This is the second metadata value') + >>> k.set_contents_from_filename('foo.txt') + +This code associates two metadata key/value pairs with the Key k. To retrieve +those values later:: + + >>> k = b.get_key('has_metadata') + >>> k.get_metadata('meta1') + 'This is the first metadata value' + >>> k.get_metadata('meta2') + 'This is the second metadata value' + >>> + +Setting/Getting/Deleting CORS Configuration on a Bucket +------------------------------------------------------- + +Cross-origin resource sharing (CORS) defines a way for client web +applications that are loaded in one domain to interact with resources +in a different domain. With CORS support in Amazon S3, you can build +rich client-side web applications with Amazon S3 and selectively allow +cross-origin access to your Amazon S3 resources. + +To create a CORS configuration and associate it with a bucket:: + + >>> from boto.s3.cors import CORSConfiguration + >>> cors_cfg = CORSConfiguration() + >>> cors_cfg.add_rule(['PUT', 'POST', 'DELETE'], 'https://www.example.com', allowed_header='*', max_age_seconds=3000, expose_header='x-amz-server-side-encryption') + >>> cors_cfg.add_rule('GET', '*') + +The above code creates a CORS configuration object with two rules. + +* The first rule allows cross-origin PUT, POST, and DELETE requests from + the https://www.example.com/ origin. The rule also allows all headers + in preflight OPTIONS request through the Access-Control-Request-Headers + header. In response to any preflight OPTIONS request, Amazon S3 will + return any requested headers. +* The second rule allows cross-origin GET requests from all origins. + +To associate this configuration with a bucket:: + + >>> import boto + >>> c = boto.connect_s3() + >>> bucket = c.lookup('mybucket') + >>> bucket.set_cors(cors_cfg) + +To retrieve the CORS configuration associated with a bucket:: + + >>> cors_cfg = bucket.get_cors() + +And, finally, to delete all CORS configurations from a bucket:: + + >>> bucket.delete_cors() + +Transitioning Objects to Glacier +-------------------------------- + +You can configure objects in S3 to transition to Glacier after a period of +time. This is done using lifecycle policies. A lifecycle policy can also +specify that an object should be deleted after a period of time. Lifecycle +configurations are assigned to buckets and require these parameters: + +* The object prefix that identifies the objects you are targeting. +* The action you want S3 to perform on the identified objects. +* The date (or time period) when you want S3 to perform these actions. + +For example, given a bucket ``s3-glacier-boto-demo``, we can first retrieve the +bucket:: + + >>> import boto + >>> c = boto.connect_s3() + >>> bucket = c.get_bucket('s3-glacier-boto-demo') + +Then we can create a lifecycle object. In our example, we want all objects +under ``logs/*`` to transition to Glacier 30 days after the object is created. + +:: + + >>> from boto.s3.lifecycle import Lifecycle, Transition, Rule + >>> to_glacier = Transition(days=30, storage_class='GLACIER') + >>> rule = Rule('ruleid', 'logs/', 'Enabled', transition=to_glacier) + >>> lifecycle = Lifecycle() + >>> lifecycle.append(rule) + +.. 
note::
+
+    For API docs for the lifecycle objects, see :py:mod:`boto.s3.lifecycle`
+
+We can now configure the bucket with this lifecycle policy::
+
+    >>> bucket.configure_lifecycle(lifecycle)
+    True
+
+You can also retrieve the current lifecycle policy for the bucket::
+
+    >>> current = bucket.get_lifecycle_config()
+    >>> print current[0].transition
+    <Transition: in: 30 days, GLACIER>
+
+When an object transitions to Glacier, the storage class will be
+updated. This can be seen when you **list** the objects in a bucket::
+
+    >>> for key in bucket.list():
+    ...     print key, key.storage_class
+    ...
+    <Key: s3-glacier-boto-demo,logs/testlog1.log> GLACIER
+
+You can also use the prefix argument to the ``bucket.list`` method::
+
+    >>> print list(bucket.list(prefix='logs/testlog1.log'))[0].storage_class
+    u'GLACIER'
+
+
+Restoring Objects from Glacier
+------------------------------
+
+Once an object has been transitioned to Glacier, you can restore the object
+back to S3. To do so, you can use the :py:meth:`boto.s3.key.Key.restore`
+method of the key object.
+The ``restore`` method takes an integer that specifies the number of days
+to keep the object in S3.
+
+::
+
+    >>> import boto
+    >>> c = boto.connect_s3()
+    >>> bucket = c.get_bucket('s3-glacier-boto-demo')
+    >>> key = bucket.get_key('logs/testlog1.log')
+    >>> key.restore(days=5)
+
+It takes about 4 hours for a restore operation to make a copy of the archive
+available for you to access. While the object is being restored, the
+``ongoing_restore`` attribute will be set to ``True``::
+
+
+    >>> key = bucket.get_key('logs/testlog1.log')
+    >>> print key.ongoing_restore
+    True
+
+When the restore is finished, this value will be ``False`` and the expiry
+date of the object will be non-``None``::
+
+    >>> key = bucket.get_key('logs/testlog1.log')
+    >>> print key.ongoing_restore
+    False
+    >>> print key.expiry_date
+    "Fri, 21 Dec 2012 00:00:00 GMT"
+
+
+.. note:: If there is no restore operation either in progress or completed,
+    the ``ongoing_restore`` attribute will be ``None``.
+
+Once the object is restored you can then download the contents::
+
+    >>> key.get_contents_to_filename('testlog1.log')
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/security_groups.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/security_groups.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0b959c4816c6a30af0c813a9eae27c465a65ac7a
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/security_groups.rst
@@ -0,0 +1,82 @@
+.. _security_groups:
+
+===================
+EC2 Security Groups
+===================
+
+Amazon defines a security group as:
+
+"A security group is a named collection of access rules. These access rules
+ specify which ingress, i.e. incoming, network traffic should be delivered
+ to your instance."
+
+To get a listing of all currently defined security groups::
+
+    >>> rs = conn.get_all_security_groups()
+    >>> print rs
+    [SecurityGroup:appserver, SecurityGroup:default, SecurityGroup:vnc, SecurityGroup:webserver]
+
+Each security group can have an arbitrary number of rules that represent the
+different network ports being enabled. To find the rules for a
+particular security group, use the rules attribute::
+
+    >>> sg = rs[1]
+    >>> sg.name
+    u'default'
+    >>> sg.rules
+    [IPPermissions:tcp(0-65535),
+     IPPermissions:udp(0-65535),
+     IPPermissions:icmp(-1--1),
+     IPPermissions:tcp(22-22),
+     IPPermissions:tcp(80-80)]
+
+In addition to listing the available security groups you can also create
+a new security group. I'll follow through the "Three Tier Web Service"
+example included in the EC2 Developer's Guide for an example of how to
+create security groups and add rules to them.
+
+First, let's create a group for our Apache web servers that allows HTTP
+access to the world::
+
+    >>> web = conn.create_security_group('apache', 'Our Apache Group')
+    >>> web
+    SecurityGroup:apache
+    >>> web.authorize('tcp', 80, 80, '0.0.0.0/0')
+    True
+
+The first argument is the IP protocol, which can be one of: tcp, udp or icmp.
+The second argument is the FromPort, or the beginning port in the range; the
+third argument is the ToPort, or the ending port in the range; and the last
+argument is the CIDR IP range to authorize access to.
+
+Next we create another group for the app servers::
+
+    >>> app = conn.create_security_group('appserver', 'The application tier')
+
+We then want to grant access between the web server group and the app
+server group. So, rather than specifying an IP address as we did in the
+last example, this time we will specify another SecurityGroup object::
+
+    >>> app.authorize(src_group=web)
+    True
+
+Now, to verify that the web group has access to the app servers, we want to
+temporarily allow SSH access to the web servers from our computer. Let's
+say that our IP address is 192.168.1.130 as it is in the EC2 Developer
+Guide. To enable that access::
+
+    >>> web.authorize(ip_protocol='tcp', from_port=22, to_port=22, cidr_ip='192.168.1.130/32')
+    True
+
+Now that this access is authorized, we could ssh into an instance running in
+the web group and then try to telnet to specific ports on servers in the
+appserver group, as shown in the EC2 Developer's Guide. When this testing is
+complete, we would want to revoke SSH access to the web server group, like this::
+
+    >>> web.rules
+    [IPPermissions:tcp(80-80),
+     IPPermissions:tcp(22-22)]
+    >>> web.revoke('tcp', 22, 22, cidr_ip='192.168.1.130/32')
+    True
+    >>> web.rules
+    [IPPermissions:tcp(80-80)]
\ No newline at end of file
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/ses_tut.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/ses_tut.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d19a4e36c1b63842369fef8b1dd290a8d4866ecf
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/ses_tut.rst
@@ -0,0 +1,172 @@
+.. _ses_tut:
+
+=============================
+Simple Email Service Tutorial
+=============================
+
+This tutorial focuses on the boto interface to AWS'
+`Simple Email Service (SES) <http://aws.amazon.com/ses/>`_.
+This tutorial assumes that you have boto already downloaded and installed.
+
+.. _SES: http://aws.amazon.com/ses/
+
+Creating a Connection
+---------------------
+
+The first step in accessing SES is to create a connection to the service.
+To do so, the most straightforward way is the following::
+
+    >>> import boto.ses
+    >>> conn = boto.ses.connect_to_region(
+            'us-west-2',
+            aws_access_key_id='<YOUR_AWS_KEY_ID>',
+            aws_secret_access_key='<YOUR_AWS_SECRET_KEY>')
+    >>> conn
+    SESConnection:email.us-west-2.amazonaws.com
+
+Bear in mind that if you have your credentials in boto config in your home
+directory, the two keyword arguments in the call above are not needed. More
+details on configuration can be found in :doc:`boto_config_tut`.
+
+The :py:func:`boto.ses.connect_to_region` function returns a
+:py:class:`boto.ses.connection.SESConnection` instance, which is the boto API
+for working with SES.
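+
+If you are not sure which regions SES is available in, you can ask boto
+itself -- a quick sketch using the standard ``regions`` helper (the exact
+list returned depends on your boto version)::
+
+    >>> import boto.ses
+    >>> [r.name for r in boto.ses.regions()]
+    [u'us-east-1', u'us-west-2', ...]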
+
+Notes on Sending
+----------------
+
+It is important to keep in mind that while emails appear to come "from" the
+address that you specify via Reply-To, the sending is done through Amazon.
+Some clients do pick up on this disparity, and leave a note on emails.
+
+Verifying a Sender Email Address
+--------------------------------
+
+Before you can send email "from" an address, you must prove that you have
+access to the account. When you send a validation request, an email is sent
+to the address with a link in it. Clicking on the link validates the address
+and adds it to your SES account. Here's how to send the validation email::
+
+    >>> conn.verify_email_address('some@address.com')
+    {
+        'VerifyEmailAddressResponse': {
+            'ResponseMetadata': {
+                'RequestId': '4a974fd5-56c2-11e1-ad4c-c1f08c91d554'
+            }
+        }
+    }
+
+After a short amount of time, you'll find an email with the validation
+link inside. Click it, and this address may be used to send emails.
+
+Listing Verified Addresses
+--------------------------
+
+If you'd like to list the addresses that are currently verified on your
+SES account, use
+:py:meth:`list_verified_email_addresses <boto.ses.connection.SESConnection.list_verified_email_addresses>`::
+
+    >>> conn.list_verified_email_addresses()
+    {
+        'ListVerifiedEmailAddressesResponse': {
+            'ListVerifiedEmailAddressesResult': {
+                'VerifiedEmailAddresses': [
+                    'some@address.com',
+                    'another@address.com'
+                ]
+            },
+            'ResponseMetadata': {
+                'RequestId': '2ab45c18-56c3-11e1-be66-ffd2a4549d70'
+            }
+        }
+    }
+
+Deleting a Verified Address
+---------------------------
+
+In the event that you'd like to remove an email address from your account,
+use
+:py:meth:`delete_verified_email_address <boto.ses.connection.SESConnection.delete_verified_email_address>`::
+
+    >>> conn.delete_verified_email_address('another@address.com')
+
+Sending an Email
+----------------
+
+Sending an email is done via
+:py:meth:`send_email <boto.ses.connection.SESConnection.send_email>`::
+
+    >>> conn.send_email(
+            'some@address.com',
+            'Your subject',
+            'Body here',
+            ['recipient-address-1@gmail.com'])
+    {
+        'SendEmailResponse': {
+            'ResponseMetadata': {
+                'RequestId': '4743c2b7-56c3-11e1-bccd-c99bd68002fd'
+            },
+            'SendEmailResult': {
+                'MessageId': '000001357a177192-7b894025-147a-4705-8455-7c880b0c8270-000000'
+            }
+        }
+    }
+
+If you want to send a multipart MIME email, see the reference for
+:py:meth:`send_raw_email <boto.ses.connection.SESConnection.send_raw_email>`,
+which is a bit more of a low-level alternative.
+
+Checking your Send Quota
+------------------------
+
+Staying within your quota is critical, since the upper limit is a hard cap.
+Once you have hit your quota, no further email may be sent until enough
+time elapses to where your 24 hour email count (rolling continuously) is
+within acceptable ranges. Use
+:py:meth:`get_send_quota <boto.ses.connection.SESConnection.get_send_quota>`::
+
+    >>> conn.get_send_quota()
+    {
+        'GetSendQuotaResponse': {
+            'GetSendQuotaResult': {
+                'Max24HourSend': '100000.0',
+                'SentLast24Hours': '181.0',
+                'MaxSendRate': '28.0'
+            },
+            'ResponseMetadata': {
+                'RequestId': u'8a629245-56c4-11e1-9c53-9d5f4d2cc8d3'
+            }
+        }
+    }
+
+Checking your Send Statistics
+-----------------------------
+
+In order to fight spammers and ensure quality mail is being sent from SES,
+Amazon tracks bounces, rejections, and complaints. This is done via
+:py:meth:`get_send_statistics <boto.ses.connection.SESConnection.get_send_statistics>`.
+Please be warned that the output is extremely verbose, to the point
+where we'll just show a short excerpt here::
+
+    >>> conn.get_send_statistics()
+    {
+        'GetSendStatisticsResponse': {
+            'GetSendStatisticsResult': {
+                'SendDataPoints': [
+                    {
+                        'Complaints': '0',
+                        'Timestamp': '2012-02-13T05:02:00Z',
+                        'DeliveryAttempts': '8',
+                        'Bounces': '0',
+                        'Rejects': '0'
+                    },
+                    {
+                        'Complaints': '0',
+                        'Timestamp': '2012-02-13T05:17:00Z',
+                        'DeliveryAttempts': '12',
+                        'Bounces': '0',
+                        'Rejects': '0'
+                    }
+                ]
+            }
+        }
+    }
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/simpledb_tut.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/simpledb_tut.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6ecc087f791c4db23d8efbbd17ebad96c1d61ac5
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/simpledb_tut.rst
@@ -0,0 +1,198 @@
+.. _simpledb_tut:
+
+============================================
+An Introduction to boto's SimpleDB interface
+============================================
+
+This tutorial focuses on the boto interface to AWS' SimpleDB_. This tutorial
+assumes that you have boto already downloaded and installed.
+
+.. _SimpleDB: http://aws.amazon.com/simpledb/
+
+.. note::
+
+    If you're starting a new application, you might want to consider using
+    :doc:`DynamoDB2 <dynamodb2_tut>` instead, as it has a more comprehensive
+    feature set & has guaranteed performance throughput levels.
+
+Creating a Connection
+---------------------
+The first step in accessing SimpleDB is to create a connection to the service.
+To do so, the most straightforward way is the following::
+
+    >>> import boto.sdb
+    >>> conn = boto.sdb.connect_to_region(
+    ...     'us-west-2',
+    ...     aws_access_key_id='<YOUR_AWS_KEY_ID>',
+    ...     aws_secret_access_key='<YOUR_AWS_SECRET_KEY>')
+    >>> conn
+    SDBConnection:sdb.amazonaws.com
+    >>>
+
+Bear in mind that if you have your credentials in boto config in your home
+directory, the two keyword arguments in the call above are not needed. Also
+important to note is that, just as with any other AWS service, SimpleDB is
+region-specific and as such you might want to specify which region to connect
+to; by default, it'll connect to the US-EAST-1 region.
+
+Creating Domains
+----------------
+Arguably, once you have your connection established, you'll want to create
+one or more domains. Creating new domains is a fairly straightforward
+operation. To do so, you can proceed as follows::
+
+    >>> conn.create_domain('test-domain')
+    Domain:test-domain
+    >>>
+    >>> conn.create_domain('test-domain-2')
+    Domain:test-domain-2
+    >>>
+
+Please note that SimpleDB, unlike its newest sibling DynamoDB, is truly and
+completely schema-less. Thus, there's no need to specify domain keys or
+ranges.
+
+Listing All Domains
+-------------------
+Unlike DynamoDB or other database systems, SimpleDB uses the concept of
+'domains' instead of tables. So, to list all your domains for your account
+in a region, you can simply do as follows::
+
+    >>> domains = conn.get_all_domains()
+    >>> domains
+    [Domain:test-domain, Domain:test-domain-2]
+    >>>
+
+The get_all_domains() method returns a :py:class:`boto.resultset.ResultSet`
+containing all :py:class:`boto.sdb.domain.Domain` objects associated with
+this connection's Access Key ID for that region.
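+
+Since the result is an ordinary iterable, a quick way to pull out just the
+domain names -- a minimal sketch, reusing the domains created above -- is::
+
+    >>> [d.name for d in conn.get_all_domains()]
+    [u'test-domain', u'test-domain-2']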
+
+Retrieving a Domain (by name)
+-----------------------------
+If you wish to retrieve a specific domain whose name is known, you can do so as follows::
+
+    >>> dom = conn.get_domain('test-domain')
+    >>> dom
+    Domain:test-domain
+    >>>
+
+The get_domain call has an optional validate parameter, which defaults to True. This will make sure to raise
+an exception if the domain you are looking for doesn't exist. If you set it to False, it will return a
+:py:class:`Domain <boto.sdb.domain.Domain>` object regardless of whether the domain actually exists.
+
+Getting Domain Metadata
+------------------------
+There are times when you might want to know your domain's machine usage, approximate item count and other such data.
+To this end, boto offers a simple and convenient way to do so as shown below::
+
+    >>> domain_meta = conn.domain_metadata(dom)
+    >>> domain_meta
+    <boto.sdb.domain.DomainMetaData instance at 0x...>
+    >>> dir(domain_meta)
+    ['BoxUsage', 'DomainMetadataResponse', 'DomainMetadataResult', 'RequestId', 'ResponseMetadata',
+    '__doc__', '__init__', '__module__', 'attr_name_count', 'attr_names_size', 'attr_value_count', 'attr_values_size',
+    'domain', 'endElement', 'item_count', 'item_names_size', 'startElement', 'timestamp']
+    >>> domain_meta.item_count
+    0
+    >>>
+
+Please bear in mind that while in the example above we used a previously retrieved domain object as the parameter, you
+can also retrieve the domain metadata via the domain's name (as a string).
+
+Adding Items (and attributes)
+-----------------------------
+Once you have your domain set up, presumably, you'll want to start adding items to it.
+In its most straightforward form, you need to provide a name for the item -- think of it
+as a record id -- and a collection of the attributes you want to store in the item (often a Dictionary-like object).
+So, adding an item to a domain looks as follows::
+
+    >>> item_name = 'ABC_123'
+    >>> item_attrs = {'Artist': 'The Jackson 5', 'Genre': 'Pop'}
+    >>> dom.put_attributes(item_name, item_attrs)
+    True
+    >>>
+
+Now let's check if it worked::
+
+    >>> domain_meta = conn.domain_metadata(dom)
+    >>> domain_meta.item_count
+    1
+    >>>
+
+
+Batch Adding Items (and attributes)
+-----------------------------------
+You can also add a number of items at the same time in a similar fashion. All you have to provide to the batch_put_attributes() method
+is a Dictionary-like object with your items and their respective attributes, as follows::
+
+    >>> items = {'item1':{'attr1':'val1'},'item2':{'attr2':'val2'}}
+    >>> dom.batch_put_attributes(items)
+    True
+    >>>
+
+Now, let's check the item count once again::
+
+    >>> domain_meta = conn.domain_metadata(dom)
+    >>> domain_meta.item_count
+    3
+    >>>
+
+A few words of warning: both batch_put_attributes() and put_attributes(), by default, will overwrite the values of the attributes if both
+the item and attribute already exist. If the item exists, but not the attributes, the new attributes will be appended to the
+attribute list of that item. If you do not wish these methods to behave in that manner, simply supply them with a
+``replace=False`` parameter.
+
+
+Retrieving Items
+-----------------
+Retrieving an item along with its attributes is a fairly straightforward operation, accomplished as follows::
+
+    >>> dom.get_item('item1')
+    {u'attr1': u'val1'}
+    >>>
+
+Since SimpleDB works in an "eventual consistency" manner, we can also request
+a forced consistent read (though this will adversely affect read
+performance). The way to accomplish that is shown below::
+
+    >>> dom.get_item('item1', consistent_read=True)
+    {u'attr1': u'val1'}
+    >>>
+
+Retrieving One or More Items
+----------------------------
+Another way to retrieve items is through boto's select() method. This method, at the bare minimum, requires a standard SQL select query string,
+and you would do something along these lines::
+
+    >>> query = 'select * from `test-domain` where attr1="val1"'
+    >>> rs = dom.select(query)
+    >>> for j in rs:
+    ...     print 'o hai'
+    ...
+    o hai
+    >>>
+
+This method returns a ResultSet collection you can iterate over.
+
+Updating Item Attributes
+------------------------
+The easiest way to modify an item is to manipulate its attributes and then save those changes. For example::
+
+    >>> item = dom.get_item('item1')
+    >>> item['attr1'] = 'val_changed'
+    >>> item.save()
+
+
+Deleting Items (and their attributes)
+-------------------------------------
+Deleting an item is a very simple operation. All you are required to provide is either the name of the item or an item object to the
+delete_item() method; boto will take care of the rest::
+
+    >>> dom.delete_item(item)
+    True
+
+
+
+Deleting Domains
+----------------
+To delete a domain and all the items under it (so be very careful), you can do it as follows::
+
+    >>> conn.delete_domain('test-domain')
+    True
+    >>>
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/sqs_tut.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/sqs_tut.rst
new file mode 100644
index 0000000000000000000000000000000000000000..06cdc4403b77349e4296808e9c3dc656e44aae77
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/sqs_tut.rst
@@ -0,0 +1,276 @@
+.. _sqs_tut:
+
+=======================================
+An Introduction to boto's SQS interface
+=======================================
+
+This tutorial focuses on the boto interface to the Simple Queue Service
+from Amazon Web Services. This tutorial assumes that you have boto already
+downloaded and installed.
+
+Creating a Connection
+---------------------
+The first step in accessing SQS is to create a connection to the service.
+The recommended method of doing this is as follows::
+
+    >>> import boto.sqs
+    >>> conn = boto.sqs.connect_to_region(
+    ...     "us-west-2",
+    ...     aws_access_key_id='',
+    ...     aws_secret_access_key='')
+
+At this point the variable conn will point to an SQSConnection object in the
+US-WEST-2 region. Bear in mind that, just as with any other AWS service, SQS is
+region-specific. In this example, the AWS access key and AWS secret key are
+passed in to the method explicitly. Alternatively, you can set the environment
+variables:
+
+* ``AWS_ACCESS_KEY_ID`` - Your AWS Access Key ID
+* ``AWS_SECRET_ACCESS_KEY`` - Your AWS Secret Access Key
+
+and then simply call::
+
+    >>> import boto.sqs
+    >>> conn = boto.sqs.connect_to_region("us-west-2")
+
+In either case, conn will point to an SQSConnection object which we will
+use throughout the remainder of this tutorial.
+
+Creating a Queue
+----------------
+Once you have a connection established with SQS, you will probably want to
+create a queue. In its simplest form, that can be accomplished as follows::
+
+    >>> q = conn.create_queue('myqueue')
+
+The create_queue method will create (and return) the requested queue if it does not
+exist or will return the existing queue if it does. There is an
+optional parameter to create_queue called visibility_timeout. This controls
+how long a message will remain invisible to other queue readers
+once it has been read (see the SQS documentation for a more detailed explanation).
+If this is not explicitly specified, the queue will be created with whatever
+default value SQS provides (currently 30 seconds). If you would like to
+specify another value, you could do so like this::
+
+    >>> q = conn.create_queue('myqueue', 120)
+
+This would establish a default visibility timeout for this queue of 120
+seconds. As you will see later on, this default value for the queue can
+also be overridden each time a message is read from the queue. If you want
+to check what the default visibility timeout is for a queue::
+
+    >>> q.get_timeout()
+    30
+
+Listing all Queues
+------------------
+
+To retrieve a list of the queues for your account in the current region::
+
+    >>> conn.get_all_queues()
+    [
+        Queue(https://queue.amazonaws.com/411358162645/myqueue),
+        Queue(https://queue.amazonaws.com/411358162645/another_queue),
+        Queue(https://queue.amazonaws.com/411358162645/another_queue2)
+    ]
+
+This will leave you with a list of all of your :py:class:`boto.sqs.queue.Queue`
+instances. Alternatively, if you wanted to only list the queues that started
+with ``'another'``::
+
+    >>> conn.get_all_queues(prefix='another')
+    [
+        Queue(https://queue.amazonaws.com/411358162645/another_queue),
+        Queue(https://queue.amazonaws.com/411358162645/another_queue2)
+    ]
+
+Getting a Queue (by name)
+-------------------------
+If you wish to explicitly retrieve an existing queue and the name of the queue is known,
+you can retrieve the queue as follows::
+
+    >>> my_queue = conn.get_queue('myqueue')
+    >>> my_queue
+    Queue(https://queue.amazonaws.com/411358162645/myqueue)
+
+This leaves you with a single :py:class:`boto.sqs.queue.Queue`, which abstracts
+the SQS Queue named 'myqueue'.
+
+Writing Messages
+----------------
+Once you have a queue set up, presumably you will want to write some messages
+to it. SQS doesn't care what kind of information you store in your messages
+or what format you use to store it. As long as the amount of data per
+message is less than or equal to 256KB, SQS won't complain.
+
+So, first we need to create a Message object::
+
+    >>> from boto.sqs.message import Message
+    >>> m = Message()
+    >>> m.set_body('This is my first message.')
+    >>> q.write(m)
+
+The write method will return the ``Message`` object. The ``id`` and
+``md5`` attributes of the ``Message`` object will be updated with the
+values of the message that was written to the queue.
+
+Arbitrary message attributes can be defined by setting a simple dictionary
+of values on the message object::
+
+    >>> m = Message()
+    >>> m.message_attributes = {
+    ...     "name1": {
+    ...         "data_type": "String",
+    ...         "string_value": "I am a string"
+    ...     },
+    ...     "name2": {
+    ...         "data_type": "Number",
+    ...         "string_value": "12"
+    ...     }
+    ... }
+
+Note that by default, these arbitrary attributes are not returned when
+you request messages from a queue. Instead, you must request them via
+the ``message_attributes`` parameter (see below).
+
+If the message cannot be written, an ``SQSError`` exception will be raised.
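+
+Because SQS treats the message body as an opaque string, a common pattern is
+to serialize structured data yourself before writing. A minimal sketch using
+JSON (just one serialization choice among many), assuming the queue ``q`` and
+the ``Message`` import from above::
+
+    >>> import json
+    >>> m = Message()
+    >>> m.set_body(json.dumps({'task': 'resize', 'key': 'photo1.jpg'}))
+    >>> q.write(m)
+
+When you read the message back (see Reading Messages below),
+``json.loads(m.get_body())`` recovers the original dictionary.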
+
+Writing Messages (Custom Format)
+--------------------------------
+The technique above will work only if you use boto's default Message payload format;
+however, you may have a lot of specific requirements around the format of
+the message data. For example, you may want to store one big string, something
+that looks more like an RFC 822 message, or a binary payload such as pickled
+Python objects.
+
+The way boto deals with this issue is to define a simple Message object that
+treats the message data as one big string which you can set and get. If that
+Message object meets your needs, you're good to go. However, if you need to
+incorporate different behavior in your message or handle different types of
+data, you can create your own Message class. You just need to register that
+class with the boto queue object so that it knows that, when you read a message from the
+queue, it should create one of your message objects rather than the
+default boto Message object. To register your message class, you would::
+
+    >>> from mymodule import MyMessage
+    >>> q.set_message_class(MyMessage)
+    >>> m = MyMessage()
+    >>> m.set_body('This is my first message.')
+    >>> q.write(m)
+
+where ``MyMessage`` is your message class (imported here from a hypothetical
+``mymodule``). Your message class should subclass the boto Message because
+there is a small bit of Python magic happening in the ``__setattr__`` method
+of the boto Message class.
+
+Reading Messages
+----------------
+
+So, now we have a message in our queue. How would we go about reading it?
+Here's one way::
+
+    >>> rs = q.get_messages()
+    >>> len(rs)
+    1
+    >>> m = rs[0]
+    >>> m.get_body()
+    u'This is my first message.'
+
+The get_messages method returns a :py:class:`boto.resultset.ResultSet` object
+that contains the results of the request. To get at the results, you can
+treat the ResultSet as a sequence (e.g. a list): we can check the length
+(how many results) and access particular items within the list using the
+index and slice notation familiar to Python programmers.
+
+At this point, we have read the message from the queue and SQS will make
+sure that this message remains invisible to other readers of the queue
+until the visibility timeout period for the queue expires. If you delete
+the message before the timeout period expires, then no one else will ever see
+the message again. However, if you don't delete it (because your reader
+crashed or failed in some way, for example) it will reappear in the queue
+for someone else to read. If you aren't happy with the default visibility
+timeout defined for the queue, you can override it when you read a message::
+
+    >>> q.get_messages(visibility_timeout=60)
+
+This means that regardless of what the default visibility timeout is for
+the queue, this message will remain invisible to other readers for 60
+seconds.
+
+The get_messages method can also return more than a single message. By
+passing a num_messages parameter (defaults to 1) you can control the maximum
+number of messages that will be returned by the method. To show this
+feature off, first let's load up a few more messages::
+
+    >>> for i in range(1, 11):
+    ...     m = Message()
+    ...     m.set_body('This is message %d' % i)
+    ...     q.write(m)
+    ...
+    >>> rs = q.get_messages(10)
+    >>> len(rs)
+    10
+
+Don't be alarmed if the length of the result set returned by the get_messages
+call is less than 10. Sometimes it takes some time for new messages to become
+visible in the queue. Give it a minute or two and they will all show up.
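+
+Rather than sleeping and calling get_messages again, you can also ask SQS to
+hold the request open until a message arrives, known as long polling. A small
+sketch, assuming the queue above (SQS caps ``wait_time_seconds`` at 20)::
+
+    >>> rs = q.get_messages(10, wait_time_seconds=20)
+
+The call returns as soon as at least one message is available, or after 20
+seconds with an empty result set.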
+
+If you want a slightly simpler way to read messages from a queue, you
+can use the read method. It will either return the message read or
+it will return None if no messages were available. You can also pass
+a visibility_timeout parameter to read, if you desire::
+
+    >>> m = q.read(60)
+    >>> m.get_body()
+    u'This is my first message.'
+
+Reading Message Attributes
+--------------------------
+By default, no arbitrary message attributes are returned when requesting
+messages. You can change this behavior by specifying the names of attributes
+you wish to have returned::
+
+    >>> rs = q.get_messages(message_attributes=['name1', 'name2'])
+    >>> print rs[0].message_attributes['name1']['string_value']
+    I am a string
+
+A special value of ``All`` or ``.*`` may be passed to return all available
+message attributes.
+
+Deleting Messages and Queues
+----------------------------
+As stated above, messages are never deleted by the queue unless explicitly told to do so.
+To remove a message from a queue::
+
+    >>> q.delete_message(m)
+    []
+
+If you want to delete the entire queue, you would use::
+
+    >>> conn.delete_queue(q)
+
+This will delete the queue, even if there are still messages within the queue.
+
+Additional Information
+----------------------
+The above tutorial covers the basic operations of creating queues, writing messages,
+reading messages, deleting messages, and deleting queues. There are a
+few utility methods in boto that might be useful as well. For example,
+to count the (approximate) number of messages in a queue::
+
+    >>> q.count()
+    10
+
+Removing all messages in a queue is as simple as calling purge::
+
+    >>> q.purge()
+
+Be REAL careful with that one! Finally, if you want to dump all of the
+messages in a queue to a local file::
+
+    >>> q.dump('messages.txt', sep='\n------------------\n')
+
+This will read all of the messages in the queue and write the bodies of
+each of the messages to the file messages.txt. The optional ``sep`` argument
+is a separator that will be printed between each message body in the file.
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/support_tut.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/support_tut.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8dbc4fcfdd994dfbe3acaa150b505a5679bd8229
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/support_tut.rst
@@ -0,0 +1,154 @@
+.. _support_tut:
+
+===========================================
+An Introduction to boto's Support interface
+===========================================
+
+This tutorial focuses on the boto interface to Amazon Web Services Support,
+allowing you to programmatically interact with cases created with Support.
+This tutorial assumes that you have already downloaded and installed ``boto``.
+
+Creating a Connection
+---------------------
+
+The first step in accessing Support is to create a connection
+to the service. There are two ways to do this in boto. The first is:
+
+>>> from boto.support.connection import SupportConnection
+>>> conn = SupportConnection('', '')
+
+At this point the variable ``conn`` will point to a ``SupportConnection``
+object. In this example, the AWS access key and AWS secret key are passed in to
+the method explicitly.
Alternatively, you can set the environment variables:
+
+**AWS_ACCESS_KEY_ID**
+    Your AWS Access Key ID
+
+**AWS_SECRET_ACCESS_KEY**
+    Your AWS Secret Access Key
+
+and then call the constructor without any arguments, like this:
+
+>>> conn = SupportConnection()
+
+There is also a shortcut function in boto
+that makes it easy to create Support connections:
+
+>>> import boto.support
+>>> conn = boto.support.connect_to_region('us-west-2')
+
+In either case, ``conn`` points to a ``SupportConnection`` object which we will
+use throughout the remainder of this tutorial.
+
+
+Describing Existing Cases
+-------------------------
+
+If you have existing cases or want to fetch cases in the future, you'll
+use the ``SupportConnection.describe_cases`` method. For example::
+
+    >>> cases = conn.describe_cases()
+    >>> len(cases['cases'])
+    1
+    >>> cases['cases'][0]['title']
+    'A test case.'
+    >>> cases['cases'][0]['caseId']
+    'case-...'
+
+You can also fetch a set of cases (or a single case) by providing a
+``case_id_list`` parameter::
+
+    >>> cases = conn.describe_cases(case_id_list=['case-1'])
+    >>> len(cases['cases'])
+    1
+    >>> cases['cases'][0]['title']
+    'A test case.'
+    >>> cases['cases'][0]['caseId']
+    'case-...'
+
+
+Describing Service Codes
+------------------------
+
+In order to create a new case, you'll need to fetch the service (& category)
+codes available to you. Fetching them is a simple call to::
+
+    >>> services = conn.describe_services()
+    >>> services['services'][0]['code']
+    'amazon-cloudsearch'
+
+If you only care about certain services, you can pass a list of service codes::
+
+    >>> service_details = conn.describe_services(service_code_list=[
+    ...     'amazon-cloudsearch',
+    ...     'amazon-dynamodb',
+    ... ])
+
+
+Describing Severity Levels
+--------------------------
+
+In order to create a new case, you'll also need to fetch the severity levels
+available to you. Fetching them looks like::
+
+    >>> severities = conn.describe_severity_levels()
+    >>> severities['severityLevels'][0]['code']
+    'low'
+
+
+Creating a Case
+---------------
+
+With a connection to Support in hand, you can work with existing Support
+cases, create new cases or resolve them. We'll start by creating a new case::
+
+    >>> new_case = conn.create_case(
+    ...     subject='This is a test case.',
+    ...     service_code='',
+    ...     category_code='',
+    ...     communication_body="",
+    ...     severity_code='low'
+    ... )
+    >>> new_case['caseId']
+    'case-...'
+
+For the ``service_code/category_code`` parameters, you'll need to do a
+``SupportConnection.describe_services`` call, then select the appropriate
+service code (& the appropriate category code within that service) from the
+response.
+
+For the ``severity_code`` parameter, you'll need to do a
+``SupportConnection.describe_severity_levels`` call, then select the appropriate
+severity code from the response.
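+
+For instance, a minimal sketch of picking out codes from those two calls (the
+``[0]`` indexes are placeholders; choose the entries that actually match your
+issue)::
+
+    >>> services = conn.describe_services()['services']
+    >>> service_code = services[0]['code']
+    >>> # each service entry carries its own list of applicable categories
+    >>> category_code = services[0]['categories'][0]['code']
+    >>> severity_code = conn.describe_severity_levels()['severityLevels'][0]['code']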
+
+
+Adding to a Case
+----------------
+
+Since the purpose of a support case involves back-and-forth communication,
+you can add additional communication to the case as well. Providing a response
+might look like::
+
+    >>> result = conn.add_communication_to_case(
+    ...     communication_body="This is a followup. It's working now.",
+    ...     case_id='case-...'
+    ... )
+
+
+Fetching all Communications for a Case
+--------------------------------------
+
+Getting all communications for a given case looks like::
+
+    >>> communications = conn.describe_communications('case-...')
+
+
+Resolving a Case
+----------------
+
+Once a case is finished, you should mark it as resolved to close it out.
+Resolving a case looks like::
+
+    >>> closed = conn.resolve_case(case_id='case-...')
+    >>> closed['result']
+    True
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/swf_tut.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/swf_tut.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ffbacfd2c636cf3e02b9064c1068ef30141bf727
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/swf_tut.rst
@@ -0,0 +1,663 @@
+.. _swf_tut:
+
+.. :Authors: Slawek "oozie" Ligus, Brad Morris
+
+===============================
+Amazon Simple Workflow Tutorial
+===============================
+
+This tutorial focuses on boto's interface to the AWS SimpleWorkflow_ service.
+
+.. _SimpleWorkflow: http://aws.amazon.com/swf/
+
+What is a workflow?
+-------------------
+
+A workflow is a sequence of multiple activities aimed at accomplishing a well-defined objective. For instance, booking an airline ticket as a workflow may encompass multiple activities, such as selection of an itinerary, submission of personal details, payment validation and booking confirmation.
+
+Except for the start and completion of a workflow, each step has a well-defined predecessor and successor. With that:
+
+ - on successful completion of an activity the workflow can progress with its execution,
+ - when one of the workflow's activities fails it can be retried,
+ - and when it keeps failing repeatedly the workflow may regress to the previous step to gather alternative inputs, or it may simply fail at that stage.
+
+Why use workflows?
+------------------
+
+Modelling an application on a workflow provides a useful abstraction layer for writing highly-reliable programs for distributed systems, as individual responsibilities can be delegated to a set of redundant, independent and non-critical processing units.
+
+How does Amazon SWF help you accomplish this?
+---------------------------------------------
+
+The Amazon SimpleWorkflow service defines an interface for workflow orchestration and provides state persistence for workflow executions.
+
+Amazon SWF applications involve communication between the following entities:
+ - The Amazon Simple Workflow Service - providing centralized orchestration and workflow state persistence,
+ - Workflow Executors - some entity starting workflow executions, typically through an action taken by a user or from a cron job.
+ - Deciders - a program codifying the business logic, i.e. a set of instructions and decisions. Deciders make decisions based on the initial set of conditions and the outcomes of activities.
+ - Activity Workers - their objective is very straightforward: to take inputs, execute the tasks and return a result to the Service.
+
+The Workflow Executor contacts the SWF Service and requests instantiation of a workflow. A new workflow is created and its state is stored in the service.
+The next time a decider contacts the SWF service to ask for a decision task, it will be informed that a new workflow execution is taking place, and it will be asked to advise the SWF service on what the next steps should be. The decider then instructs the service to dispatch specific tasks to activity workers.
+At the next activity worker poll, the task is dispatched, then executed, and the results are reported back to SWF, which then passes them on to the decider. This exchange repeats until the decider is satisfied and instructs the service to complete the execution.
+
+Prerequisites
+-------------
+
+You need a valid access and secret key. The examples below assume that you have exported them to your environment, as follows:
+
+.. code-block:: bash
+
+    bash$ export AWS_ACCESS_KEY_ID=
+    bash$ export AWS_SECRET_ACCESS_KEY=
+
+Before workflows and activities can be used, they have to be registered with the SWF service:
+
+.. code-block:: python
+
+    # register.py
+    import boto.swf.layer2 as swf
+    from boto.swf.exceptions import SWFTypeAlreadyExistsError, SWFDomainAlreadyExistsError
+    DOMAIN = 'boto_tutorial'
+    VERSION = '1.0'
+
+    registerables = []
+    registerables.append(swf.Domain(name=DOMAIN))
+    for workflow_type in ('HelloWorkflow', 'SerialWorkflow', 'ParallelWorkflow', 'SubWorkflow'):
+        registerables.append(swf.WorkflowType(domain=DOMAIN, name=workflow_type, version=VERSION, task_list='default'))
+
+    for activity_type in ('HelloWorld', 'ActivityA', 'ActivityB', 'ActivityC'):
+        registerables.append(swf.ActivityType(domain=DOMAIN, name=activity_type, version=VERSION, task_list='default'))
+
+    for swf_entity in registerables:
+        try:
+            swf_entity.register()
+            print swf_entity.name, 'registered successfully'
+        except (SWFDomainAlreadyExistsError, SWFTypeAlreadyExistsError):
+            print swf_entity.__class__.__name__, swf_entity.name, 'already exists'
+
+
+Execution of the above should produce no errors; entities registered on a
+previous run are simply reported as already existing.
+
+.. code-block:: bash
+
+    bash$ python -i register.py
+    Domain boto_tutorial already exists
+    WorkflowType HelloWorkflow already exists
+    SerialWorkflow registered successfully
+    ParallelWorkflow registered successfully
+    ActivityType HelloWorld already exists
+    ActivityA registered successfully
+    ActivityB registered successfully
+    ActivityC registered successfully
+    >>>
+
+HelloWorld
+----------
+
+This example is an implementation of a minimal Hello World workflow. Its execution should unfold as follows:
+
+#. A workflow execution is started.
+#. The SWF service schedules the initial decision task.
+#. A decider polls for decision tasks and receives one.
+#. The decider requests scheduling of an activity task.
+#. The SWF service schedules the greeting activity task.
+#. An activity worker polls for an activity task and receives one.
+#. The worker completes the greeting activity.
+#. The SWF service schedules a decision task to inform about the work outcome.
+#. The decider polls and receives a new decision task.
+#. The decider schedules workflow completion.
+#. The workflow execution finishes.
+
+Workflow logic is encoded in the decider:
+
+.. code-block:: python
+
+    # hello_decider.py
+    import boto.swf.layer2 as swf
+
+    DOMAIN = 'boto_tutorial'
+    ACTIVITY = 'HelloWorld'
+    VERSION = '1.0'
+    TASKLIST = 'default'
+
+    class HelloDecider(swf.Decider):
+
+        domain = DOMAIN
+        task_list = TASKLIST
+        version = VERSION
+
+        def run(self):
+            history = self.poll()
+            if 'events' in history:
+                # Find workflow events not related to decision scheduling.
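+                # ('Decision*' events describe the decider's own task lifecycle;
+                # the remaining events are the ones that actually advanced the
+                # workflow, and the latest of them drives the next decision.)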
+ workflow_events = [e for e in history['events'] + if not e['eventType'].startswith('Decision')] + last_event = workflow_events[-1] + + decisions = swf.Layer1Decisions() + if last_event['eventType'] == 'WorkflowExecutionStarted': + decisions.schedule_activity_task('saying_hi', ACTIVITY, VERSION, task_list=TASKLIST) + elif last_event['eventType'] == 'ActivityTaskCompleted': + decisions.complete_workflow_execution() + self.complete(decisions=decisions) + return True + +The activity worker is responsible for printing the greeting message when the activity task is dispatched to it by the service: + +.. code-block:: python + + import boto.swf.layer2 as swf + + DOMAIN = 'boto_tutorial' + VERSION = '1.0' + TASKLIST = 'default' + + class HelloWorker(swf.ActivityWorker): + + domain = DOMAIN + version = VERSION + task_list = TASKLIST + + def run(self): + activity_task = self.poll() + if 'activityId' in activity_task: + print 'Hello, World!' + self.complete() + return True + +With actors implemented we can spin up a workflow execution: + +.. code-block:: bash + + $ python + >>> import boto.swf.layer2 as swf + >>> execution = swf.WorkflowType(name='HelloWorkflow', domain='boto_tutorial', version='1.0', task_list='default').start() + >>> + +From separate terminals run an instance of a worker and a decider to carry out a workflow execution (the worker and decider may run from two independent machines). + +.. code-block:: bash + + $ python -i hello_decider.py + >>> while HelloDecider().run(): pass + ... + +.. code-block:: bash + + $ python -i hello_worker.py + >>> while HelloWorker().run(): pass + ... + Hello, World! + +Great. Now, to see what just happened, go back to the original terminal from which the execution was started, and read its history. + +.. code-block:: bash + + >>> execution.history() + [{'eventId': 1, + 'eventTimestamp': 1381095173.2539999, + 'eventType': 'WorkflowExecutionStarted', + 'workflowExecutionStartedEventAttributes': {'childPolicy': 'TERMINATE', + 'executionStartToCloseTimeout': '3600', + 'parentInitiatedEventId': 0, + 'taskList': {'name': 'default'}, + 'taskStartToCloseTimeout': '300', + 'workflowType': {'name': 'HelloWorkflow', + 'version': '1.0'}}}, + {'decisionTaskScheduledEventAttributes': {'startToCloseTimeout': '300', + 'taskList': {'name': 'default'}}, + 'eventId': 2, + 'eventTimestamp': 1381095173.2539999, + 'eventType': 'DecisionTaskScheduled'}, + {'decisionTaskStartedEventAttributes': {'scheduledEventId': 2}, + 'eventId': 3, + 'eventTimestamp': 1381095177.5439999, + 'eventType': 'DecisionTaskStarted'}, + {'decisionTaskCompletedEventAttributes': {'scheduledEventId': 2, + 'startedEventId': 3}, + 'eventId': 4, + 'eventTimestamp': 1381095177.855, + 'eventType': 'DecisionTaskCompleted'}, + {'activityTaskScheduledEventAttributes': {'activityId': 'saying_hi', + 'activityType': {'name': 'HelloWorld', + 'version': '1.0'}, + 'decisionTaskCompletedEventId': 4, + 'heartbeatTimeout': '600', + 'scheduleToCloseTimeout': '3900', + 'scheduleToStartTimeout': '300', + 'startToCloseTimeout': '3600', + 'taskList': {'name': 'default'}}, + 'eventId': 5, + 'eventTimestamp': 1381095177.855, + 'eventType': 'ActivityTaskScheduled'}, + {'activityTaskStartedEventAttributes': {'scheduledEventId': 5}, + 'eventId': 6, + 'eventTimestamp': 1381095179.427, + 'eventType': 'ActivityTaskStarted'}, + {'activityTaskCompletedEventAttributes': {'scheduledEventId': 5, + 'startedEventId': 6}, + 'eventId': 7, + 'eventTimestamp': 1381095179.6989999, + 'eventType': 'ActivityTaskCompleted'}, + 
{'decisionTaskScheduledEventAttributes': {'startToCloseTimeout': '300', + 'taskList': {'name': 'default'}}, + 'eventId': 8, + 'eventTimestamp': 1381095179.6989999, + 'eventType': 'DecisionTaskScheduled'}, + {'decisionTaskStartedEventAttributes': {'scheduledEventId': 8}, + 'eventId': 9, + 'eventTimestamp': 1381095179.7420001, + 'eventType': 'DecisionTaskStarted'}, + {'decisionTaskCompletedEventAttributes': {'scheduledEventId': 8, + 'startedEventId': 9}, + 'eventId': 10, + 'eventTimestamp': 1381095180.026, + 'eventType': 'DecisionTaskCompleted'}, + {'eventId': 11, + 'eventTimestamp': 1381095180.026, + 'eventType': 'WorkflowExecutionCompleted', + 'workflowExecutionCompletedEventAttributes': {'decisionTaskCompletedEventId': 10}}] + + +Serial Activity Execution +------------------------- + +The following example implements a basic workflow with activities executed one after another. + +The business logic, i.e. the serial execution of activities, is encoded in the decider: + +.. code-block:: python + + # serial_decider.py + import time + import boto.swf.layer2 as swf + + class SerialDecider(swf.Decider): + + domain = 'boto_tutorial' + task_list = 'default_tasks' + version = '1.0' + + def run(self): + history = self.poll() + if 'events' in history: + # Get a list of non-decision events to see what event came in last. + workflow_events = [e for e in history['events'] + if not e['eventType'].startswith('Decision')] + decisions = swf.Layer1Decisions() + # Record latest non-decision event. + last_event = workflow_events[-1] + last_event_type = last_event['eventType'] + if last_event_type == 'WorkflowExecutionStarted': + # Schedule the first activity. + decisions.schedule_activity_task('%s-%i' % ('ActivityA', time.time()), + 'ActivityA', self.version, task_list='a_tasks') + elif last_event_type == 'ActivityTaskCompleted': + # Take decision based on the name of activity that has just completed. + # 1) Get activity's event id. + last_event_attrs = last_event['activityTaskCompletedEventAttributes'] + completed_activity_id = last_event_attrs['scheduledEventId'] - 1 + # 2) Extract its name. + activity_data = history['events'][completed_activity_id] + activity_attrs = activity_data['activityTaskScheduledEventAttributes'] + activity_name = activity_attrs['activityType']['name'] + # 3) Optionally, get the result from the activity. + result = last_event['activityTaskCompletedEventAttributes'].get('result') + + # Take the decision. + if activity_name == 'ActivityA': + decisions.schedule_activity_task('%s-%i' % ('ActivityB', time.time()), + 'ActivityB', self.version, task_list='b_tasks', input=result) + if activity_name == 'ActivityB': + decisions.schedule_activity_task('%s-%i' % ('ActivityC', time.time()), + 'ActivityC', self.version, task_list='c_tasks', input=result) + elif activity_name == 'ActivityC': + # Final activity completed. We're done. + decisions.complete_workflow_execution() + + self.complete(decisions=decisions) + return True + +The workers only need to know which task lists to poll. + +.. code-block:: python + + # serial_worker.py + import time + import boto.swf.layer2 as swf + + class MyBaseWorker(swf.ActivityWorker): + + domain = 'boto_tutorial' + version = '1.0' + task_list = None + + def run(self): + activity_task = self.poll() + if 'activityId' in activity_task: + # Get input. + # Get the method for the requested activity. 
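+                # (self.activity() is overridden by each concrete worker below;
+                # failures are reported back to SWF via self.fail() so the
+                # decider can react to them.)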
+ try: + print 'working on activity from tasklist %s at %i' % (self.task_list, time.time()) + self.activity(activity_task.get('input')) + except Exception, error: + self.fail(reason=str(error)) + raise error + + return True + + def activity(self, activity_input): + raise NotImplementedError + + class WorkerA(MyBaseWorker): + task_list = 'a_tasks' + def activity(self, activity_input): + self.complete(result="Now don't be givin him sambuca!") + + class WorkerB(MyBaseWorker): + task_list = 'b_tasks' + def activity(self, activity_input): + self.complete() + + class WorkerC(MyBaseWorker): + task_list = 'c_tasks' + def activity(self, activity_input): + self.complete() + + +Spin up a workflow execution and run the decider: + +.. code-block:: bash + + $ python + >>> import boto.swf.layer2 as swf + >>> execution = swf.WorkflowType(name='SerialWorkflow', domain='boto_tutorial', version='1.0', task_list='default_tasks').start() + >>> + +.. code-block:: bash + + $ python -i serial_decider.py + >>> while SerialDecider().run(): pass + ... + + +Run the workers. The activities will be executed in order: + +.. code-block:: bash + + $ python -i serial_worker.py + >>> while WorkerA().run(): pass + ... + working on activity from tasklist a_tasks at 1382046291 + +.. code-block:: bash + + $ python -i serial_worker.py + >>> while WorkerB().run(): pass + ... + working on activity from tasklist b_tasks at 1382046541 + +.. code-block:: bash + + $ python -i serial_worker.py + >>> while WorkerC().run(): pass + ... + working on activity from tasklist c_tasks at 1382046560 + + +Looks good. Now, do the following to inspect the state and history of the execution: + +.. code-block:: python + + >>> execution.describe() + {'executionConfiguration': {'childPolicy': 'TERMINATE', + 'executionStartToCloseTimeout': '3600', + 'taskList': {'name': 'default_tasks'}, + 'taskStartToCloseTimeout': '300'}, + 'executionInfo': {'cancelRequested': False, + 'closeStatus': 'COMPLETED', + 'closeTimestamp': 1382046560.901, + 'execution': {'runId': '12fQ1zSaLmI5+lLXB8ux+8U+hLOnnXNZCY9Zy+ZvXgzhE=', + 'workflowId': 'SerialWorkflow-1.0-1382046514'}, + 'executionStatus': 'CLOSED', + 'startTimestamp': 1382046514.994, + 'workflowType': {'name': 'SerialWorkflow', 'version': '1.0'}}, + 'latestActivityTaskTimestamp': 1382046560.632, + 'openCounts': {'openActivityTasks': 0, + 'openChildWorkflowExecutions': 0, + 'openDecisionTasks': 0, + 'openTimers': 0}} + >>> execution.history() + ... + +Parallel Activity Execution +--------------------------- + +When activities are independent from one another, their execution may be scheduled in parallel. + +The decider schedules all activities at once and marks progress until all activities are completed, at which point the workflow is completed. + +.. code-block:: python + + # parallel_decider.py + + import boto.swf.layer2 as swf + import time + + SCHED_COUNT = 5 + + class ParallelDecider(swf.Decider): + + domain = 'boto_tutorial' + task_list = 'default' + def run(self): + decision_task = self.poll() + if 'events' in decision_task: + decisions = swf.Layer1Decisions() + # Decision* events are irrelevant here and can be ignored. + workflow_events = [e for e in decision_task['events'] + if not e['eventType'].startswith('Decision')] + # Record latest non-decision event. + last_event = workflow_events[-1] + last_event_type = last_event['eventType'] + if last_event_type == 'WorkflowExecutionStarted': + # At start, kickoff SCHED_COUNT activities in parallel. 
+ for i in range(SCHED_COUNT): + decisions.schedule_activity_task('activity%i' % i, 'ActivityA', '1.0', + task_list=self.task_list) + elif last_event_type == 'ActivityTaskCompleted': + # Monitor progress. When all activities complete, complete workflow. + completed_count = sum([1 for a in decision_task['events'] + if a['eventType'] == 'ActivityTaskCompleted']) + print '%i/%i' % (completed_count, SCHED_COUNT) + if completed_count == SCHED_COUNT: + decisions.complete_workflow_execution() + self.complete(decisions=decisions) + return True + +Again, the only bit of information a worker needs is which task list to poll. + +.. code-block:: python + + # parallel_worker.py + import time + import boto.swf.layer2 as swf + + class ParallelWorker(swf.ActivityWorker): + + domain = 'boto_tutorial' + task_list = 'default' + + def run(self): + """Report current time.""" + activity_task = self.poll() + if 'activityId' in activity_task: + print 'working on', activity_task['activityId'] + self.complete(result=str(time.time())) + return True + +Spin up a workflow execution and run the decider: + +.. code-block:: bash + + $ python -i parallel_decider.py + >>> execution = swf.WorkflowType(name='ParallelWorkflow', domain='boto_tutorial', version='1.0', task_list='default').start() + >>> while ParallelDecider().run(): pass + ... + 1/5 + 2/5 + 4/5 + 5/5 + +Run two or more workers to see how the service partitions work execution in parallel. + +.. code-block:: bash + + $ python -i parallel_worker.py + >>> while ParallelWorker().run(): pass + ... + working on activity1 + working on activity3 + working on activity4 + +.. code-block:: bash + + $ python -i parallel_worker.py + >>> while ParallelWorker().run(): pass + ... + working on activity2 + working on activity0 + +As seen above, the work was partitioned between the two running workers. + +Sub-Workflows +------------- + +Sometimes it's desired or necessary to break the process up into multiple workflows. + +Since the decider is stateless, it's up to you to determine which workflow is being used and which action +you would like to take. + +.. 
code-block:: python
+
+    import boto.swf.layer2 as swf
+
+    class SubWorkflowDecider(swf.Decider):
+
+        domain = 'boto_tutorial'
+        task_list = 'default'
+        version = '1.0'
+
+        def run(self):
+            history = self.poll()
+            events = []
+            if 'events' in history:
+                events = history['events']
+                # Collect the entire history if there are enough events to become paginated
+                while 'nextPageToken' in history:
+                    history = self.poll(next_page_token=history['nextPageToken'])
+                    if 'events' in history:
+                        events = events + history['events']
+
+                workflow_type = history['workflowType']['name']
+
+                # Get all of the relevant events that have happened since the last decision task was started
+                workflow_events = [e for e in events
+                                   if e['eventId'] > history['previousStartedEventId'] and
+                                   not e['eventType'].startswith('Decision')]
+
+                decisions = swf.Layer1Decisions()
+
+                for event in workflow_events:
+                    last_event_type = event['eventType']
+                    if last_event_type == 'WorkflowExecutionStarted':
+                        if workflow_type == 'SerialWorkflow':
+                            decisions.start_child_workflow_execution('SubWorkflow', self.version,
+                                "subworkflow_1", task_list=self.task_list, input="sub_1")
+                        elif workflow_type == 'SubWorkflow':
+                            for i in range(2):
+                                decisions.schedule_activity_task("activity_%d" % i, 'ActivityA', self.version, task_list='a_tasks')
+                        else:
+                            decisions.fail_workflow_execution(reason="Unknown workflow %s" % workflow_type)
+                            break
+
+                    elif last_event_type == 'ChildWorkflowExecutionCompleted':
+                        decisions.schedule_activity_task("activity_2", 'ActivityB', self.version, task_list='b_tasks')
+
+                    elif last_event_type == 'ActivityTaskCompleted':
+                        attrs = event['activityTaskCompletedEventAttributes']
+                        activity = events[attrs['scheduledEventId'] - 1]
+                        activity_name = activity['activityTaskScheduledEventAttributes']['activityType']['name']
+
+                        if activity_name == 'ActivityA':
+                            completed_count = sum([1 for a in events if a['eventType'] == 'ActivityTaskCompleted'])
+                            if completed_count == 2:
+                                # Complete the child workflow
+                                decisions.complete_workflow_execution()
+                        elif activity_name == 'ActivityB':
+                            # Complete the parent workflow
+                            decisions.complete_workflow_execution()
+
+                self.complete(decisions=decisions)
+            return True
+
+Misc
+----
+
+Some of these things are not obvious from the API documents, so hopefully this
+section helps you avoid some time-consuming pitfalls.
+
+Pagination
+==========
+
+When the decider polls for new tasks, the maximum number of events it will return at a time is 100
+(configurable to a smaller number, but not larger). When running a workflow, this number is quickly
+exceeded. When it is, the decision task will contain a key ``nextPageToken``, which can be submitted to the
+``poll()`` call to get the next page of events.
+
+.. code-block:: python
+
+    decision_task = self.poll()
+
+    events = []
+    if 'events' in decision_task:
+        events = decision_task['events']
+        while 'nextPageToken' in decision_task:
+            decision_task = self.poll(next_page_token=decision_task['nextPageToken'])
+            if 'events' in decision_task:
+                events += decision_task['events']
+
+Depending on your workflow logic, you might not need to aggregate all of the events.
+
+Decision Tasks
+==============
+
+When first running deciders and activities, it may seem that the decider gets called for every event that
+an activity triggers; however, this is not the case. More than one event can happen between decision tasks.
+The decision task will contain a key ``previousStartedEventId`` that gives you the ``eventId`` of the
+last ``DecisionTaskStarted`` event that was processed. Your script will need to handle all of the events
+that have happened since then, not just the last activity.
+
+.. code-block:: python
+
+    workflow_events = [e for e in events if e['eventId'] > decision_task['previousStartedEventId']]
+
+You may also wish to filter out events whose type starts with ``Decision``, or filter in some other way
+that fits your needs. You will then have to iterate over the ``workflow_events`` list and respond to
+each event, as the list may contain multiple events.
+
+Filtering Events
+================
+
+When running many tasks in parallel, a common need is searching through the history to see how many events
+of a particular activity type started, completed, and/or failed. Some basic list comprehensions make
+this trivial.
+
+.. code-block:: python
+
+    def filter_completed_events(self, events, activity_type):
+        # Events recording an activity completion.
+        completed = [e for e in events if e['eventType'] == 'ActivityTaskCompleted']
+        # Map each completion back to the event that scheduled it.
+        orig = [events[e['activityTaskCompletedEventAttributes']['scheduledEventId']-1] for e in completed]
+        return [e for e in orig if e['activityTaskScheduledEventAttributes']['activityType']['name'] == activity_type]
+
+For more material, see the `Amazon SWF API Reference`_, the `StackOverflow questions`_
+tagged amazon-swf, and these `Miscellaneous Blog Articles`_.
+
+.. _Amazon SWF API Reference: http://docs.aws.amazon.com/amazonswf/latest/apireference/Welcome.html
+.. _StackOverflow questions: http://stackoverflow.com/questions/tagged/amazon-swf
+.. _Miscellaneous Blog Articles: http://log.ooz.ie/search/label/SimpleWorkflow
diff --git a/desktop/core/ext-py/boto-2.38.0/docs/source/vpc_tut.rst b/desktop/core/ext-py/boto-2.38.0/docs/source/vpc_tut.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ca0a9c4a4a2a95567b0d29ee66c2d8c0f917fa54
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/docs/source/vpc_tut.rst
@@ -0,0 +1,138 @@
+.. _vpc_tut:
+
+=======================================
+An Introduction to boto's VPC interface
+=======================================
+
+This tutorial is based on the examples in the Amazon Virtual Private
+Cloud Getting Started Guide (http://docs.amazonwebservices.com/AmazonVPC/latest/GettingStartedGuide/).
+In each example, it tries to show the boto requests that correspond to
+the AWS command-line tools.
+
+Creating a VPC connection
+-------------------------
+First, we need to create a new VPC connection:
+
+>>> from boto.vpc import VPCConnection
+>>> c = VPCConnection()
+
+To create a VPC
+---------------
+Now that we have a VPC connection, we can create our first VPC.
+
+>>> vpc = c.create_vpc('10.0.0.0/24')
+>>> vpc
+VPC:vpc-6b1fe402
+>>> vpc.id
+u'vpc-6b1fe402'
+>>> vpc.state
+u'pending'
+>>> vpc.cidr_block
+u'10.0.0.0/24'
+>>> vpc.dhcp_options_id
+u'default'
+>>>
+
+To create a subnet
+------------------
+The next step is to create a subnet to associate with your VPC.
+
+>>> subnet = c.create_subnet(vpc.id, '10.0.0.0/25')
+>>> subnet.id
+u'subnet-6a1fe403'
+>>> subnet.state
+u'pending'
+>>> subnet.cidr_block
+u'10.0.0.0/25'
+>>> subnet.available_ip_address_count
+123
+>>> subnet.availability_zone
+u'us-east-1b'
+>>>
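+
+Note that both the VPC and the subnet start out in the ``pending`` state. A
+small sketch of re-checking, assuming the ``vpc`` object above; ``update()``
+refreshes the object from the service and returns the current state.
+
+>>> vpc.update()
+u'available'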
+
+To create a customer gateway
+----------------------------
+Next, we create a customer gateway.
+
+>>> cg = c.create_customer_gateway('ipsec.1', '12.1.2.3', 65534)
+>>> cg.id
+u'cgw-b6a247df'
+>>> cg.type
+u'ipsec.1'
+>>> cg.state
+u'available'
+>>> cg.ip_address
+u'12.1.2.3'
+>>> cg.bgp_asn
+u'65534'
+>>>
+
+To create a VPN gateway
+-----------------------
+
+>>> vg = c.create_vpn_gateway('ipsec.1')
+>>> vg.id
+u'vgw-44ad482d'
+>>> vg.type
+u'ipsec.1'
+>>> vg.state
+u'pending'
+>>> vg.availability_zone
+u'us-east-1b'
+>>>
+
+Attaching a VPN Gateway to a VPC
+--------------------------------
+
+>>> vg.attach(vpc.id)
+>>>
+
+Associating an Elastic IP with a VPC Instance
+---------------------------------------------
+
+>>> ec2.connection.associate_address('i-71b2f60b', None, 'eipalloc-35cf685d')
+>>>
+
+Releasing an Elastic IP Attached to a VPC Instance
+--------------------------------------------------
+
+>>> ec2.connection.release_address(None, 'eipalloc-35cf685d')
+>>>
+
+To Get All VPN Connections
+--------------------------
+
+>>> vpns = c.get_all_vpn_connections()
+>>> vpns[0].id
+u'vpn-12ef67bv'
+>>> tunnels = vpns[0].tunnels
+>>> tunnels
+[VpnTunnel: 177.12.34.56, VpnTunnel: 177.12.34.57]
+
+
+To Create a VPC Peering Connection
+----------------------------------
+
+>>> vpcs = c.get_all_vpcs()
+>>> vpc_peering_connection = c.create_vpc_peering_connection(vpcs[0].id, vpcs[1].id)
+>>> vpc_peering_connection
+VpcPeeringConnection:pcx-18987471
+
+To Accept a VPC Peering Connection
+----------------------------------
+
+>>> vpc_peering_connections = c.get_all_vpc_peering_connections()
+>>> vpc_peering_connection = vpc_peering_connections[0]
+>>> vpc_peering_connection.status_code
+u'pending-acceptance'
+>>> vpc_peering_connection = c.accept_vpc_peering_connection(vpc_peering_connection.id)
+>>> vpc_peering_connection.update()
+u'active'
+
+To Reject a VPC Peering Connection
+----------------------------------
+
+>>> vpc_peering_connections = c.get_all_vpc_peering_connections()
+>>> vpc_peering_connection = vpc_peering_connections[0]
+>>> vpc_peering_connection.status_code
+u'pending-acceptance'
+>>> c.reject_vpc_peering_connection(vpc_peering_connection.id)
+>>> vpc_peering_connection.update()
+u'rejected'
+
diff --git a/desktop/core/ext-py/boto-2.38.0/pylintrc b/desktop/core/ext-py/boto-2.38.0/pylintrc
new file mode 100644
index 0000000000000000000000000000000000000000..723cbcb0b90ac93547b0f40db73774552f399125
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/pylintrc
@@ -0,0 +1,301 @@
+# lint Python modules using external checkers.
+#
+# This is the main checker controlling the other ones and the reports
+# generation. It is itself both a raw checker and an astng checker in order
+# to:
+# * handle message activation / deactivation at the module level
+# * handle some basic but necessary stats data (number of classes, methods...)
+#
+[MASTER]
+
+
+# Specify a configuration file.
+#rcfile=
+
+# Profiled execution.
+profile=no
+
+# Add <file or directory> to the black list. It should be a base name, not a
+# path. You may set this option multiple times.
+ignore=.svn
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# Set the cache size for astng objects.
+cache-size=500
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+
+[MESSAGES CONTROL]
+
+# Enable only checker(s) with the given id(s). This option conflicts with the
+# disable-checker option.
+#enable-checker=
+
+# Enable all checker(s) except those with the given id(s). This option conflicts
+# with the disable-checker option.
+#disable-checker=
+
+# Enable all messages in the listed categories.
+#enable-msg-cat=
+
+# Disable all messages in the listed categories.
+#disable-msg-cat=
+
+# Enable the message(s) with the given id(s).
+#enable-msg=
+
+# Disable the message(s) with the given id(s).
+# disable-msg=C0323,W0142,C0301,C0103,C0111,E0213,C0302,C0203,W0703,R0201
+disable-msg=C0301,C0111,C0103,R0201,W0702,C0324
+
+[REPORTS]
+
+# Set the output format. Available formats are text, parseable, colorized and
+# html.
+output-format=colorized
+
+# Include message's id in output.
+include-ids=yes
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file named "pylint_global.[txt|html]".
+files-output=no
+
+# Tells whether to display a full report or only the messages.
+reports=yes
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables errors, warning, statement, which
+# respectively contain the number of errors / warnings messages and the total
+# number of statements analyzed. This is used by the global evaluation report
+# (R0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Add a comment according to your evaluation note. This is used by the global
+# evaluation report (R0004).
+comment=no
+
+# Enable the report(s) with the given id(s).
+#enable-report=
+
+# Disable the report(s) with the given id(s).
+#disable-report=
+
+# checks for
+# * unused variables / imports
+# * undefined variables
+# * redefinition of variable from builtins or from an outer scope
+# * use of variable before assignment
+#
+[VARIABLES]
+
+# Tells whether we should check for unused imports in __init__ files.
+init-import=yes
+
+# A regular expression matching names used for dummy variables (i.e. not used).
+dummy-variables-rgx=_|dummy
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid defining new builtins when possible.
+additional-builtins=
+
+
+# try to find bugs in the code using type inference
+#
+[TYPECHECK]
+
+# Tells whether missing members accessed in a mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# When zope mode is activated, consider the acquired-members option to ignore
+# access to some undefined attributes.
+zope=no
+
+# List of members which are usually obtained through zope's acquisition mechanism and
+# so shouldn't trigger E0201 when accessed (need zope=yes to be considered).
+acquired-members=REQUEST,acl_users,aq_parent + + +# checks for : +# * doc strings +# * modules / classes / functions / methods / arguments / variables name +# * number of arguments, local variables, branches, returns and statements in +# functions, methods +# * required module attributes +# * dangerous default values as arguments +# * redefinition of function / method / class +# * uses of the global statement +# +[BASIC] + +# Required attributes for module, separated by a comma +required-attributes= + +# Regular expression which should only match functions or classes name which do +# not require a docstring +no-docstring-rgx=__.*__ + +# Regular expression which should only match correct module names +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Regular expression which should only match correct module level names +const-rgx=(([A-Z_][A-Z1-9_]*)|(__.*__))$ + +# Regular expression which should only match correct class names +class-rgx=[A-Z_][a-zA-Z0-9]+$ + +# Regular expression which should only match correct function names +function-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct method names +method-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct instance attribute names +attr-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct argument names +argument-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct variable names +variable-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct list comprehension / +# generator expression variable names +inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ + +# Good variable names which should always be accepted, separated by a comma +good-names=i,j,k,ex,Run,_ + +# Bad variable names which should always be refused, separated by a comma +bad-names=foo,bar,baz,toto,tutu,tata + +# List of builtins function names that should not be used, separated by a comma +bad-functions=apply,input + + +# checks for sign of poor/misdesign: +# * number of methods, attributes, local variables... +# * size, complexity of functions, methods +# +[DESIGN] + +# Maximum number of arguments for function / method +max-args=12 + +# Maximum number of locals for function / method body +max-locals=30 + +# Maximum number of return / yield for function / method body +max-returns=12 + +# Maximum number of branch for function / method body +max-branchs=30 + +# Maximum number of statements in function / method body +max-statements=60 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of attributes for a class (see R0902). +max-attributes=20 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=0 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + + +# checks for +# * external modules dependencies +# * relative / wildcard imports +# * cyclic imports +# * uses of deprecated modules +# +[IMPORTS] + +# Deprecated modules which should not be used, separated by a comma +deprecated-modules=regsub,string,TERMIOS,Bastion,rexec + +# Create a graph of every (i.e. 
internal and external) dependencies in the
+# given file (report R0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report R0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report R0402 must
+# not be disabled)
+int-import-graph=
+
+
+# checks for :
+# * methods without self as first argument
+# * overridden method signatures
+# * access only to existent members via self
+# * attributes not defined in the __init__ method
+# * supported interfaces implementation
+# * unreachable code
+#
+[CLASSES]
+
+# List of interface methods to ignore, separated by a comma. This is used for
+# instance to not check methods defined in Zope's Interface base class.
+# ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+
+# checks for similarities and duplicated code. This computation may be
+# memory / CPU intensive, so you should disable it if you experience
+# problems.
+#
+[SIMILARITIES]
+
+# Minimum lines number of a similarity.
+min-similarity-lines=5
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+
+# checks for:
+# * warning notes in the code like FIXME, XXX
+# * PEP 263: source code with non-ascii characters but no encoding declaration
+#
+[MISCELLANEOUS]
+
+# List of note tags to take into consideration, separated by a comma.
+notes=FIXME,XXX,TODO,BUG:
+
+
+# checks for :
+# * unauthorized constructions
+# * strict indentation
+# * line length
+# * use of <> instead of !=
+#
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=80
+
+# Maximum number of lines in a module.
+max-module-lines=1000
+
+# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
+# tab).
+indent-string='    '
diff --git a/desktop/core/ext-py/boto-2.38.0/setup.cfg b/desktop/core/ext-py/boto-2.38.0/setup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..6f08d0e3e7d4475804244ad049d476c45fa48356
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/setup.cfg
@@ -0,0 +1,8 @@
+[bdist_wheel]
+universal = 1
+
+[egg_info]
+tag_build = 
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/desktop/core/ext-py/boto-2.38.0/setup.py b/desktop/core/ext-py/boto-2.38.0/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..5879240e1ec5d29a190479708e5a65aa48b06fc1
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/setup.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
diff --git a/desktop/core/ext-py/boto-2.38.0/setup.py b/desktop/core/ext-py/boto-2.38.0/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..5879240e1ec5d29a190479708e5a65aa48b06fc1
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/setup.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from __future__ import print_function
+
+try:
+    from setuptools import setup
+    extra = dict(test_suite="tests.test.suite", include_package_data=True)
+except ImportError:
+    from distutils.core import setup
+    extra = {}
+
+import sys
+
+from boto import __version__
+
+if sys.version_info < (2, 6):
+    error = "ERROR: boto requires Python Version 2.6 or above...exiting."
+    print(error, file=sys.stderr)
+    sys.exit(1)
+
+def readme():
+    with open("README.rst") as f:
+        return f.read()
+
+setup(name = "boto",
+      version = __version__,
+      description = "Amazon Web Services Library",
+      long_description = readme(),
+      author = "Mitch Garnaat",
+      author_email = "mitch@garnaat.com",
+      scripts = ["bin/sdbadmin", "bin/elbadmin", "bin/cfadmin",
+                 "bin/s3put", "bin/fetch_file", "bin/launch_instance",
+                 "bin/list_instances", "bin/taskadmin", "bin/kill_instance",
+                 "bin/bundle_image", "bin/pyami_sendmail", "bin/lss3",
+                 "bin/cq", "bin/route53", "bin/cwutil", "bin/instance_events",
+                 "bin/asadmin", "bin/glacier", "bin/mturk",
+                 "bin/dynamodb_dump", "bin/dynamodb_load"],
+      url = "https://github.com/boto/boto/",
+      packages = ["boto", "boto.sqs", "boto.s3", "boto.gs", "boto.file",
+                  "boto.ec2", "boto.ec2.cloudwatch", "boto.ec2.autoscale",
+                  "boto.ec2.elb", "boto.sdb", "boto.cacerts",
+                  "boto.sdb.db", "boto.sdb.db.manager",
+                  "boto.mturk", "boto.pyami",
+                  "boto.pyami.installers", "boto.pyami.installers.ubuntu",
+                  "boto.mashups", "boto.contrib", "boto.manage",
+                  "boto.services", "boto.cloudfront",
+                  "boto.roboto", "boto.rds", "boto.vpc", "boto.fps",
+                  "boto.emr", "boto.sns",
+                  "boto.ecs", "boto.iam", "boto.route53", "boto.ses",
+                  "boto.cloudformation", "boto.sts", "boto.dynamodb",
+                  "boto.swf", "boto.mws", "boto.cloudsearch", "boto.glacier",
+                  "boto.beanstalk", "boto.datapipeline", "boto.elasticache",
+                  "boto.elastictranscoder", "boto.opsworks", "boto.redshift",
+                  "boto.dynamodb2", "boto.support", "boto.cloudtrail",
+                  "boto.directconnect", "boto.kinesis", "boto.rds2",
+                  "boto.cloudsearch2", "boto.logs", "boto.vendored",
+                  "boto.route53.domains", "boto.cognito",
+                  "boto.cognito.identity", "boto.cognito.sync",
+                  "boto.cloudsearchdomain", "boto.kms",
+                  "boto.awslambda", "boto.codedeploy", "boto.configservice",
+                  "boto.cloudhsm", "boto.ec2containerservice",
+                  "boto.machinelearning"],
+      package_data = {
+          "boto.cacerts": ["cacerts.txt"],
+          "boto": ["endpoints.json"],
+      },
+      license = "MIT",
+      platforms = "Posix; MacOS X; Windows",
+      classifiers = 
["Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", + "Topic :: Internet", + "Programming Language :: Python :: 2", + "Programming Language :: Python :: 2.6", + "Programming Language :: Python :: 2.7", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.3", + "Programming Language :: Python :: 3.4"], + **extra + ) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..771ca94b9d140f4b66c789a2339b5b4f3f54a61d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. diff --git a/desktop/core/ext-py/boto-2.38.0/tests/compat.py b/desktop/core/ext-py/boto-2.38.0/tests/compat.py new file mode 100644 index 0000000000000000000000000000000000000000..783320f34b7992492c5f2cd452d693bd9d9356c6 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/compat.py @@ -0,0 +1,38 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+#
+# Use unittest2 for older versions of Python
+try:
+    import unittest2 as unittest
+except ImportError:
+    import unittest
+
+# Use the third party ordereddict for older versions of Python
+try:
+    from collections import OrderedDict
+except ImportError:
+    from ordereddict import OrderedDict
+
+# Use stdlib unittest.mock if possible; the third party mock package lacks Python 3.4 support
+try:
+    from unittest import mock
+except ImportError:
+    import mock
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/db/test_lists.py b/desktop/core/ext-py/boto-2.38.0/tests/db/test_lists.py
new file mode 100644
index 0000000000000000000000000000000000000000..48612726d5525ad215732ba9fed875610995256c
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/db/test_lists.py
@@ -0,0 +1,96 @@
+# Copyright (c) 2010 Chris Moyer http://coredumped.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
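The compat module above gives the rest of the test suite a single import point that works across Python versions. A minimal sketch of a test using those shims, assuming it is run from the boto source root so the tests package is importable; the class name and values are illustrative only:

from tests.compat import OrderedDict, mock, unittest

class CompatShimExample(unittest.TestCase):
    def test_shims(self):
        # mock.Mock comes from unittest.mock or the mock package, whichever
        # the compat module found first.
        fake = mock.Mock(return_value=42)
        self.assertEqual(fake(), 42)
        # OrderedDict preserves insertion order on any supported Python.
        d = OrderedDict([('a', 1), ('b', 2)])
        self.assertEqual(list(d), ['a', 'b'])

if __name__ == '__main__':
    unittest.main()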
+# +from boto.sdb.db.property import ListProperty +from boto.sdb.db.model import Model +import time + +class SimpleListModel(Model): + """Test the List Property""" + nums = ListProperty(int) + strs = ListProperty(str) + +class TestLists(object): + """Test the List property""" + + def setup_class(cls): + """Setup this class""" + cls.objs = [] + + def teardown_class(cls): + """Remove our objects""" + for o in cls.objs: + try: + o.delete() + except: + pass + + def test_list_order(self): + """Testing the order of lists""" + t = SimpleListModel() + t.nums = [5, 4, 1, 3, 2] + t.strs = ["B", "C", "A", "D", "Foo"] + t.put() + self.objs.append(t) + time.sleep(3) + t = SimpleListModel.get_by_id(t.id) + assert(t.nums == [5, 4, 1, 3, 2]) + assert(t.strs == ["B", "C", "A", "D", "Foo"]) + + def test_old_compat(self): + """Testing to make sure the old method of encoding lists will still return results""" + t = SimpleListModel() + t.put() + self.objs.append(t) + time.sleep(3) + item = t._get_raw_item() + item['strs'] = ["A", "B", "C"] + item.save() + time.sleep(3) + t = SimpleListModel.get_by_id(t.id) + i1 = sorted(item['strs']) + i2 = t.strs + i2.sort() + assert(i1 == i2) + + def test_query_equals(self): + """We noticed a slight problem with querying, since the query uses the same encoder, + it was asserting that the value was at the same position in the list, not just "in" the list""" + t = SimpleListModel() + t.strs = ["Bizzle", "Bar"] + t.put() + self.objs.append(t) + time.sleep(3) + assert(SimpleListModel.find(strs="Bizzle").count() == 1) + assert(SimpleListModel.find(strs="Bar").count() == 1) + assert(SimpleListModel.find(strs=["Bar", "Bizzle"]).count() == 1) + + def test_query_not_equals(self): + """Test a not equal filter""" + t = SimpleListModel() + t.strs = ["Fizzle"] + t.put() + self.objs.append(t) + time.sleep(3) + print SimpleListModel.all().filter("strs !=", "Fizzle").get_query() + for tt in SimpleListModel.all().filter("strs !=", "Fizzle"): + print tt.strs + assert("Fizzle" not in tt.strs) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/db/test_password.py b/desktop/core/ext-py/boto-2.38.0/tests/db/test_password.py new file mode 100644 index 0000000000000000000000000000000000000000..74c340952619067c3cba68149ce827ab119d514e --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/db/test_password.py @@ -0,0 +1,128 @@ +# Copyright (c) 2010 Robert Mela +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
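The db tests above and below exercise boto's SimpleDB-backed Model layer against the live service, which is why they sleep between writes: SimpleDB is eventually consistent. A minimal sketch of the connection those tests rely on implicitly, assuming AWS credentials are configured in the environment or in ~/.boto:

# Hedged sketch: the same credentials the Model tests pick up automatically
# can be used to inspect the SimpleDB domains they create.
import boto

sdb = boto.connect_sdb()
print([domain.name for domain in sdb.get_all_domains()])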
+ + +import unittest +import logging +import time + +log= logging.getLogger('password_property_test') +log.setLevel(logging.DEBUG) + +class PasswordPropertyTest(unittest.TestCase): + """Test the PasswordProperty""" + + def tearDown(self): + cls=self.test_model() + for obj in cls.all(): obj.delete() + + def hmac_hashfunc(self): + import hmac + def hashfunc(msg): + return hmac.new('mysecret', msg) + return hashfunc + + def test_model(self,hashfunc=None): + from boto.utils import Password + from boto.sdb.db.model import Model + from boto.sdb.db.property import PasswordProperty + import hashlib + class MyModel(Model): + password=PasswordProperty(hashfunc=hashfunc) + return MyModel + + def test_custom_password_class(self): + from boto.utils import Password + from boto.sdb.db.model import Model + from boto.sdb.db.property import PasswordProperty + import hmac, hashlib + + + myhashfunc = hashlib.md5 + ## Define a new Password class + class MyPassword(Password): + hashfunc = myhashfunc #hashlib.md5 #lambda cls,msg: hmac.new('mysecret',msg) + + ## Define a custom password property using the new Password class + + class MyPasswordProperty(PasswordProperty): + data_type=MyPassword + type_name=MyPassword.__name__ + + ## Define a model using the new password property + + class MyModel(Model): + password=MyPasswordProperty()#hashfunc=hashlib.md5) + + obj = MyModel() + obj.password = 'bar' + expected = myhashfunc('bar').hexdigest() #hmac.new('mysecret','bar').hexdigest() + log.debug("\npassword=%s\nexpected=%s" % (obj.password, expected)) + self.assertTrue(obj.password == 'bar' ) + obj.save() + id= obj.id + time.sleep(5) + obj = MyModel.get_by_id(id) + self.assertEquals(obj.password, 'bar') + self.assertEquals(str(obj.password), expected) + #hmac.new('mysecret','bar').hexdigest()) + + + def test_aaa_default_password_property(self): + cls = self.test_model() + obj = cls(id='passwordtest') + obj.password = 'foo' + self.assertEquals('foo', obj.password) + obj.save() + time.sleep(5) + obj = cls.get_by_id('passwordtest') + self.assertEquals('foo', obj.password) + + def test_password_constructor_hashfunc(self): + import hmac + myhashfunc=lambda msg: hmac.new('mysecret', msg) + cls = self.test_model(hashfunc=myhashfunc) + obj = cls() + obj.password='hello' + expected = myhashfunc('hello').hexdigest() + self.assertEquals(obj.password, 'hello') + self.assertEquals(str(obj.password), expected) + obj.save() + id = obj.id + time.sleep(5) + obj = cls.get_by_id(id) + log.debug("\npassword=%s" % obj.password) + self.assertTrue(obj.password == 'hello') + + + +if __name__ == '__main__': + import sys, os + curdir = os.path.dirname( os.path.abspath(__file__) ) + srcroot = curdir + "/../.." 
+ sys.path = [ srcroot ] + sys.path + logging.basicConfig() + log.setLevel(logging.INFO) + suite = unittest.TestLoader().loadTestsFromTestCase(PasswordPropertyTest) + unittest.TextTestRunner(verbosity=2).run(suite) + + import boto + diff --git a/desktop/core/ext-py/boto-2.38.0/tests/db/test_query.py b/desktop/core/ext-py/boto-2.38.0/tests/db/test_query.py new file mode 100644 index 0000000000000000000000000000000000000000..047bf87313b0a09cf5d154fe926e3638b10893b0 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/db/test_query.py @@ -0,0 +1,152 @@ +# Copyright (c) 2010 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.sdb.db.property import ListProperty, StringProperty, ReferenceProperty, IntegerProperty +from boto.sdb.db.model import Model +import time + +class SimpleModel(Model): + """Simple Test Model""" + name = StringProperty() + strs = ListProperty(str) + num = IntegerProperty() + +class SubModel(SimpleModel): + """Simple Subclassed Model""" + ref = ReferenceProperty(SimpleModel, collection_name="reverse_ref") + + +class TestQuerying(object): + """Test different querying capabilities""" + + def setup_class(cls): + """Setup this class""" + cls.objs = [] + + o = SimpleModel() + o.name = "Simple Object" + o.strs = ["B", "A", "C", "Foo"] + o.num = 1 + o.put() + cls.objs.append(o) + + o2 = SimpleModel() + o2.name = "Referenced Object" + o2.num = 2 + o2.put() + cls.objs.append(o2) + + o3 = SubModel() + o3.name = "Sub Object" + o3.num = 3 + o3.ref = o2 + o3.put() + cls.objs.append(o3) + + time.sleep(3) + + + + def teardown_class(cls): + """Remove our objects""" + for o in cls.objs: + try: + o.delete() + except: + pass + + def test_find(self): + """Test using the "Find" method""" + assert(SimpleModel.find(name="Simple Object").next().id == self.objs[0].id) + assert(SimpleModel.find(name="Referenced Object").next().id == self.objs[1].id) + assert(SimpleModel.find(name="Sub Object").next().id == self.objs[2].id) + + def test_like_filter(self): + """Test a "like" filter""" + query = SimpleModel.all() + query.filter("name like", "% Object") + assert(query.count() == 3) + + query = SimpleModel.all() + query.filter("name not like", "% Object") + assert(query.count() == 0) + + def test_equals_filter(self): + """Test an "=" and "!=" filter""" + query = SimpleModel.all() + query.filter("name =", "Simple Object") + assert(query.count() == 1) + + query = SimpleModel.all() + query.filter("name !=", "Simple Object") + assert(query.count() == 2) + + def 
test_or_filter(self): + """Test a filter function as an "or" """ + query = SimpleModel.all() + query.filter("name =", ["Simple Object", "Sub Object"]) + assert(query.count() == 2) + + def test_and_filter(self): + """Test Multiple filters which are an "and" """ + query = SimpleModel.all() + query.filter("name like", "% Object") + query.filter("name like", "Simple %") + assert(query.count() == 1) + + def test_none_filter(self): + """Test filtering for a value that's not set""" + query = SimpleModel.all() + query.filter("ref =", None) + assert(query.count() == 2) + + def test_greater_filter(self): + """Test filtering Using >, >=""" + query = SimpleModel.all() + query.filter("num >", 1) + assert(query.count() == 2) + + query = SimpleModel.all() + query.filter("num >=", 1) + assert(query.count() == 3) + + def test_less_filter(self): + """Test filtering Using <, <=""" + query = SimpleModel.all() + query.filter("num <", 3) + assert(query.count() == 2) + + query = SimpleModel.all() + query.filter("num <=", 3) + assert(query.count() == 3) + + def test_query_on_list(self): + """Test querying on a list""" + assert(SimpleModel.find(strs="A").next().id == self.objs[0].id) + assert(SimpleModel.find(strs="B").next().id == self.objs[0].id) + assert(SimpleModel.find(strs="C").next().id == self.objs[0].id) + + def test_like(self): + """Test with a "like" expression""" + query = SimpleModel.all() + query.filter("strs like", "%oo%") + print query.get_query() + assert(query.count() == 1) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/db/test_sequence.py b/desktop/core/ext-py/boto-2.38.0/tests/db/test_sequence.py new file mode 100644 index 0000000000000000000000000000000000000000..b950ee69768f81f55d962d5261bc62c0a5566f67 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/db/test_sequence.py @@ -0,0 +1,109 @@ +# Copyright (c) 2010 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
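The querying tests above cover both entry points of the SDB model query API: Model.find(attr=value) for simple equality, and Model.all().filter(...) for like/comparison/None filters that combine as an "and". A minimal sketch of the same API on a hypothetical model; as with the tests above, live SimpleDB credentials are needed to actually execute the query:

import time

from boto.sdb.db.model import Model
from boto.sdb.db.property import IntegerProperty, StringProperty

class Widget(Model):
    # Hypothetical model, mirroring the SimpleModel defined above.
    name = StringProperty()
    num = IntegerProperty()

w = Widget()
w.name = 'demo Object'
w.num = 1
w.put()
time.sleep(3)  # SimpleDB is eventually consistent, hence the sleeps above

query = Widget.all()
query.filter('num >=', 1)
query.filter('name like', '% Object')
print(query.get_query())  # the select expression the filters compile to
print(query.count())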
+
+
+class TestSequences(object):
+    """Test the Sequence and SequenceGenerator support"""
+
+    def setup_class(cls):
+        """Setup this class"""
+        cls.sequences = []
+
+    def teardown_class(cls):
+        """Remove our sequences"""
+        for s in cls.sequences:
+            try:
+                s.delete()
+            except:
+                pass
+
+    def test_sequence_generator_no_rollover(self):
+        """Test the sequence generator without rollover"""
+        from boto.sdb.db.sequence import SequenceGenerator
+        gen = SequenceGenerator("ABC")
+        assert(gen("") == "A")
+        assert(gen("A") == "B")
+        assert(gen("B") == "C")
+        assert(gen("C") == "AA")
+        assert(gen("AC") == "BA")
+
+    def test_sequence_generator_with_rollover(self):
+        """Test the sequence generator with rollover"""
+        from boto.sdb.db.sequence import SequenceGenerator
+        gen = SequenceGenerator("ABC", rollover=True)
+        assert(gen("") == "A")
+        assert(gen("A") == "B")
+        assert(gen("B") == "C")
+        assert(gen("C") == "A")
+
+    def test_sequence_simple_int(self):
+        """Test a simple counter sequence"""
+        from boto.sdb.db.sequence import Sequence
+        s = Sequence()
+        self.sequences.append(s)
+        assert(s.val == 0)
+        assert(s.next() == 1)
+        assert(s.next() == 2)
+        s2 = Sequence(s.id)
+        assert(s2.val == 2)
+        assert(s.next() == 3)
+        assert(s.val == 3)
+        assert(s2.val == 3)
+
+    def test_sequence_simple_string(self):
+        from boto.sdb.db.sequence import Sequence, increment_string
+        s = Sequence(fnc=increment_string)
+        self.sequences.append(s)
+        assert(s.val == "A")
+        assert(s.next() == "B")
+
+    def test_fib(self):
+        """Test the fibonacci sequence generator"""
+        from boto.sdb.db.sequence import fib
+        # Just check the first few numbers in the sequence
+        lv = 0
+        for v in [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]:
+            assert(fib(v, lv) == lv+v)
+            lv = fib(v, lv)
+
+    def test_sequence_fib(self):
+        """Test the fibonacci sequence"""
+        from boto.sdb.db.sequence import Sequence, fib
+        s = Sequence(fnc=fib)
+        s2 = Sequence(s.id)
+        self.sequences.append(s)
+        assert(s.val == 1)
+        # Just check the first few numbers in the sequence
+        for v in [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]:
+            assert(s.next() == v)
+            assert(s.val == v)
+            assert(s2.val == v)  # it shouldn't matter which reference we use since it's guaranteed to be consistent
+
+    def test_sequence_string(self):
+        """Test the String incrementation sequence"""
+        from boto.sdb.db.sequence import Sequence, increment_string
+        s = Sequence(fnc=increment_string)
+        self.sequences.append(s)
+        assert(s.val == "A")
+        assert(s.next() == "B")
+        s.val = "Z"
+        assert(s.val == "Z")
+        assert(s.next() == "AA")
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/devpay/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/devpay/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/devpay/test_s3.py b/desktop/core/ext-py/boto-2.38.0/tests/devpay/test_s3.py
new file mode 100644
index 0000000000000000000000000000000000000000..86665702dcb3bef91d879e2c7171a5c6e2e32913
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/devpay/test_s3.py
@@ -0,0 +1,181 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the
Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Some unit tests for the S3Connection +""" + +import time +import os +import urllib + +from boto.s3.connection import S3Connection +from boto.exception import S3PermissionsError + +# this test requires a devpay product and user token to run: + +AMAZON_USER_TOKEN = '{UserToken}...your token here...' +DEVPAY_HEADERS = { 'x-amz-security-token': AMAZON_USER_TOKEN } + +def test(): + print '--- running S3Connection tests (DevPay) ---' + c = S3Connection() + # create a new, empty bucket + bucket_name = 'test-%d' % int(time.time()) + bucket = c.create_bucket(bucket_name, headers=DEVPAY_HEADERS) + # now try a get_bucket call and see if it's really there + bucket = c.get_bucket(bucket_name, headers=DEVPAY_HEADERS) + # test logging + logging_bucket = c.create_bucket(bucket_name + '-log', headers=DEVPAY_HEADERS) + logging_bucket.set_as_logging_target(headers=DEVPAY_HEADERS) + bucket.enable_logging(target_bucket=logging_bucket, target_prefix=bucket.name, headers=DEVPAY_HEADERS) + bucket.disable_logging(headers=DEVPAY_HEADERS) + c.delete_bucket(logging_bucket, headers=DEVPAY_HEADERS) + # create a new key and store it's content from a string + k = bucket.new_key() + k.name = 'foobar' + s1 = 'This is a test of file upload and download' + s2 = 'This is a second string to test file upload and download' + k.set_contents_from_string(s1, headers=DEVPAY_HEADERS) + fp = open('foobar', 'wb') + # now get the contents from s3 to a local file + k.get_contents_to_file(fp, headers=DEVPAY_HEADERS) + fp.close() + fp = open('foobar') + # check to make sure content read from s3 is identical to original + assert s1 == fp.read(), 'corrupted file' + fp.close() + # test generated URLs + url = k.generate_url(3600, headers=DEVPAY_HEADERS) + file = urllib.urlopen(url) + assert s1 == file.read(), 'invalid URL %s' % url + url = k.generate_url(3600, force_http=True, headers=DEVPAY_HEADERS) + file = urllib.urlopen(url) + assert s1 == file.read(), 'invalid URL %s' % url + bucket.delete_key(k, headers=DEVPAY_HEADERS) + # test a few variations on get_all_keys - first load some data + # for the first one, let's override the content type + phony_mimetype = 'application/x-boto-test' + headers = {'Content-Type': phony_mimetype} + headers.update(DEVPAY_HEADERS) + k.name = 'foo/bar' + k.set_contents_from_string(s1, headers) + k.name = 'foo/bas' + k.set_contents_from_filename('foobar', headers=DEVPAY_HEADERS) + k.name = 'foo/bat' + k.set_contents_from_string(s1, headers=DEVPAY_HEADERS) + k.name = 'fie/bar' + k.set_contents_from_string(s1, headers=DEVPAY_HEADERS) + k.name = 'fie/bas' + k.set_contents_from_string(s1, headers=DEVPAY_HEADERS) + k.name = 'fie/bat' + k.set_contents_from_string(s1, headers=DEVPAY_HEADERS) + # try resetting the contents to another value + md5 = k.md5 + k.set_contents_from_string(s2, headers=DEVPAY_HEADERS) + assert k.md5 != md5 + 
os.unlink('foobar') + all = bucket.get_all_keys(headers=DEVPAY_HEADERS) + assert len(all) == 6 + rs = bucket.get_all_keys(prefix='foo', headers=DEVPAY_HEADERS) + assert len(rs) == 3 + rs = bucket.get_all_keys(prefix='', delimiter='/', headers=DEVPAY_HEADERS) + assert len(rs) == 2 + rs = bucket.get_all_keys(maxkeys=5, headers=DEVPAY_HEADERS) + assert len(rs) == 5 + # test the lookup method + k = bucket.lookup('foo/bar', headers=DEVPAY_HEADERS) + assert isinstance(k, bucket.key_class) + assert k.content_type == phony_mimetype + k = bucket.lookup('notthere', headers=DEVPAY_HEADERS) + assert k == None + # try some metadata stuff + k = bucket.new_key() + k.name = 'has_metadata' + mdkey1 = 'meta1' + mdval1 = 'This is the first metadata value' + k.set_metadata(mdkey1, mdval1) + mdkey2 = 'meta2' + mdval2 = 'This is the second metadata value' + k.set_metadata(mdkey2, mdval2) + k.set_contents_from_string(s1, headers=DEVPAY_HEADERS) + k = bucket.lookup('has_metadata', headers=DEVPAY_HEADERS) + assert k.get_metadata(mdkey1) == mdval1 + assert k.get_metadata(mdkey2) == mdval2 + k = bucket.new_key() + k.name = 'has_metadata' + k.get_contents_as_string(headers=DEVPAY_HEADERS) + assert k.get_metadata(mdkey1) == mdval1 + assert k.get_metadata(mdkey2) == mdval2 + bucket.delete_key(k, headers=DEVPAY_HEADERS) + # test list and iterator + rs1 = bucket.list(headers=DEVPAY_HEADERS) + num_iter = 0 + for r in rs1: + num_iter = num_iter + 1 + rs = bucket.get_all_keys(headers=DEVPAY_HEADERS) + num_keys = len(rs) + assert num_iter == num_keys + # try a key with a funny character + k = bucket.new_key() + k.name = 'testnewline\n' + k.set_contents_from_string('This is a test', headers=DEVPAY_HEADERS) + rs = bucket.get_all_keys(headers=DEVPAY_HEADERS) + assert len(rs) == num_keys + 1 + bucket.delete_key(k, headers=DEVPAY_HEADERS) + rs = bucket.get_all_keys(headers=DEVPAY_HEADERS) + assert len(rs) == num_keys + # try some acl stuff + bucket.set_acl('public-read', headers=DEVPAY_HEADERS) + policy = bucket.get_acl(headers=DEVPAY_HEADERS) + assert len(policy.acl.grants) == 2 + bucket.set_acl('private', headers=DEVPAY_HEADERS) + policy = bucket.get_acl(headers=DEVPAY_HEADERS) + assert len(policy.acl.grants) == 1 + k = bucket.lookup('foo/bar', headers=DEVPAY_HEADERS) + k.set_acl('public-read', headers=DEVPAY_HEADERS) + policy = k.get_acl(headers=DEVPAY_HEADERS) + assert len(policy.acl.grants) == 2 + k.set_acl('private', headers=DEVPAY_HEADERS) + policy = k.get_acl(headers=DEVPAY_HEADERS) + assert len(policy.acl.grants) == 1 + # try the convenience methods for grants + # this doesn't work with devpay + #bucket.add_user_grant('FULL_CONTROL', + # 'c1e724fbfa0979a4448393c59a8c055011f739b6d102fb37a65f26414653cd67', + # headers=DEVPAY_HEADERS) + try: + bucket.add_email_grant('foobar', 'foo@bar.com', headers=DEVPAY_HEADERS) + except S3PermissionsError: + pass + # now delete all keys in bucket + for k in all: + bucket.delete_key(k, headers=DEVPAY_HEADERS) + # now delete bucket + + c.delete_bucket(bucket, headers=DEVPAY_HEADERS) + + print '--- tests completed ---' + +if __name__ == '__main__': + test() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/fps/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/fps/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/fps/test.py b/desktop/core/ext-py/boto-2.38.0/tests/fps/test.py new file mode 100755 index 
0000000000000000000000000000000000000000..d5efb4b75c496850c05b7e4f3c4683b194445fd1 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/fps/test.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python + +from tests.unit import unittest +import sys +import os +import os.path + +simple = True +advanced = False +if __name__ == "__main__": + devpath = os.path.relpath(os.path.join('..', '..'), + start=os.path.dirname(__file__)) + sys.path = [devpath] + sys.path + print '>>> advanced FPS tests; using local boto sources' + advanced = True + +from boto.fps.connection import FPSConnection +from boto.fps.response import ComplexAmount + + +class FPSTestCase(unittest.TestCase): + + def setUp(self): + self.fps = FPSConnection(host='fps.sandbox.amazonaws.com') + if advanced: + self.activity = self.fps.get_account_activity(\ + StartDate='2012-01-01') + result = self.activity.GetAccountActivityResult + self.transactions = result.Transaction + + @unittest.skipUnless(simple, "skipping simple test") + def test_get_account_balance(self): + response = self.fps.get_account_balance() + self.assertTrue(hasattr(response, 'GetAccountBalanceResult')) + self.assertTrue(hasattr(response.GetAccountBalanceResult, + 'AccountBalance')) + accountbalance = response.GetAccountBalanceResult.AccountBalance + self.assertTrue(hasattr(accountbalance, 'TotalBalance')) + self.assertIsInstance(accountbalance.TotalBalance, ComplexAmount) + self.assertTrue(hasattr(accountbalance, 'AvailableBalances')) + availablebalances = accountbalance.AvailableBalances + self.assertTrue(hasattr(availablebalances, 'RefundBalance')) + + @unittest.skipUnless(simple, "skipping simple test") + def test_complex_amount(self): + response = self.fps.get_account_balance() + accountbalance = response.GetAccountBalanceResult.AccountBalance + asfloat = float(accountbalance.TotalBalance.Value) + self.assertIn('.', str(asfloat)) + + @unittest.skipUnless(simple, "skipping simple test") + def test_required_arguments(self): + with self.assertRaises(KeyError): + self.fps.write_off_debt(AdjustmentAmount=123.45) + + @unittest.skipUnless(simple, "skipping simple test") + def test_cbui_url(self): + inputs = { + 'transactionAmount': 123.45, + 'pipelineName': 'SingleUse', + 'returnURL': 'https://localhost/', + 'paymentReason': 'a reason for payment', + 'callerReference': 'foo', + } + result = self.fps.cbui_url(**inputs) + print "cbui_url() yields {0}".format(result) + + @unittest.skipUnless(simple, "skipping simple test") + def test_get_account_activity(self): + response = self.fps.get_account_activity(StartDate='2012-01-01') + self.assertTrue(hasattr(response, 'GetAccountActivityResult')) + result = response.GetAccountActivityResult + self.assertTrue(hasattr(result, 'BatchSize')) + try: + int(result.BatchSize) + except: + self.assertTrue(False) + + @unittest.skipUnless(advanced, "skipping advanced test") + def test_get_transaction(self): + assert len(self.transactions) + transactionid = self.transactions[0].TransactionId + result = self.fps.get_transaction(TransactionId=transactionid) + self.assertTrue(hasattr(result.GetTransactionResult, 'Transaction')) + + @unittest.skip('cosmetic') + def test_bad_request(self): + try: + self.fps.write_off_debt(CreditInstrumentId='foo', + AdjustmentAmount=123.45) + except Exception, e: + print e + + @unittest.skip('cosmetic') + def test_repr(self): + print self.fps.get_account_balance() + + +if __name__ == "__main__": + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/fps/test_verify_signature.py 
b/desktop/core/ext-py/boto-2.38.0/tests/fps/test_verify_signature.py new file mode 100644 index 0000000000000000000000000000000000000000..efc037fb51431a15b86c3e2b0c79c079ab15023e --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/fps/test_verify_signature.py @@ -0,0 +1,12 @@ +from boto.fps.connection import FPSConnection + +def test(): + conn = FPSConnection() + # example response from the docs + params = 'expiry=08%2F2015&signature=ynDukZ9%2FG77uSJVb5YM0cadwHVwYKPMKOO3PNvgADbv6VtymgBxeOWEhED6KGHsGSvSJnMWDN%2FZl639AkRe9Ry%2F7zmn9CmiM%2FZkp1XtshERGTqi2YL10GwQpaH17MQqOX3u1cW4LlyFoLy4celUFBPq1WM2ZJnaNZRJIEY%2FvpeVnCVK8VIPdY3HMxPAkNi5zeF2BbqH%2BL2vAWef6vfHkNcJPlOuOl6jP4E%2B58F24ni%2B9ek%2FQH18O4kw%2FUJ7ZfKwjCCI13%2BcFybpofcKqddq8CuUJj5Ii7Pdw1fje7ktzHeeNhF0r9siWcYmd4JaxTP3NmLJdHFRq2T%2FgsF3vK9m3gw%3D%3D&signatureVersion=2&signatureMethod=RSA-SHA1&certificateUrl=https%3A%2F%2Ffps.sandbox.amazonaws.com%2Fcerts%2F090909%2FPKICert.pem&tokenID=A5BB3HUNAZFJ5CRXIPH72LIODZUNAUZIVP7UB74QNFQDSQ9MN4HPIKISQZWPLJXF&status=SC&callerReference=callerReferenceMultiUse1' + endpoint = 'http://vamsik.desktop.amazon.com:8080/ipn.jsp' + conn.verify_signature(endpoint, params) + + +if __name__ == '__main__': + test() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f03a609b8f26e78cd94861afca9fc4462f627a52 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/__init__.py @@ -0,0 +1,63 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Base class to make checking the certs easier. +""" + + +# We subclass from ``object`` instead of ``TestCase`` here so that this doesn't +# add noise to the test suite (otherwise these no-ops would run on every +# import). +class ServiceCertVerificationTest(object): + ssl = True + + # SUBCLASSES MUST OVERRIDE THIS! + # Something like ``boto.sqs.regions()``... + regions = [] + + def test_certs(self): + self.assertTrue(len(self.regions) > 0) + + for region in self.regions: + special_access_required = False + + for snippet in ('gov', 'cn-'): + if snippet in region.name: + special_access_required = True + break + + try: + c = region.connect() + self.sample_service_call(c) + except: + # This is bad (because the SSL cert failed). Re-raise the + # exception. 
+ if not special_access_required: + raise + + def sample_service_call(self, conn): + """ + Subclasses should override this method to do a service call that will + always succeed (like fetch a list, even if it's empty). + """ + pass diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/awslambda/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/awslambda/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a3575e2ed1f4fb166a786d2a051f6d43d682ece2 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/awslambda/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/awslambda/test_awslambda.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/awslambda/test_awslambda.py new file mode 100644 index 0000000000000000000000000000000000000000..8945922a57ac8b6c5f7981839bf3b5781ed63fa2 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/awslambda/test_awslambda.py @@ -0,0 +1,38 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
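The ServiceCertVerificationTest base above needs only two things from a subclass: a regions list and a sample_service_call. A minimal sketch for SQS, following the ``boto.sqs.regions()`` hint in the base class's own comment (the cloudformation test later in this patch has the same shape):

import unittest

import boto.sqs
from tests.integration import ServiceCertVerificationTest

class SQSCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
    sqs = True
    regions = boto.sqs.regions()

    def sample_service_call(self, conn):
        # A call that always succeeds (possibly with an empty result), which
        # is all the base class requires to exercise each region's SSL cert.
        conn.get_all_queues()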
+#
+import boto
+from boto.awslambda.exceptions import ResourceNotFoundException
+from tests.compat import unittest
+
+
+class TestAWSLambda(unittest.TestCase):
+    def setUp(self):
+        self.awslambda = boto.connect_awslambda()
+
+    def test_list_functions(self):
+        response = self.awslambda.list_functions()
+        self.assertIn('Functions', response)
+
+    def test_resource_not_found_exceptions(self):
+        with self.assertRaises(ResourceNotFoundException):
+            self.awslambda.get_function(function_name='non-existent-function')
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/beanstalk/test_wrapper.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/beanstalk/test_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..d84581fd5359a234f6815c711ee68d4731f6119b
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/beanstalk/test_wrapper.py
@@ -0,0 +1,209 @@
+import random
+import time
+from functools import partial
+
+from tests.compat import unittest
+from boto.beanstalk.wrapper import Layer1Wrapper
+import boto.beanstalk.response as response
+
+
+class BasicSuite(unittest.TestCase):
+    def setUp(self):
+        self.random_id = str(random.randint(1, 1000000))
+        self.app_name = 'app-' + self.random_id
+        self.app_version = 'version-' + self.random_id
+        self.template = 'template-' + self.random_id
+        self.environment = 'environment-' + self.random_id
+        self.beanstalk = Layer1Wrapper()
+
+
+class MiscSuite(BasicSuite):
+    def test_check_dns_availability(self):
+        result = self.beanstalk.check_dns_availability('amazon')
+        self.assertIsInstance(result, response.CheckDNSAvailabilityResponse,
+                              'incorrect response object returned')
+        self.assertFalse(result.available)
+
+
+class TestApplicationObjects(BasicSuite):
+    def create_application(self):
+        # This method is used for any API calls that require an application
+        # object. This also adds a cleanup step to automatically delete the
+        # app when the test is finished. No assertions are performed
+        # here. If you want to validate create_application, don't use this
+        # method.
+        self.beanstalk.create_application(application_name=self.app_name)
+        self.addCleanup(partial(self.beanstalk.delete_application,
+                                application_name=self.app_name))
+
+    def test_create_delete_application_version(self):
+        # This will create an app, create an app version, delete the app
+        # version, and delete the app. For each API call we check that the
+        # return type is what we expect and that a few attributes have the
+        # correct values.
+ app_result = self.beanstalk.create_application(application_name=self.app_name) + self.assertIsInstance(app_result, response.CreateApplicationResponse) + self.assertEqual(app_result.application.application_name, self.app_name) + + version_result = self.beanstalk.create_application_version( + application_name=self.app_name, version_label=self.app_version) + self.assertIsInstance(version_result, response.CreateApplicationVersionResponse) + self.assertEqual(version_result.application_version.version_label, + self.app_version) + result = self.beanstalk.delete_application_version( + application_name=self.app_name, version_label=self.app_version) + self.assertIsInstance(result, response.DeleteApplicationVersionResponse) + result = self.beanstalk.delete_application( + application_name=self.app_name + ) + self.assertIsInstance(result, response.DeleteApplicationResponse) + + def test_create_configuration_template(self): + self.create_application() + result = self.beanstalk.create_configuration_template( + application_name=self.app_name, template_name=self.template, + solution_stack_name='32bit Amazon Linux running Tomcat 6') + self.assertIsInstance( + result, response.CreateConfigurationTemplateResponse) + self.assertEqual(result.solution_stack_name, + '32bit Amazon Linux running Tomcat 6') + + def test_create_storage_location(self): + result = self.beanstalk.create_storage_location() + self.assertIsInstance(result, response.CreateStorageLocationResponse) + + def test_update_application(self): + self.create_application() + result = self.beanstalk.update_application(application_name=self.app_name) + self.assertIsInstance(result, response.UpdateApplicationResponse) + + def test_update_application_version(self): + self.create_application() + self.beanstalk.create_application_version( + application_name=self.app_name, version_label=self.app_version) + result = self.beanstalk.update_application_version( + application_name=self.app_name, version_label=self.app_version) + self.assertIsInstance( + result, response.UpdateApplicationVersionResponse) + + +class GetSuite(BasicSuite): + def test_describe_applications(self): + result = self.beanstalk.describe_applications() + self.assertIsInstance(result, response.DescribeApplicationsResponse) + + def test_describe_application_versions(self): + result = self.beanstalk.describe_application_versions() + self.assertIsInstance(result, + response.DescribeApplicationVersionsResponse) + + + def test_describe_configuration_options(self): + result = self.beanstalk.describe_configuration_options() + self.assertIsInstance(result, + response.DescribeConfigurationOptionsResponse) + + def test_12_describe_environments(self): + result = self.beanstalk.describe_environments() + self.assertIsInstance( + result, response.DescribeEnvironmentsResponse) + + def test_14_describe_events(self): + result = self.beanstalk.describe_events() + self.assertIsInstance(result, response.DescribeEventsResponse) + + def test_15_list_available_solution_stacks(self): + result = self.beanstalk.list_available_solution_stacks() + self.assertIsInstance( + result, response.ListAvailableSolutionStacksResponse) + self.assertIn('32bit Amazon Linux running Tomcat 6', + result.solution_stacks) + + + +class TestsWithEnvironment(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.random_id = str(random.randint(1, 1000000)) + cls.app_name = 'app-' + cls.random_id + cls.environment = 'environment-' + cls.random_id + cls.template = 'template-' + cls.random_id + + cls.beanstalk = Layer1Wrapper() + 
cls.beanstalk.create_application(application_name=cls.app_name) + cls.beanstalk.create_configuration_template( + application_name=cls.app_name, template_name=cls.template, + solution_stack_name='32bit Amazon Linux running Tomcat 6') + cls.app_version = 'version-' + cls.random_id + cls.beanstalk.create_application_version( + application_name=cls.app_name, version_label=cls.app_version) + cls.beanstalk.create_environment(cls.app_name, cls.environment, + template_name=cls.template) + cls.wait_for_env(cls.environment) + + @classmethod + def tearDownClass(cls): + cls.beanstalk.delete_application(application_name=cls.app_name, + terminate_env_by_force=True) + cls.wait_for_env(cls.environment, 'Terminated') + + @classmethod + def wait_for_env(cls, env_name, status='Ready'): + while not cls.env_ready(env_name, status): + time.sleep(15) + + @classmethod + def env_ready(cls, env_name, desired_status): + result = cls.beanstalk.describe_environments( + application_name=cls.app_name, environment_names=env_name) + status = result.environments[0].status + return status == desired_status + + def test_describe_environment_resources(self): + result = self.beanstalk.describe_environment_resources( + environment_name=self.environment) + self.assertIsInstance( + result, response.DescribeEnvironmentResourcesResponse) + + def test_describe_configuration_settings(self): + result = self.beanstalk.describe_configuration_settings( + application_name=self.app_name, environment_name=self.environment) + self.assertIsInstance( + result, response.DescribeConfigurationSettingsResponse) + + def test_request_environment_info(self): + result = self.beanstalk.request_environment_info( + environment_name=self.environment, info_type='tail') + self.assertIsInstance(result, response.RequestEnvironmentInfoResponse) + self.wait_for_env(self.environment) + result = self.beanstalk.retrieve_environment_info( + environment_name=self.environment, info_type='tail') + self.assertIsInstance(result, response.RetrieveEnvironmentInfoResponse) + + def test_rebuild_environment(self): + result = self.beanstalk.rebuild_environment( + environment_name=self.environment) + self.assertIsInstance(result, response.RebuildEnvironmentResponse) + self.wait_for_env(self.environment) + + def test_restart_app_server(self): + result = self.beanstalk.restart_app_server( + environment_name=self.environment) + self.assertIsInstance(result, response.RestartAppServerResponse) + self.wait_for_env(self.environment) + + def test_update_configuration_template(self): + result = self.beanstalk.update_configuration_template( + application_name=self.app_name, template_name=self.template) + self.assertIsInstance( + result, response.UpdateConfigurationTemplateResponse) + + def test_update_environment(self): + result = self.beanstalk.update_environment( + environment_name=self.environment) + self.assertIsInstance(result, response.UpdateEnvironmentResponse) + self.wait_for_env(self.environment) + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudformation/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudformation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b7fe4c2259e950fde9bf243fe43614898f197076 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudformation/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. 
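wait_for_env() above is the classic poll-until-status loop, but as written it will spin forever if an environment never reaches the desired state. A hedged sketch of the same pattern with an explicit cap; the timeout is an addition for illustration, not something the tests above implement:

import time

def wait_for_status(fetch_status, desired='Ready', interval=15, max_tries=80):
    # Poll fetch_status() until it returns `desired`, sleeping between tries;
    # give up after interval * max_tries seconds instead of hanging the suite.
    for _ in range(max_tries):
        if fetch_status() == desired:
            return
        time.sleep(interval)
    raise RuntimeError('timed out waiting for status %r' % desired)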
+# All Rights Reserved +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudformation/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudformation/test_cert_verification.py new file mode 100644 index 0000000000000000000000000000000000000000..6dede2f96e846e6fed532b3eaa57438d47e7413f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudformation/test_cert_verification.py @@ -0,0 +1,39 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on all service endpoints validate. 
+""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.cloudformation + + +class CloudFormationCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + cloudformation = True + regions = boto.cloudformation.regions() + + def sample_service_call(self, conn): + conn.describe_stacks() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudformation/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudformation/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..4cafb0fe58fb2ca21e21392191b33592944359d4 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudformation/test_connection.py @@ -0,0 +1,169 @@ +#!/usr/bin/env python +import time +import json + +from tests.unit import unittest +from boto.cloudformation.connection import CloudFormationConnection + + +BASIC_EC2_TEMPLATE = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "AWS CloudFormation Sample Template EC2InstanceSample", + "Parameters": { + "Parameter1": { + "Description": "Test Parameter 1", + "Type": "String" + }, + "Parameter2": { + "Description": "Test Parameter 2", + "Type": "String" + } + }, + "Mappings": { + "RegionMap": { + "us-east-1": { + "AMI": "ami-7f418316" + } + } + }, + "Resources": { + "Ec2Instance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": { + "Fn::FindInMap": [ + "RegionMap", + { + "Ref": "AWS::Region" + }, + "AMI" + ] + }, + "UserData": { + "Fn::Base64": { + "Fn::Join":[ + "", + [{"Ref": "Parameter1"}, + {"Ref": "Parameter2"}] + ] + } + + } + } + } + }, + "Outputs": { + "InstanceId": { + "Description": "InstanceId of the newly created EC2 instance", + "Value": { + "Ref": "Ec2Instance" + } + }, + "AZ": { + "Description": "Availability Zone of the newly created EC2 instance", + "Value": { + "Fn::GetAtt": [ + "Ec2Instance", + "AvailabilityZone" + ] + } + }, + "PublicIP": { + "Description": "Public IP address of the newly created EC2 instance", + "Value": { + "Fn::GetAtt": [ + "Ec2Instance", + "PublicIp" + ] + } + }, + "PrivateIP": { + "Description": "Private IP address of the newly created EC2 instance", + "Value": { + "Fn::GetAtt": [ + "Ec2Instance", + "PrivateIp" + ] + } + }, + "PublicDNS": { + "Description": "Public DNSName of the newly created EC2 instance", + "Value": { + "Fn::GetAtt": [ + "Ec2Instance", + "PublicDnsName" + ] + } + }, + "PrivateDNS": { + "Description": "Private DNSName of the newly created EC2 instance", + "Value": { + "Fn::GetAtt": [ + "Ec2Instance", + "PrivateDnsName" + ] + } + } + } +} + + +class TestCloudformationConnection(unittest.TestCase): + def setUp(self): + self.connection = CloudFormationConnection() + self.stack_name = 'testcfnstack' + str(int(time.time())) + + def test_large_template_stack_size(self): + # See https://github.com/boto/boto/issues/1037 + body = self.connection.create_stack( + self.stack_name, + template_body=json.dumps(BASIC_EC2_TEMPLATE), + parameters=[('Parameter1', 'initial_value'), + ('Parameter2', 'initial_value')]) + self.addCleanup(self.connection.delete_stack, self.stack_name) + + # A newly created stack should have events + events = self.connection.describe_stack_events(self.stack_name) + self.assertTrue(events) + + # No policy should be set on the stack by default + policy = self.connection.get_stack_policy(self.stack_name) + self.assertEqual(None, policy) + + # Our new stack should show up in the stack list + stacks = self.connection.describe_stacks(self.stack_name) + stack 
= stacks[0] + self.assertEqual(self.stack_name, stack.stack_name) + + params = [(p.key, p.value) for p in stack.parameters] + self.assertEqual([('Parameter1', 'initial_value'), + ('Parameter2', 'initial_value')], params) + + for _ in range(30): + stack.update() + if stack.stack_status.find("PROGRESS") == -1: + break + time.sleep(5) + + # The ('Parameter1', '', True) tuple below sets UsePreviousValue, so + # Parameter1 should keep 'initial_value' after the update. + body = self.connection.update_stack( + self.stack_name, + template_body=json.dumps(BASIC_EC2_TEMPLATE), + parameters=[('Parameter1', '', True), + ('Parameter2', 'updated_value')]) + + stacks = self.connection.describe_stacks(self.stack_name) + stack = stacks[0] + params = [(p.key, p.value) for p in stack.parameters] + self.assertEqual([('Parameter1', 'initial_value'), + ('Parameter2', 'updated_value')], params) + + # Waiting for the update to complete to unblock the delete_stack in the + # cleanup. + for _ in range(30): + stack.update() + if stack.stack_status.find("PROGRESS") == -1: + break + time.sleep(5) + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudhsm/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudhsm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a3575e2ed1f4fb166a786d2a051f6d43d682ece2 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudhsm/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudhsm/test_cloudhsm.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudhsm/test_cloudhsm.py new file mode 100644 index 0000000000000000000000000000000000000000..0965d0d3400cb1a6675ecfe3c233ed548d05ce12 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudhsm/test_cloudhsm.py @@ -0,0 +1,44 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates.
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from tests.compat import unittest +from boto.cloudhsm.exceptions import InvalidRequestException + + +class TestCloudHSM(unittest.TestCase): + def setUp(self): + self.cloudhsm = boto.connect_cloudhsm() + + def test_hapgs(self): + label = 'my-hapg' + response = self.cloudhsm.create_hapg(label=label) + hapg_arn = response['HapgArn'] + self.addCleanup(self.cloudhsm.delete_hapg, hapg_arn) + + response = self.cloudhsm.list_hapgs() + self.assertIn(hapg_arn, response['HapgList']) + + def test_validation_exception(self): + invalid_arn = 'arn:aws:cloudhsm:us-east-1:123456789012:hapg-55214b8d' + with self.assertRaises(InvalidRequestException): + self.cloudhsm.describe_hapg(invalid_arn) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudsearch/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudsearch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b7fe4c2259e950fde9bf243fe43614898f197076 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudsearch/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
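Each of the test_cert_verification.py modules in this patch follows the same pattern: mix ServiceCertVerificationTest into a TestCase, flag the service with a class attribute, point `regions` at every regional endpoint, and supply one cheap read-only call. The shared mixin itself lives in tests/integration/__init__.py and is not shown in this hunk; a minimal sketch of what it plausibly does, assuming region.connect() and boto's default HTTPS certificate validation:

    class ServiceCertVerificationTest(object):
        # Subclasses set, e.g.: regions = boto.cloudformation.regions()
        regions = []

        def test_certs(self):
            # Hypothetical core loop: connect to each regional endpoint
            # (boto validates server certificates by default) and issue the
            # subclass's read-only call; a bad cert surfaces as an SSL error.
            for region in self.regions:
                conn = region.connect()
                self.sample_service_call(conn)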
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudsearch/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudsearch/test_cert_verification.py new file mode 100644 index 0000000000000000000000000000000000000000..338c2ac7f401e127110949f5d8d3fb728d90ead2 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudsearch/test_cert_verification.py @@ -0,0 +1,39 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on all service endpoints validate. +""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.cloudsearch + + +class CloudSearchCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + cloudsearch = True + regions = boto.cloudsearch.regions() + + def sample_service_call(self, conn): + conn.describe_domains() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudsearch/test_layers.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudsearch/test_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..1db1d5a0c565d7f1b91f967aef159b9b57837a20 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudsearch/test_layers.py @@ -0,0 +1,75 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Tests for Layer1 and Layer2 of Cloudsearch +""" +import time + +from tests.unit import unittest +from boto.cloudsearch.layer1 import Layer1 +from boto.cloudsearch.layer2 import Layer2 +from boto.regioninfo import RegionInfo + + +class CloudSearchLayer1Test(unittest.TestCase): + cloudsearch = True + + def setUp(self): + super(CloudSearchLayer1Test, self).setUp() + self.layer1 = Layer1() + self.domain_name = 'test-%d' % int(time.time()) + + def test_create_domain(self): + resp = self.layer1.create_domain(self.domain_name) + self.addCleanup(self.layer1.delete_domain, self.domain_name) + self.assertTrue(resp.get('created', False)) + + +class CloudSearchLayer2Test(unittest.TestCase): + cloudsearch = True + + def setUp(self): + super(CloudSearchLayer2Test, self).setUp() + self.layer2 = Layer2() + self.domain_name = 'test-%d' % int(time.time()) + + def test_create_domain(self): + domain = self.layer2.create_domain(self.domain_name) + self.addCleanup(domain.delete) + self.assertTrue(domain.created) + self.assertEqual(domain.domain_name, self.domain_name) + self.assertEqual(domain.num_searchable_docs, 0) + + def test_initialization_regression(self): + us_west_2 = RegionInfo( + name='us-west-2', + endpoint='cloudsearch.us-west-2.amazonaws.com' + ) + self.layer2 = Layer2( + region=us_west_2, + host='cloudsearch.us-west-2.amazonaws.com' + ) + self.assertEqual( + self.layer2.layer1.host, + 'cloudsearch.us-west-2.amazonaws.com' + ) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudsearch2/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudsearch2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b7fe4c2259e950fde9bf243fe43614898f197076 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudsearch2/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE.
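The test_initialization_regression case above pins down that passing both region and host leaves Layer1 bound to the requested endpoint. For contrast, the ordinary region-only construction needs no host argument; a sketch, where the eu-west-1 endpoint string is an assumption following the usual cloudsearch.<region>.amazonaws.com naming:

    from boto.cloudsearch.layer2 import Layer2
    from boto.regioninfo import RegionInfo

    # Region-only binding: Layer2 derives the Layer1 host from the
    # RegionInfo endpoint, so host= is only needed in the regression case.
    eu_west_1 = RegionInfo(name='eu-west-1',
                           endpoint='cloudsearch.eu-west-1.amazonaws.com')
    layer2 = Layer2(region=eu_west_1)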
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudsearch2/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudsearch2/test_cert_verification.py new file mode 100644 index 0000000000000000000000000000000000000000..a2ab6541d17a06af09350b33592fa3ab635e5960 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudsearch2/test_cert_verification.py @@ -0,0 +1,39 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on all service endpoints validate. +""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.cloudsearch2 + + +class CloudSearchCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + cloudsearch = True + regions = boto.cloudsearch2.regions() + + def sample_service_call(self, conn): + conn.describe_domains() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudsearch2/test_layers.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudsearch2/test_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..d2b1375647d2606b36b9139e778eb2892b0e3380 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudsearch2/test_layers.py @@ -0,0 +1,79 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Tests for Layer1 and Layer2 of Cloudsearch2 +""" +import time + +from tests.unit import unittest +from boto.cloudsearch2.layer1 import CloudSearchConnection +from boto.cloudsearch2.layer2 import Layer2 +from boto.regioninfo import RegionInfo + + +class CloudSearchLayer1Test(unittest.TestCase): + cloudsearch = True + + def setUp(self): + super(CloudSearchLayer1Test, self).setUp() + self.layer1 = CloudSearchConnection() + self.domain_name = 'test-%d' % int(time.time()) + + def test_create_domain(self): + resp = self.layer1.create_domain(self.domain_name) + + resp = (resp['CreateDomainResponse'] + ['CreateDomainResult'] + ['DomainStatus']) + + self.addCleanup(self.layer1.delete_domain, self.domain_name) + self.assertTrue(resp.get('Created', False)) + + +class CloudSearchLayer2Test(unittest.TestCase): + cloudsearch = True + + def setUp(self): + super(CloudSearchLayer2Test, self).setUp() + self.layer2 = Layer2() + self.domain_name = 'test-%d' % int(time.time()) + + def test_create_domain(self): + domain = self.layer2.create_domain(self.domain_name) + self.addCleanup(domain.delete) + self.assertTrue(domain.created) + self.assertEqual(domain.domain_name, self.domain_name) + + def test_initialization_regression(self): + us_west_2 = RegionInfo( + name='us-west-2', + endpoint='cloudsearch.us-west-2.amazonaws.com' + ) + self.layer2 = Layer2( + region=us_west_2, + host='cloudsearch.us-west-2.amazonaws.com' + ) + self.assertEqual( + self.layer2.layer1.host, + 'cloudsearch.us-west-2.amazonaws.com' + ) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudtrail/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudtrail/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudtrail/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudtrail/test_cert_verification.py new file mode 100644 index 0000000000000000000000000000000000000000..321efea6bf3f854afb252f1f01573ede15dec83f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudtrail/test_cert_verification.py @@ -0,0 +1,38 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on all service endpoints validate. +""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.cloudtrail + + +class CloudTrailCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + cloudtrail = True + regions = boto.cloudtrail.regions() + + def sample_service_call(self, conn): + conn.describe_trails() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudtrail/test_cloudtrail.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudtrail/test_cloudtrail.py new file mode 100644 index 0000000000000000000000000000000000000000..3e90fbeb2a9dca5707eac0c57d5640e39c4f2757 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/cloudtrail/test_cloudtrail.py @@ -0,0 +1,91 @@ +import boto + +from time import time +from tests.compat import unittest + +DEFAULT_S3_POLICY = """{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AWSCloudTrailAclCheck20131101", + "Effect": "Allow", + "Principal": { + "AWS": [ + "arn:aws:iam::086441151436:root", + "arn:aws:iam::113285607260:root" + ] + }, + "Action": "s3:GetBucketAcl", + "Resource": "arn:aws:s3:::<BucketName>" + }, + { + "Sid": "AWSCloudTrailWrite20131101", + "Effect": "Allow", + "Principal": { + "AWS": [ + "arn:aws:iam::086441151436:root", + "arn:aws:iam::113285607260:root" + ] + }, + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::<BucketName>/<Prefix>/AWSLogs/<CustomerAccountID>/*", + "Condition": { + "StringEquals": { + "s3:x-amz-acl": "bucket-owner-full-control" + } + } + } + ] +}""" + +class TestCloudTrail(unittest.TestCase): + def test_cloudtrail(self): + cloudtrail = boto.connect_cloudtrail() + + # Don't delete existing customer data! + res = cloudtrail.describe_trails() + if len(res['trailList']): + self.fail('A trail already exists on this account!') + + # Who am I?
+ iam = boto.connect_iam() + response = iam.get_user() + account_id = response['get_user_response']['get_user_result'] \ ['user']['user_id'] + + # Setup a new bucket + s3 = boto.connect_s3() + bucket_name = 'cloudtrail-integ-{0}'.format(time()) + policy = DEFAULT_S3_POLICY.replace('<BucketName>', bucket_name)\ .replace('<CustomerAccountID>', account_id)\ .replace('<Prefix>/', '') + b = s3.create_bucket(bucket_name) + b.set_policy(policy) + + # Setup CloudTrail + cloudtrail.create_trail(trail={'Name': 'test', 'S3BucketName': bucket_name}) + + cloudtrail.update_trail(trail={'Name': 'test', 'IncludeGlobalServiceEvents': False}) + + trails = cloudtrail.describe_trails() + + self.assertEqual('test', trails['trailList'][0]['Name']) + self.assertFalse(trails['trailList'][0]['IncludeGlobalServiceEvents']) + + cloudtrail.start_logging(name='test') + + status = cloudtrail.get_trail_status(name='test') + self.assertTrue(status['IsLogging']) + + cloudtrail.stop_logging(name='test') + + status = cloudtrail.get_trail_status(name='test') + self.assertFalse(status['IsLogging']) + + # Clean up + cloudtrail.delete_trail(name='test') + + for key in b.list(): + key.delete() + + s3.delete_bucket(bucket_name) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/codedeploy/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/codedeploy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a3575e2ed1f4fb166a786d2a051f6d43d682ece2 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/codedeploy/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/codedeploy/test_codedeploy.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/codedeploy/test_codedeploy.py new file mode 100644 index 0000000000000000000000000000000000000000..469b4faf3e0da93eae2c267dd95221687793ae57 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/codedeploy/test_codedeploy.py @@ -0,0 +1,41 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates.
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.codedeploy.exceptions import ApplicationDoesNotExistException +from tests.compat import unittest + + +class TestCodeDeploy(unittest.TestCase): + def setUp(self): + self.codedeploy = boto.connect_codedeploy() + + def test_applications(self): + application_name = 'my-boto-application' + self.codedeploy.create_application(application_name=application_name) + self.addCleanup(self.codedeploy.delete_application, application_name) + response = self.codedeploy.list_applications() + self.assertIn(application_name, response['applications']) + + def test_exception(self): + with self.assertRaises(ApplicationDoesNotExistException): + self.codedeploy.get_application('some-non-existant-app') diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/cognito/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/cognito/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..571bc50da23f4320fa69e51186f6b645b3b116af --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/cognito/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# + +import boto +from tests.compat import unittest + + +class CognitoTest(unittest.TestCase): + def setUp(self): + self.cognito_identity = boto.connect_cognito_identity() + self.cognito_sync = boto.connect_cognito_sync() + self.identity_pool_name = 'myIdentityPool' + response = self.cognito_identity.create_identity_pool( + identity_pool_name=self.identity_pool_name, + allow_unauthenticated_identities=False + ) + self.identity_pool_id = response['IdentityPoolId'] + + def tearDown(self): + self.cognito_identity.delete_identity_pool( + identity_pool_id=self.identity_pool_id + ) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/cognito/identity/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/cognito/identity/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..70cc23febffdfb7a2de035d163e75a400a9c82ee --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/cognito/identity/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/cognito/identity/test_cognito_identity.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/cognito/identity/test_cognito_identity.py new file mode 100644 index 0000000000000000000000000000000000000000..7d9f6647ef28b231a100c6de90ff806a16c51581 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/cognito/identity/test_cognito_identity.py @@ -0,0 +1,52 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.cognito.identity.exceptions import ResourceNotFoundException +from tests.integration.cognito import CognitoTest + + +class TestCognitoIdentity(CognitoTest): + """ + Test Cognito identity pool operations, since individual Cognito identities + require an AWS account ID. + """ + def test_cognito_identity(self): + # Ensure the identity pool is in the list of pools. + response = self.cognito_identity.list_identity_pools(max_results=5) + expected_identity = {'IdentityPoolId': self.identity_pool_id, + 'IdentityPoolName': self.identity_pool_name} + self.assertIn(expected_identity, response['IdentityPools']) + + # Ensure the pool's attributes are as expected. + response = self.cognito_identity.describe_identity_pool( + identity_pool_id=self.identity_pool_id + ) + self.assertEqual(response['IdentityPoolName'], self.identity_pool_name) + self.assertEqual(response['IdentityPoolId'], self.identity_pool_id) + self.assertFalse(response['AllowUnauthenticatedIdentities']) + + def test_resource_not_found_exception(self): + with self.assertRaises(ResourceNotFoundException): + # Note the region is us-east-0 which is an invalid region name. + self.cognito_identity.describe_identity_pool( + identity_pool_id='us-east-0:c09e640-b014-4822-86b9-ec77c40d8d6f' + ) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/cognito/sync/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/cognito/sync/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..70cc23febffdfb7a2de035d163e75a400a9c82ee --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/cognito/sync/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/cognito/sync/test_cognito_sync.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/cognito/sync/test_cognito_sync.py new file mode 100644 index 0000000000000000000000000000000000000000..fdfd40ee5bdfd8586efef116d7d38f79dc014bea --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/cognito/sync/test_cognito_sync.py @@ -0,0 +1,46 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates.
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.cognito.sync.exceptions import ResourceNotFoundException +from tests.integration.cognito import CognitoTest + + +class TestCognitoSync(CognitoTest): + """ + Even more so for Cognito Sync, Cognito identities are required. However, + AWS account IDs are required to acquire a Cognito identity, so only + identity-pool-level operations are tested. + """ + def test_cognito_sync(self): + response = self.cognito_sync.describe_identity_pool_usage( + identity_pool_id=self.identity_pool_id + ) + identity_pool_usage = response['IdentityPoolUsage'] + self.assertEqual(identity_pool_usage['SyncSessionsCount'], None) + self.assertEqual(identity_pool_usage['DataStorage'], 0) + + def test_resource_not_found_exception(self): + with self.assertRaises(ResourceNotFoundException): + # Note the region is us-east-0 which is an invalid region name. + self.cognito_sync.describe_identity_pool_usage( + identity_pool_id='us-east-0:c09e640-b014-4822-86b9-ec77c40d8d6f' + ) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/configservice/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/configservice/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a3575e2ed1f4fb166a786d2a051f6d43d682ece2 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/configservice/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/configservice/test_configservice.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/configservice/test_configservice.py new file mode 100644 index 0000000000000000000000000000000000000000..4a7e2dcf66bee20cdf1e32f5ae006f34efe90019 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/configservice/test_configservice.py @@ -0,0 +1,44 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.configservice.exceptions import NoSuchConfigurationRecorderException +from tests.compat import unittest + + +class TestConfigService(unittest.TestCase): + def setUp(self): + self.configservice = boto.connect_configservice() + + def test_describe_configuration_recorders(self): + response = self.configservice.describe_configuration_recorders() + self.assertIn('ConfigurationRecorders', response) + + def test_handle_no_such_configuration_recorder(self): + with self.assertRaises(NoSuchConfigurationRecorderException): + self.configservice.describe_configuration_recorders( + configuration_recorder_names=['non-existant-recorder']) + + def test_connect_to_non_us_east_1(self): + self.configservice = boto.configservice.connect_to_region('us-west-2') + response = self.configservice.describe_configuration_recorders() + self.assertIn('ConfigurationRecorders', response) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/datapipeline/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/datapipeline/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/datapipeline/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/datapipeline/test_cert_verification.py new file mode 100644 index 0000000000000000000000000000000000000000..e30a433fc35798b95c3a9df388b4d9db48792f7d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/datapipeline/test_cert_verification.py @@ -0,0 +1,38 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. +# All rights reserved. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on all service endpoints validate. +""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.datapipeline + + +class DatapipelineCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + datapipeline = True + regions = boto.datapipeline.regions() + + def sample_service_call(self, conn): + conn.list_pipelines() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/datapipeline/test_layer1.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/datapipeline/test_layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..6634770df497c0f14c34a1ff4cc781eccad29878 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/datapipeline/test_layer1.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
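The Layer1 tests below define pipelines as plain dicts: each pipeline object carries an id, a name, and a list of fields, where a field holds either a stringValue literal or a refValue naming another object's id. A minimal sketch of that shape, mirroring the SayHello fixture the tests register:

    # One ShellCommandActivity tied to its parent and schedule by refValue.
    say_hello = {
        'id': 'SayHello',
        'name': 'SayHello',
        'fields': [
            {'key': 'type', 'stringValue': 'ShellCommandActivity'},
            {'key': 'command', 'stringValue': 'echo hello'},
            {'key': 'parent', 'refValue': 'Default'},
            {'key': 'schedule', 'refValue': 'Schedule'},
        ],
    }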
+# +import time +from tests.unit import unittest + +from boto.datapipeline import layer1 + + +class TestDataPipeline(unittest.TestCase): + datapipeline = True + + def setUp(self): + self.connection = layer1.DataPipelineConnection() + self.sample_pipeline_objects = [ + {'fields': [ + {'key': 'workerGroup', 'stringValue': 'MyworkerGroup'}], + 'id': 'Default', + 'name': 'Default'}, + {'fields': [ + {'key': 'startDateTime', 'stringValue': '2012-09-25T17:00:00'}, + {'key': 'type', 'stringValue': 'Schedule'}, + {'key': 'period', 'stringValue': '1 hour'}, + {'key': 'endDateTime', 'stringValue': '2012-09-25T18:00:00'}], + 'id': 'Schedule', + 'name': 'Schedule'}, + {'fields': [ + {'key': 'type', 'stringValue': 'ShellCommandActivity'}, + {'key': 'command', 'stringValue': 'echo hello'}, + {'key': 'parent', 'refValue': 'Default'}, + {'key': 'schedule', 'refValue': 'Schedule'}], + 'id': 'SayHello', + 'name': 'SayHello'} + ] + self.connection.auth_service_name = 'datapipeline' + + def create_pipeline(self, name, unique_id, description=None): + response = self.connection.create_pipeline(name, unique_id, + description) + pipeline_id = response['pipelineId'] + self.addCleanup(self.connection.delete_pipeline, pipeline_id) + return pipeline_id + + def get_pipeline_state(self, pipeline_id): + response = self.connection.describe_pipelines([pipeline_id]) + for attr in response['pipelineDescriptionList'][0]['fields']: + if attr['key'] == '@pipelineState': + return attr['stringValue'] + + def test_can_create_and_delete_a_pipeline(self): + response = self.connection.create_pipeline('name', 'unique_id', + 'description') + self.connection.delete_pipeline(response['pipelineId']) + + def test_validate_pipeline(self): + pipeline_id = self.create_pipeline('name2', 'unique_id2') + + self.connection.validate_pipeline_definition( + self.sample_pipeline_objects, pipeline_id) + + def test_put_pipeline_definition(self): + pipeline_id = self.create_pipeline('name3', 'unique_id3') + self.connection.put_pipeline_definition(self.sample_pipeline_objects, + pipeline_id) + + # We should now be able to get the pipeline definition and see + # that it matches what we put. 
+ response = self.connection.get_pipeline_definition(pipeline_id) + objects = response['pipelineObjects'] + self.assertEqual(len(objects), 3) + self.assertEqual(objects[0]['id'], 'Default') + self.assertEqual(objects[0]['name'], 'Default') + self.assertEqual(objects[0]['fields'], + [{'key': 'workerGroup', 'stringValue': 'MyworkerGroup'}]) + + def test_activate_pipeline(self): + pipeline_id = self.create_pipeline('name4', 'unique_id4') + self.connection.put_pipeline_definition(self.sample_pipeline_objects, + pipeline_id) + self.connection.activate_pipeline(pipeline_id) + + attempts = 0 + state = self.get_pipeline_state(pipeline_id) + while state != 'SCHEDULED' and attempts < 10: + time.sleep(10) + attempts += 1 + state = self.get_pipeline_state(pipeline_id) + # The loop caps attempts at 10, so fail on the final state rather + # than on an attempt count the loop can never exceed. + if state != 'SCHEDULED': + self.fail("Pipeline did not become scheduled " + "after 10 attempts.") + objects = self.connection.describe_objects(['Default'], pipeline_id) + field = objects['pipelineObjects'][0]['fields'][0] + self.assertDictEqual(field, {'stringValue': 'COMPONENT', 'key': '@sphere'}) + + def test_list_pipelines(self): + pipeline_id = self.create_pipeline('name5', 'unique_id5') + pipeline_id_list = [p['id'] for p in + self.connection.list_pipelines()['pipelineIdList']] + self.assertTrue(pipeline_id in pipeline_id_list) + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/directconnect/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/directconnect/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/directconnect/test_directconnect.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/directconnect/test_directconnect.py new file mode 100644 index 0000000000000000000000000000000000000000..c8dde2c9e201e9226e79adcbc6ea647ff65b5065 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/directconnect/test_directconnect.py @@ -0,0 +1,40 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import boto + +from tests.compat import unittest + + +class DirectConnectTest(unittest.TestCase): + """ + A very basic test to make sure signatures and + basic calls work.
+ """ + def test_basic(self): + conn = boto.connect_directconnect() + + response = conn.describe_connections() + + self.assertTrue(response) + self.assertTrue('connections' in response) + self.assertIsInstance(response['connections'], list) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..354aa06fe0d42ad6f6ba0e0d11446c066c7f0da4 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb/test_cert_verification.py new file mode 100644 index 0000000000000000000000000000000000000000..ce94b75331adc961ec831b89f6bf0ac4760a947a --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb/test_cert_verification.py @@ -0,0 +1,39 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on all service endpoints validate. 
+""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.dynamodb + + +class DynamoDBCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + dynamodb = True + regions = boto.dynamodb.regions() + + def sample_service_call(self, conn): + conn.layer1.list_tables() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb/test_layer1.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb/test_layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..d51c0f6bd18dca2aec4b500327520d8eb7e78c5f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb/test_layer1.py @@ -0,0 +1,266 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Tests for Layer1 of DynamoDB +""" +import time +import base64 + +from tests.unit import unittest +from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError +from boto.dynamodb.exceptions import DynamoDBConditionalCheckFailedError +from boto.dynamodb.exceptions import DynamoDBValidationError +from boto.dynamodb.layer1 import Layer1 + + +class DynamoDBLayer1Test(unittest.TestCase): + dynamodb = True + + def setUp(self): + self.dynamodb = Layer1() + self.table_name = 'test-%d' % int(time.time()) + self.hash_key_name = 'forum_name' + self.hash_key_type = 'S' + self.range_key_name = 'subject' + self.range_key_type = 'S' + self.read_units = 5 + self.write_units = 5 + self.schema = {'HashKeyElement': {'AttributeName': self.hash_key_name, + 'AttributeType': self.hash_key_type}, + 'RangeKeyElement': {'AttributeName': self.range_key_name, + 'AttributeType': self.range_key_type}} + self.provisioned_throughput = {'ReadCapacityUnits': self.read_units, + 'WriteCapacityUnits': self.write_units} + + def tearDown(self): + pass + + def create_table(self, table_name, schema, provisioned_throughput): + result = self.dynamodb.create_table(table_name, schema, provisioned_throughput) + self.addCleanup(self.dynamodb.delete_table, table_name) + return result + + def test_layer1_basic(self): + print('--- running DynamoDB Layer1 tests ---') + + c = self.dynamodb + + # First create a table + table_name = self.table_name + hash_key_name = self.hash_key_name + hash_key_type = self.hash_key_type + range_key_name = self.range_key_name + range_key_type = self.range_key_type + read_units = self.read_units + write_units = self.write_units + schema = self.schema + provisioned_throughput = self.provisioned_throughput + + result = self.create_table(table_name, schema, provisioned_throughput) + assert result['TableDescription']['TableName'] == table_name + result_schema = result['TableDescription']['KeySchema'] + assert result_schema['HashKeyElement']['AttributeName'] == hash_key_name + assert result_schema['HashKeyElement']['AttributeType'] == hash_key_type + assert result_schema['RangeKeyElement']['AttributeName'] == range_key_name + assert result_schema['RangeKeyElement']['AttributeType'] == range_key_type + result_thruput = result['TableDescription']['ProvisionedThroughput'] + assert result_thruput['ReadCapacityUnits'] == read_units + assert result_thruput['WriteCapacityUnits'] == write_units + + # Wait for table to become active + result = c.describe_table(table_name) + while result['Table']['TableStatus'] != 'ACTIVE': + time.sleep(5) + result = c.describe_table(table_name) + + # List tables and make sure new one is there + result = c.list_tables() + assert table_name in result['TableNames'] + + # Update the tables ProvisionedThroughput + new_read_units = 10 + new_write_units = 5 + new_provisioned_throughput = {'ReadCapacityUnits': new_read_units, + 'WriteCapacityUnits': new_write_units} + result = c.update_table(table_name, new_provisioned_throughput) + + # Wait for table to be updated + result = c.describe_table(table_name) + while result['Table']['TableStatus'] == 'UPDATING': + time.sleep(5) + result = c.describe_table(table_name) + + result_thruput = result['Table']['ProvisionedThroughput'] + assert result_thruput['ReadCapacityUnits'] == new_read_units + assert result_thruput['WriteCapacityUnits'] == new_write_units + + # Put an item + item1_key = 'Amazon DynamoDB' + item1_range = 'DynamoDB Thread 1' + item1_data = { + hash_key_name: {hash_key_type: item1_key}, + range_key_name: {range_key_type: 
item1_range}, + 'Message': {'S': 'DynamoDB thread 1 message text'}, + 'LastPostedBy': {'S': 'User A'}, + 'Views': {'N': '0'}, + 'Replies': {'N': '0'}, + 'Answered': {'N': '0'}, + 'Tags': {'SS': ["index", "primarykey", "table"]}, + 'LastPostDateTime': {'S': '12/9/2011 11:36:03 PM'} + } + result = c.put_item(table_name, item1_data) + + # Now do a consistent read and check results + key1 = {'HashKeyElement': {hash_key_type: item1_key}, + 'RangeKeyElement': {range_key_type: item1_range}} + result = c.get_item(table_name, key=key1, consistent_read=True) + for name in item1_data: + assert name in result['Item'] + + # Try to get an item that does not exist. + invalid_key = {'HashKeyElement': {hash_key_type: 'bogus_key'}, + 'RangeKeyElement': {range_key_type: item1_range}} + self.assertRaises(DynamoDBKeyNotFoundError, + c.get_item, table_name, key=invalid_key) + + # Try retrieving only select attributes + attributes = ['Message', 'Views'] + result = c.get_item(table_name, key=key1, consistent_read=True, + attributes_to_get=attributes) + for name in result['Item']: + assert name in attributes + + # Try to delete the item with the wrong Expected value + expected = {'Views': {'Value': {'N': '1'}}} + self.assertRaises(DynamoDBConditionalCheckFailedError, + c.delete_item, table_name, key=key1, + expected=expected) + + # Now update the existing object + attribute_updates = {'Views': {'Value': {'N': '5'}, + 'Action': 'PUT'}, + 'Tags': {'Value': {'SS': ['foobar']}, + 'Action': 'ADD'}} + result = c.update_item(table_name, key=key1, + attribute_updates=attribute_updates) + + # Try and update an item, in a fashion which makes it too large. + # The new message text is the item size limit minus 32 bytes and + # the current object is larger than 32 bytes. + item_size_overflow_text = 'Text to be padded'.zfill(64 * 1024 - 32) + attribute_updates = {'Message': {'Value': {'S': item_size_overflow_text}, + 'Action': 'PUT'}} + self.assertRaises(DynamoDBValidationError, + c.update_item, table_name, key=key1, + attribute_updates=attribute_updates) + + + # Put a few more items into the table + item2_key = 'Amazon DynamoDB' + item2_range = 'DynamoDB Thread 2' + item2_data = { + hash_key_name: {hash_key_type: item2_key}, + range_key_name: {range_key_type: item2_range}, + 'Message': {'S': 'DynamoDB thread 2 message text'}, + 'LastPostedBy': {'S': 'User A'}, + 'Views': {'N': '0'}, + 'Replies': {'N': '0'}, + 'Answered': {'N': '0'}, + 'Tags': {'SS': ["index", "primarykey", "table"]}, + 'LastPostDateTime': {'S': '12/9/2011 11:36:03 PM'} + } + result = c.put_item(table_name, item2_data) + key2 = {'HashKeyElement': {hash_key_type: item2_key}, + 'RangeKeyElement': {range_key_type: item2_range}} + + item3_key = 'Amazon S3' + item3_range = 'S3 Thread 1' + item3_data = { + hash_key_name: {hash_key_type: item3_key}, + range_key_name: {range_key_type: item3_range}, + 'Message': {'S': 'S3 Thread 1 message text'}, + 'LastPostedBy': {'S': 'User A'}, + 'Views': {'N': '0'}, + 'Replies': {'N': '0'}, + 'Answered': {'N': '0'}, + 'Tags': {'SS': ['largeobject', 'multipart upload']}, + 'LastPostDateTime': {'S': '12/9/2011 11:36:03 PM'} + } + result = c.put_item(table_name, item3_data) + key3 = {'HashKeyElement': {hash_key_type: item3_key}, + 'RangeKeyElement': {range_key_type: item3_range}} + + # Try a few queries + result = c.query(table_name, {'S': 'Amazon DynamoDB'}, + {'AttributeValueList': [{'S': 'DynamoDB'}], + 'ComparisonOperator': 'BEGINS_WITH'}) + assert 'Count' in result + assert result['Count'] == 2 + + # Try a few scans + result = 
c.scan(table_name, + {'Tags': {'AttributeValueList': [{'S': 'table'}], + 'ComparisonOperator': 'CONTAINS'}}) + assert 'Count' in result + assert result['Count'] == 2 + + # Now delete the items + result = c.delete_item(table_name, key=key1) + result = c.delete_item(table_name, key=key2) + result = c.delete_item(table_name, key=key3) + + print('--- tests completed ---') + + def test_binary_attributes(self): + c = self.dynamodb + result = self.create_table(self.table_name, self.schema, + self.provisioned_throughput) + # Wait for table to become active + result = c.describe_table(self.table_name) + while result['Table']['TableStatus'] != 'ACTIVE': + time.sleep(5) + result = c.describe_table(self.table_name) + + # Put an item + item1_key = 'Amazon DynamoDB' + item1_range = 'DynamoDB Thread 1' + item1_data = { + self.hash_key_name: {self.hash_key_type: item1_key}, + self.range_key_name: {self.range_key_type: item1_range}, + 'Message': {'S': 'DynamoDB thread 1 message text'}, + 'LastPostedBy': {'S': 'User A'}, + 'Views': {'N': '0'}, + 'Replies': {'N': '0'}, + 'BinaryData': {'B': base64.b64encode(b'\x01\x02\x03\x04').decode('utf-8')}, + 'Answered': {'N': '0'}, + 'Tags': {'SS': ["index", "primarykey", "table"]}, + 'LastPostDateTime': {'S': '12/9/2011 11:36:03 PM'} + } + result = c.put_item(self.table_name, item1_data) + + # Now do a consistent read and check results + key1 = {'HashKeyElement': {self.hash_key_type: item1_key}, + 'RangeKeyElement': {self.range_key_type: item1_range}} + result = c.get_item(self.table_name, key=key1, consistent_read=True) + self.assertEqual(result['Item']['BinaryData'], + {'B': base64.b64encode(b'\x01\x02\x03\x04').decode('utf-8')}) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb/test_layer2.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb/test_layer2.py new file mode 100644 index 0000000000000000000000000000000000000000..b56562a7151380788e226155c49f0343caff3baf --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb/test_layer2.py @@ -0,0 +1,496 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
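+
+# Unlike Layer1, Layer2 accepts plain Python values and performs the typed
+# wire encoding internally (via boto.dynamodb.types). A minimal sketch of the
+# mapping these tests rely on (attribute names are illustrative only):
+#
+#     item_attrs = {
+#         'Message': 'thread 1 text',        # -> {'S': ...}
+#         'Views': 0,                        # -> {'N': '0'}
+#         'Tags': set(['index', 'table']),   # -> {'SS': [...]}
+#     }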
+ +""" +Tests for Layer2 of Amazon DynamoDB +""" +import time +import uuid +from decimal import Decimal + +from tests.unit import unittest +from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError +from boto.dynamodb.exceptions import DynamoDBConditionalCheckFailedError +from boto.dynamodb.layer2 import Layer2 +from boto.dynamodb.types import get_dynamodb_type, Binary +from boto.dynamodb.condition import BEGINS_WITH, CONTAINS, GT +from boto.compat import six, long_type + + +class DynamoDBLayer2Test(unittest.TestCase): + dynamodb = True + + def setUp(self): + self.dynamodb = Layer2() + self.hash_key_name = 'forum_name' + self.hash_key_proto_value = '' + self.range_key_name = 'subject' + self.range_key_proto_value = '' + self.table_name = 'sample_data_%s' % int(time.time()) + + def create_sample_table(self): + schema = self.dynamodb.create_schema( + self.hash_key_name, self.hash_key_proto_value, + self.range_key_name, + self.range_key_proto_value) + table = self.create_table(self.table_name, schema, 5, 5) + table.refresh(wait_for_active=True) + return table + + def create_table(self, table_name, schema, read_units, write_units): + result = self.dynamodb.create_table(table_name, schema, read_units, write_units) + self.addCleanup(self.dynamodb.delete_table, result) + return result + + def test_layer2_basic(self): + print('--- running Amazon DynamoDB Layer2 tests ---') + c = self.dynamodb + + # First create a schema for the table + schema = c.create_schema(self.hash_key_name, self.hash_key_proto_value, + self.range_key_name, + self.range_key_proto_value) + + # Create another schema without a range key + schema2 = c.create_schema('post_id', '') + + # Now create a table + index = int(time.time()) + table_name = 'test-%d' % index + read_units = 5 + write_units = 5 + table = self.create_table(table_name, schema, read_units, write_units) + assert table.name == table_name + assert table.schema.hash_key_name == self.hash_key_name + assert table.schema.hash_key_type == get_dynamodb_type(self.hash_key_proto_value) + assert table.schema.range_key_name == self.range_key_name + assert table.schema.range_key_type == get_dynamodb_type(self.range_key_proto_value) + assert table.read_units == read_units + assert table.write_units == write_units + assert table.item_count == 0 + assert table.size_bytes == 0 + + # Create the second table + table2_name = 'test-%d' % (index + 1) + table2 = self.create_table(table2_name, schema2, read_units, write_units) + + # Wait for table to become active + table.refresh(wait_for_active=True) + table2.refresh(wait_for_active=True) + + # List tables and make sure new one is there + table_names = c.list_tables() + assert table_name in table_names + assert table2_name in table_names + + # Update the tables ProvisionedThroughput + new_read_units = 10 + new_write_units = 5 + table.update_throughput(new_read_units, new_write_units) + + # Wait for table to be updated + table.refresh(wait_for_active=True) + assert table.read_units == new_read_units + assert table.write_units == new_write_units + + # Put an item + item1_key = 'Amazon DynamoDB' + item1_range = 'DynamoDB Thread 1' + item1_attrs = { + 'Message': 'DynamoDB thread 1 message text', + 'LastPostedBy': 'User A', + 'Views': 0, + 'Replies': 0, + 'Answered': 0, + 'Public': True, + 'Tags': set(['index', 'primarykey', 'table']), + 'LastPostDateTime': '12/9/2011 11:36:03 PM'} + + # Test a few corner cases with new_item + + # Try supplying a hash_key as an arg and as an item in attrs + item1_attrs[self.hash_key_name] = 'foo' + 
foobar_item = table.new_item(item1_key, item1_range, item1_attrs) + assert foobar_item.hash_key == item1_key + + # Try supplying a range_key as an arg and as an item in attrs + item1_attrs[self.range_key_name] = 'bar' + foobar_item = table.new_item(item1_key, item1_range, item1_attrs) + assert foobar_item.range_key == item1_range + + # Try supplying hash and range key in attrs dict + foobar_item = table.new_item(attrs=item1_attrs) + assert foobar_item.hash_key == 'foo' + assert foobar_item.range_key == 'bar' + + del item1_attrs[self.hash_key_name] + del item1_attrs[self.range_key_name] + + item1 = table.new_item(item1_key, item1_range, item1_attrs) + # make sure the put() succeeds + try: + item1.put() + except c.layer1.ResponseError as e: + raise Exception("Item put failed: %s" % e) + + # Try to get an item that does not exist. + self.assertRaises(DynamoDBKeyNotFoundError, + table.get_item, 'bogus_key', item1_range) + + # Now do a consistent read and check results + item1_copy = table.get_item(item1_key, item1_range, + consistent_read=True) + assert item1_copy.hash_key == item1.hash_key + assert item1_copy.range_key == item1.range_key + for attr_name in item1_attrs: + val = item1_copy[attr_name] + if isinstance(val, (int, long_type, float, six.string_types)): + assert val == item1[attr_name] + + # Try retrieving only select attributes + attributes = ['Message', 'Views'] + item1_small = table.get_item(item1_key, item1_range, + attributes_to_get=attributes, + consistent_read=True) + for attr_name in item1_small: + # The item will include the attributes we asked for as + # well as the hashkey and rangekey, so filter those out. + if attr_name not in (item1_small.hash_key_name, + item1_small.range_key_name): + assert attr_name in attributes + + self.assertTrue(table.has_item(item1_key, range_key=item1_range, + consistent_read=True)) + + # Try to delete the item with the wrong Expected value + expected = {'Views': 1} + self.assertRaises(DynamoDBConditionalCheckFailedError, + item1.delete, expected_value=expected) + + # Try to delete a value while expecting a non-existant attribute + expected = {'FooBar': True} + try: + item1.delete(expected_value=expected) + except c.layer1.ResponseError: + pass + + # Now update the existing object + item1.add_attribute('Replies', 2) + + removed_attr = 'Public' + item1.delete_attribute(removed_attr) + + removed_tag = item1_attrs['Tags'].copy().pop() + item1.delete_attribute('Tags', set([removed_tag])) + + replies_by_set = set(['Adam', 'Arnie']) + item1.put_attribute('RepliesBy', replies_by_set) + retvals = item1.save(return_values='ALL_OLD') + # Need more tests here for variations on return_values + assert 'Attributes' in retvals + + # Check for correct updates + item1_updated = table.get_item(item1_key, item1_range, + consistent_read=True) + assert item1_updated['Replies'] == item1_attrs['Replies'] + 2 + self.assertFalse(removed_attr in item1_updated) + self.assertTrue(removed_tag not in item1_updated['Tags']) + self.assertTrue('RepliesBy' in item1_updated) + self.assertTrue(item1_updated['RepliesBy'] == replies_by_set) + + # Put a few more items into the table + item2_key = 'Amazon DynamoDB' + item2_range = 'DynamoDB Thread 2' + item2_attrs = { + 'Message': 'DynamoDB thread 2 message text', + 'LastPostedBy': 'User A', + 'Views': 0, + 'Replies': 0, + 'Answered': 0, + 'Tags': set(["index", "primarykey", "table"]), + 'LastPost2DateTime': '12/9/2011 11:36:03 PM'} + item2 = table.new_item(item2_key, item2_range, item2_attrs) + item2.put() + + item3_key = 'Amazon 
S3' + item3_range = 'S3 Thread 1' + item3_attrs = { + 'Message': 'S3 Thread 1 message text', + 'LastPostedBy': 'User A', + 'Views': 0, + 'Replies': 0, + 'Answered': 0, + 'Tags': set(['largeobject', 'multipart upload']), + 'LastPostDateTime': '12/9/2011 11:36:03 PM' + } + item3 = table.new_item(item3_key, item3_range, item3_attrs) + item3.put() + + # Put an item into the second table + table2_item1_key = uuid.uuid4().hex + table2_item1_attrs = { + 'DateTimePosted': '25/1/2011 12:34:56 PM', + 'Text': 'I think boto rocks and so does DynamoDB' + } + table2_item1 = table2.new_item(table2_item1_key, + attrs=table2_item1_attrs) + table2_item1.put() + + # Try a few queries + items = table.query('Amazon DynamoDB', range_key_condition=BEGINS_WITH('DynamoDB')) + n = 0 + for item in items: + n += 1 + assert n == 2 + assert items.consumed_units > 0 + + items = table.query('Amazon DynamoDB', range_key_condition=BEGINS_WITH('DynamoDB'), + request_limit=1, max_results=1) + n = 0 + for item in items: + n += 1 + assert n == 1 + assert items.consumed_units > 0 + + # Try a few scans + items = table.scan() + n = 0 + for item in items: + n += 1 + assert n == 3 + assert items.consumed_units > 0 + + items = table.scan(scan_filter={'Replies': GT(0)}) + n = 0 + for item in items: + n += 1 + assert n == 1 + assert items.consumed_units > 0 + + # Test some integer and float attributes + integer_value = 42 + float_value = 345.678 + item3['IntAttr'] = integer_value + item3['FloatAttr'] = float_value + + # Test booleans + item3['TrueBoolean'] = True + item3['FalseBoolean'] = False + + # Test some set values + integer_set = set([1, 2, 3, 4, 5]) + float_set = set([1.1, 2.2, 3.3, 4.4, 5.5]) + mixed_set = set([1, 2, 3.3, 4, 5.555]) + str_set = set(['foo', 'bar', 'fie', 'baz']) + item3['IntSetAttr'] = integer_set + item3['FloatSetAttr'] = float_set + item3['MixedSetAttr'] = mixed_set + item3['StrSetAttr'] = str_set + item3.put() + + # Now do a consistent read + item4 = table.get_item(item3_key, item3_range, consistent_read=True) + assert item4['IntAttr'] == integer_value + assert item4['FloatAttr'] == float_value + assert bool(item4['TrueBoolean']) is True + assert bool(item4['FalseBoolean']) is False + # The values will not necessarily be in the same order as when + # we wrote them to the DB. 
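+        # (Hence the membership checks below rather than a direct sequence
+        # comparison.)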
+        for i in item4['IntSetAttr']:
+            assert i in integer_set
+        for i in item4['FloatSetAttr']:
+            assert i in float_set
+        for i in item4['MixedSetAttr']:
+            assert i in mixed_set
+        for i in item4['StrSetAttr']:
+            assert i in str_set
+
+        # Try a batch get
+        batch_list = c.new_batch_list()
+        batch_list.add_batch(table, [(item2_key, item2_range),
+                                     (item3_key, item3_range)])
+        response = batch_list.submit()
+        assert len(response['Responses'][table.name]['Items']) == 2
+
+        # Try an empty batch get
+        batch_list = c.new_batch_list()
+        batch_list.add_batch(table, [])
+        response = batch_list.submit()
+        assert response == {}
+
+        # Try a few batch write operations
+        item4_key = 'Amazon S3'
+        item4_range = 'S3 Thread 2'
+        item4_attrs = {
+            'Message': 'S3 Thread 2 message text',
+            'LastPostedBy': 'User A',
+            'Views': 0,
+            'Replies': 0,
+            'Answered': 0,
+            'Tags': set(['largeobject', 'multipart upload']),
+            'LastPostDateTime': '12/9/2011 11:36:03 PM'
+        }
+        item5_key = 'Amazon S3'
+        item5_range = 'S3 Thread 3'
+        item5_attrs = {
+            'Message': 'S3 Thread 3 message text',
+            'LastPostedBy': 'User A',
+            'Views': 0,
+            'Replies': 0,
+            'Answered': 0,
+            'Tags': set(['largeobject', 'multipart upload']),
+            'LastPostDateTime': '12/9/2011 11:36:03 PM'
+        }
+        item4 = table.new_item(item4_key, item4_range, item4_attrs)
+        item5 = table.new_item(item5_key, item5_range, item5_attrs)
+        batch_list = c.new_batch_write_list()
+        batch_list.add_batch(table, puts=[item4, item5])
+        response = batch_list.submit()
+        # should really check for unprocessed items
+
+        # Do some generator gymnastics
+        results = table.scan(scan_filter={'Tags': CONTAINS('table')})
+        assert results.scanned_count == 5
+        results = table.scan(request_limit=2, max_results=5)
+        assert results.count == 2
+        for item in results:
+            if results.count == 2:
+                assert results.remaining == 4
+                results.remaining -= 2
+                results.next_response()
+            else:
+                assert results.count == 4
+                assert results.remaining in (0, 1)
+        assert results.count == 4
+        results = table.scan(request_limit=6, max_results=4)
+        assert len(list(results)) == 4
+        assert results.count == 4
+
+        batch_list = c.new_batch_write_list()
+        batch_list.add_batch(table, deletes=[(item4_key, item4_range),
+                                             (item5_key, item5_range)])
+        response = batch_list.submit()
+
+        # Try queries
+        results = table.query('Amazon DynamoDB',
+                              range_key_condition=BEGINS_WITH('DynamoDB'))
+        n = 0
+        for item in results:
+            n += 1
+        assert n == 2
+
+        # Try to delete the item with the right Expected value
+        expected = {'Views': 0}
+        item1.delete(expected_value=expected)
+
+        self.assertFalse(table.has_item(item1_key, range_key=item1_range,
+                                        consistent_read=True))
+        # Now delete the remaining items
+        ret_vals = item2.delete(return_values='ALL_OLD')
+        # some additional checks here would be useful
+        assert ret_vals['Attributes'][self.hash_key_name] == item2_key
+        assert ret_vals['Attributes'][self.range_key_name] == item2_range
+
+        item3.delete()
+        table2_item1.delete()
+        print('--- tests completed ---')
+
+    def test_binary_attrs(self):
+        c = self.dynamodb
+        schema = c.create_schema(self.hash_key_name, self.hash_key_proto_value,
+                                 self.range_key_name,
+                                 self.range_key_proto_value)
+        index = int(time.time())
+        table_name = 'test-%d' % index
+        read_units = 5
+        write_units = 5
+        table = self.create_table(table_name, schema, read_units, write_units)
+        table.refresh(wait_for_active=True)
+        item1_key = 'Amazon S3'
+        item1_range = 'S3 Thread 1'
+        item1_attrs = {
+            'Message': 'S3 Thread 1 message text',
+            'LastPostedBy': 'User A',
+            'Views': 0,
+            'Replies': 0,
+            'Answered': 0,
+            'BinaryData': Binary(b'\x01\x02\x03\x04'),
+            'BinarySequence': set([Binary(b'\x01\x02'), Binary(b'\x03\x04')]),
+            'Tags': set(['largeobject', 'multipart upload']),
+            'LastPostDateTime': '12/9/2011 11:36:03 PM'
+        }
+        item1 = table.new_item(item1_key, item1_range, item1_attrs)
+        item1.put()
+
+        retrieved = table.get_item(item1_key, item1_range, consistent_read=True)
+        self.assertEqual(retrieved['Message'], 'S3 Thread 1 message text')
+        self.assertEqual(retrieved['Views'], 0)
+        self.assertEqual(retrieved['Tags'],
+                         set(['largeobject', 'multipart upload']))
+        self.assertEqual(retrieved['BinaryData'], Binary(b'\x01\x02\x03\x04'))
+        # Also comparable directly to bytes:
+        self.assertEqual(retrieved['BinaryData'], b'\x01\x02\x03\x04')
+        self.assertEqual(retrieved['BinarySequence'],
+                         set([Binary(b'\x01\x02'), Binary(b'\x03\x04')]))
+
+    def test_put_decimal_attrs(self):
+        self.dynamodb.use_decimals()
+        table = self.create_sample_table()
+        item = table.new_item('foo', 'bar')
+        item['decimalvalue'] = Decimal('1.12345678912345')
+        item.put()
+        retrieved = table.get_item('foo', 'bar')
+        self.assertEqual(retrieved['decimalvalue'], Decimal('1.12345678912345'))
+
+    @unittest.skipIf(six.PY3,
+                     "skipping lossy_float_conversion test for Python 3.x")
+    def test_lossy_float_conversion(self):
+        table = self.create_sample_table()
+        item = table.new_item('foo', 'bar')
+        item['floatvalue'] = 1.12345678912345
+        item.put()
+        retrieved = table.get_item('foo', 'bar')['floatvalue']
+        # Notice how this is not equal to the original value.
+        self.assertNotEqual(1.12345678912345, retrieved)
+        # Instead, it's truncated:
+        self.assertEqual(1.12345678912, retrieved)
+
+    def test_large_integers(self):
+        # It's not just floating-point numbers; large integers can also
+        # trigger rounding issues.
+        self.dynamodb.use_decimals()
+        table = self.create_sample_table()
+        item = table.new_item('foo', 'bar')
+        item['decimalvalue'] = Decimal('129271300103398600')
+        item.put()
+        retrieved = table.get_item('foo', 'bar')
+        self.assertEqual(retrieved['decimalvalue'], Decimal('129271300103398600'))
+        # Also comparable directly to an int.
+        self.assertEqual(retrieved['decimalvalue'], 129271300103398600)
+
+    def test_put_single_letter_attr(self):
+        # When a single-letter attribute name overlapped with one of the
+        # built-in type codes, the decoding used to fall down. Assert that
+        # it now works correctly.
+        table = self.create_sample_table()
+        item = table.new_item('foo', 'foo1')
+        item.put_attribute('b', 4)
+        stored = item.save(return_values='UPDATED_NEW')
+        self.assertEqual(stored['Attributes'], {'b': 4})
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb/test_table.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb/test_table.py
new file mode 100644
index 0000000000000000000000000000000000000000..c407b36970d1d783545833fb6b8edb9cbaa30be4
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb/test_table.py
@@ -0,0 +1,84 @@
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import time
+from tests.unit import unittest
+
+from boto.dynamodb.layer2 import Layer2
+from boto.dynamodb.table import Table
+from boto.dynamodb.schema import Schema
+
+
+class TestDynamoDBTable(unittest.TestCase):
+    dynamodb = True
+
+    def setUp(self):
+        self.dynamodb = Layer2()
+        self.schema = Schema.create(('foo', 'N'), ('bar', 'S'))
+        self.table_name = 'testtable%s' % int(time.time())
+
+    def create_table(self, table_name, schema, read_units, write_units):
+        result = self.dynamodb.create_table(table_name, schema, read_units, write_units)
+        self.addCleanup(self.dynamodb.delete_table, result)
+        return result
+
+    def assertAllEqual(self, *items):
+        first = items[0]
+        for item in items[1:]:
+            self.assertEqual(first, item)
+
+    def test_table_retrieval_parity(self):
+        # Use the helper so the table is cleaned up after the test.
+        created_table = self.create_table(
+            self.table_name, self.schema, 1, 1)
+        created_table.refresh(wait_for_active=True)
+
+        retrieved_table = self.dynamodb.get_table(self.table_name)
+
+        constructed_table = self.dynamodb.table_from_schema(self.table_name,
+                                                            self.schema)
+
+        # All three tables should have the same name
+        # and schema attributes.
+        self.assertAllEqual(created_table.name,
+                            retrieved_table.name,
+                            constructed_table.name)
+
+        self.assertAllEqual(created_table.schema,
+                            retrieved_table.schema,
+                            constructed_table.schema)
+
+        # However, for create_time, status, and read/write units, only the
+        # created/retrieved tables will have equal values.
+        self.assertEqual(created_table.create_time,
+                         retrieved_table.create_time)
+        self.assertEqual(created_table.status,
+                         retrieved_table.status)
+        self.assertEqual(created_table.read_units,
+                         retrieved_table.read_units)
+        self.assertEqual(created_table.write_units,
+                         retrieved_table.write_units)
+
+        # The constructed table will have values of None.
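+        # (table_from_schema builds the Table object locally, without a
+        # DescribeTable call, so the server-populated fields are never set.)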
+ self.assertIsNone(constructed_table.create_time) + self.assertIsNone(constructed_table.status) + self.assertIsNone(constructed_table.read_units) + self.assertIsNone(constructed_table.write_units) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb2/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb2/forum_test_data.json b/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb2/forum_test_data.json new file mode 100644 index 0000000000000000000000000000000000000000..fbacc43cd10df9e42adc7920a8b883ee66b0a589 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb2/forum_test_data.json @@ -0,0 +1,50 @@ +[ + { + "thread": "Favorite chiptune band?", + "posted_by": "joe", + "posted_on": "2013-12-24T12:30:54", + "body": "Forum poll: What's your favorite chiptune band & why?" + }, + { + "thread": "Favorite chiptune band?", + "posted_by": "jane", + "posted_on": "2013-12-24T12:35:40", + "body": "I'd definitely go with POWERLIFTER. Love the use of LSDJ & vocals." + }, + { + "thread": "Favorite chiptune band?", + "posted_by": "joe", + "posted_on": "2013-12-24T13:45:30", + "body": "Hm, I hadn't heard of them before. Will give a listen.\n\nMy favorite is definitely D&D Sluggers so far." + }, + { + "thread": "Favorite chiptune band?", + "posted_by": "joe", + "posted_on": "2013-12-24T14:15:14", + "body": "Oh man, POWERLIFTER is really good. Do they have any more albums than the first one?" + }, + { + "thread": "Favorite chiptune band?", + "posted_by": "jane", + "posted_on": "2013-12-24T14:25:33", + "body": "Yeah, check out their site. The second album has been out for a bit & is just as good." + }, + { + "thread": "Help with compression?", + "posted_by": "jane", + "posted_on": "2013-12-24T14:26:51", + "body": "I'm working on my latest & having some trouble. I've got compression on my drum track but I still can't keep the drum for muddying the bass line without losing clarity on the hats. :( Help?" + }, + { + "thread": "Favorite chiptune band?", + "posted_by": "joe", + "posted_on": "2013-12-24T15:22:22", + "body": "Thanks for the tip! I'll have to check it out!" + }, + { + "thread": "Help with compression?", + "posted_by": "joe", + "posted_on": "2013-12-24T15:26:06", + "body": "Have you tried using side-chaining the compression? That'll allow the bass' input to control the volume of the drums based on when it's playing." + } +] diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb2/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb2/test_cert_verification.py new file mode 100644 index 0000000000000000000000000000000000000000..2fc93fd5182aa3ee38ff6d120554cbefd637d070 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb2/test_cert_verification.py @@ -0,0 +1,39 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on all service endpoints validate. +""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.dynamodb2 + + +class DynamoDB2CertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + dynamodb2 = True + regions = boto.dynamodb2.regions() + + def sample_service_call(self, conn): + conn.list_tables() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb2/test_highlevel.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb2/test_highlevel.py new file mode 100644 index 0000000000000000000000000000000000000000..833400783ee98aad066494e6302dc4fa36468b55 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb2/test_highlevel.py @@ -0,0 +1,821 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Tests for DynamoDB v2 high-level abstractions. 
+""" +import os +import time + +from tests.unit import unittest +from boto.dynamodb2 import exceptions +from boto.dynamodb2.fields import (HashKey, RangeKey, KeysOnlyIndex, + GlobalKeysOnlyIndex, GlobalIncludeIndex, + GlobalAllIndex) +from boto.dynamodb2.items import Item +from boto.dynamodb2.table import Table +from boto.dynamodb2.types import NUMBER, STRING + +try: + import json +except ImportError: + import simplejson as json + + +class DynamoDBv2Test(unittest.TestCase): + dynamodb = True + + def test_integration(self): + # Test creating a full table with all options specified. + users = Table.create('users', schema=[ + HashKey('username'), + RangeKey('friend_count', data_type=NUMBER) + ], throughput={ + 'read': 5, + 'write': 5, + }, indexes=[ + KeysOnlyIndex('LastNameIndex', parts=[ + HashKey('username'), + RangeKey('last_name') + ]), + ]) + self.addCleanup(users.delete) + + self.assertEqual(len(users.schema), 2) + self.assertEqual(users.throughput['read'], 5) + + # Wait for it. + time.sleep(60) + + # Make sure things line up if we're introspecting the table. + users_hit_api = Table('users') + users_hit_api.describe() + self.assertEqual(len(users.schema), len(users_hit_api.schema)) + self.assertEqual(users.throughput, users_hit_api.throughput) + self.assertEqual(len(users.indexes), len(users_hit_api.indexes)) + + # Test putting some items individually. + users.put_item(data={ + 'username': 'johndoe', + 'first_name': 'John', + 'last_name': 'Doe', + 'friend_count': 4 + }) + + users.put_item(data={ + 'username': 'alice', + 'first_name': 'Alice', + 'last_name': 'Expert', + 'friend_count': 2 + }) + + time.sleep(5) + + # Test batch writing. + with users.batch_write() as batch: + batch.put_item({ + 'username': 'jane', + 'first_name': 'Jane', + 'last_name': 'Doe', + 'friend_count': 3 + }) + batch.delete_item(username='alice', friend_count=2) + batch.put_item({ + 'username': 'bob', + 'first_name': 'Bob', + 'last_name': 'Smith', + 'friend_count': 1 + }) + + time.sleep(5) + + # Does it exist? It should? + self.assertTrue(users.has_item(username='jane', friend_count=3)) + # But this shouldn't be there... + self.assertFalse(users.has_item( + username='mrcarmichaeljones', + friend_count=72948 + )) + + # Test getting an item & updating it. + # This is the "safe" variant (only write if there have been no + # changes). + jane = users.get_item(username='jane', friend_count=3) + self.assertEqual(jane['first_name'], 'Jane') + jane['last_name'] = 'Doh' + self.assertTrue(jane.save()) + + # Test strongly consistent getting of an item. + # Additionally, test the overwrite behavior. + client_1_jane = users.get_item( + username='jane', + friend_count=3, + consistent=True + ) + self.assertEqual(jane['first_name'], 'Jane') + client_2_jane = users.get_item( + username='jane', + friend_count=3, + consistent=True + ) + self.assertEqual(jane['first_name'], 'Jane') + + # Write & assert the ``first_name`` is gone, then... + del client_1_jane['first_name'] + self.assertTrue(client_1_jane.save()) + check_name = users.get_item( + username='jane', + friend_count=3, + consistent=True + ) + self.assertEqual(check_name['first_name'], None) + + # ...overwrite the data with what's in memory. + client_2_jane['first_name'] = 'Joan' + # Now a write that fails due to default expectations... + self.assertRaises(exceptions.JSONResponseError, client_2_jane.save) + # ... so we force an overwrite. 
+ self.assertTrue(client_2_jane.save(overwrite=True)) + check_name_again = users.get_item( + username='jane', + friend_count=3, + consistent=True + ) + self.assertEqual(check_name_again['first_name'], 'Joan') + + # Reset it. + jane['username'] = 'jane' + jane['first_name'] = 'Jane' + jane['last_name'] = 'Doe' + jane['friend_count'] = 3 + self.assertTrue(jane.save(overwrite=True)) + + # Test the partial update behavior. + client_3_jane = users.get_item( + username='jane', + friend_count=3, + consistent=True + ) + client_4_jane = users.get_item( + username='jane', + friend_count=3, + consistent=True + ) + client_3_jane['favorite_band'] = 'Feed Me' + # No ``overwrite`` needed due to new data. + self.assertTrue(client_3_jane.save()) + # Expectations are only checked on the ``first_name``, so what wouldn't + # have succeeded by default does succeed here. + client_4_jane['first_name'] = 'Jacqueline' + self.assertTrue(client_4_jane.partial_save()) + partial_jane = users.get_item( + username='jane', + friend_count=3, + consistent=True + ) + self.assertEqual(partial_jane['favorite_band'], 'Feed Me') + self.assertEqual(partial_jane['first_name'], 'Jacqueline') + + # Reset it. + jane['username'] = 'jane' + jane['first_name'] = 'Jane' + jane['last_name'] = 'Doe' + jane['friend_count'] = 3 + self.assertTrue(jane.save(overwrite=True)) + + # Ensure that partial saves of a brand-new object work. + sadie = Item(users, data={ + 'username': 'sadie', + 'first_name': 'Sadie', + 'favorite_band': 'Zedd', + 'friend_count': 7 + }) + self.assertTrue(sadie.partial_save()) + serverside_sadie = users.get_item( + username='sadie', + friend_count=7, + consistent=True + ) + self.assertEqual(serverside_sadie['first_name'], 'Sadie') + + # Test the eventually consistent query. + results = users.query_2( + username__eq='johndoe', + last_name__eq='Doe', + index='LastNameIndex', + attributes=('username',), + reverse=True + ) + + for res in results: + self.assertTrue(res['username'] in ['johndoe',]) + self.assertEqual(list(res.keys()), ['username']) + + # Ensure that queries with attributes don't return the hash key. + results = users.query_2( + username__eq='johndoe', + friend_count__eq=4, + attributes=('first_name',) + ) + + for res in results: + self.assertEqual(res['first_name'], 'John') + self.assertEqual(list(res.keys()), ['first_name']) + + # Test the strongly consistent query. + c_results = users.query_2( + username__eq='johndoe', + last_name__eq='Doe', + index='LastNameIndex', + reverse=True, + consistent=True + ) + + for res in c_results: + self.assertEqual(res['username'], 'johndoe') + + # Test a query with query filters + results = users.query_2( + username__eq='johndoe', + query_filter={ + 'first_name__beginswith': 'J' + }, + attributes=('first_name',) + ) + + for res in results: + self.assertTrue(res['first_name'] in ['John']) + + # Test scans without filters. + all_users = users.scan(limit=7) + self.assertEqual(next(all_users)['username'], 'bob') + self.assertEqual(next(all_users)['username'], 'jane') + self.assertEqual(next(all_users)['username'], 'johndoe') + + # Test scans with a filter. + filtered_users = users.scan(limit=2, username__beginswith='j') + self.assertEqual(next(filtered_users)['username'], 'jane') + self.assertEqual(next(filtered_users)['username'], 'johndoe') + + # Test deleting a single item. + johndoe = users.get_item(username='johndoe', friend_count=4) + johndoe.delete() + + # Set batch get limit to ensure keys with no results are + # handled correctly. 
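+        # With max_batch_get = 2, the four keys below are fetched in two
+        # BatchGetItem pages; the two bogus keys simply return no items.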
+ users.max_batch_get = 2 + + # Test the eventually consistent batch get. + results = users.batch_get(keys=[ + {'username': 'noone', 'friend_count': 4}, + {'username': 'nothere', 'friend_count': 10}, + {'username': 'bob', 'friend_count': 1}, + {'username': 'jane', 'friend_count': 3} + ]) + batch_users = [] + + for res in results: + batch_users.append(res) + self.assertIn(res['first_name'], ['Bob', 'Jane']) + + self.assertEqual(len(batch_users), 2) + + # Test the strongly consistent batch get. + c_results = users.batch_get(keys=[ + {'username': 'bob', 'friend_count': 1}, + {'username': 'jane', 'friend_count': 3} + ], consistent=True) + c_batch_users = [] + + for res in c_results: + c_batch_users.append(res) + self.assertTrue(res['first_name'] in ['Bob', 'Jane']) + + self.assertEqual(len(c_batch_users), 2) + + # Test count, but in a weak fashion. Because lag time. + self.assertTrue(users.count() > -1) + + # Test query count + count = users.query_count( + username__eq='bob', + ) + + self.assertEqual(count, 1) + + # Test without LSIs (describe calls shouldn't fail). + admins = Table.create('admins', schema=[ + HashKey('username') + ]) + self.addCleanup(admins.delete) + time.sleep(60) + admins.describe() + self.assertEqual(admins.throughput['read'], 5) + self.assertEqual(admins.indexes, []) + + # A single query term should fail on a table with *ONLY* a HashKey. + self.assertRaises( + exceptions.QueryError, + admins.query, + username__eq='johndoe' + ) + # But it shouldn't break on more complex tables. + res = users.query_2(username__eq='johndoe') + + # Test putting with/without sets. + mau5_created = users.put_item(data={ + 'username': 'mau5', + 'first_name': 'dead', + 'last_name': 'mau5', + 'friend_count': 2, + 'friends': set(['skrill', 'penny']), + }) + self.assertTrue(mau5_created) + + penny_created = users.put_item(data={ + 'username': 'penny', + 'first_name': 'Penny', + 'friend_count': 0, + 'friends': set([]), + }) + self.assertTrue(penny_created) + + # Test attributes. + mau5 = users.get_item( + username='mau5', + friend_count=2, + attributes=['username', 'first_name'] + ) + self.assertEqual(mau5['username'], 'mau5') + self.assertEqual(mau5['first_name'], 'dead') + self.assertTrue('last_name' not in mau5) + + def test_unprocessed_batch_writes(self): + # Create a very limited table w/ low throughput. + users = Table.create('slow_users', schema=[ + HashKey('user_id'), + ], throughput={ + 'read': 1, + 'write': 1, + }) + self.addCleanup(users.delete) + + # Wait for it. + time.sleep(60) + + with users.batch_write() as batch: + for i in range(500): + batch.put_item(data={ + 'user_id': str(i), + 'name': 'Droid #{0}'.format(i), + }) + + # Before ``__exit__`` runs, we should have a bunch of unprocessed + # items. + self.assertTrue(len(batch._unprocessed) > 0) + + # Post-__exit__, they should all be gone. + self.assertEqual(len(batch._unprocessed), 0) + + def test_gsi(self): + users = Table.create('gsi_users', schema=[ + HashKey('user_id'), + ], throughput={ + 'read': 5, + 'write': 3, + }, + global_indexes=[ + GlobalKeysOnlyIndex('StuffIndex', parts=[ + HashKey('user_id') + ], throughput={ + 'read': 2, + 'write': 1, + }), + ]) + self.addCleanup(users.delete) + + # Wait for it. + time.sleep(60) + + users.update( + throughput={ + 'read': 3, + 'write': 4 + }, + global_indexes={ + 'StuffIndex': { + 'read': 1, + 'write': 2 + } + } + ) + + # Wait again for the changes to finish propagating. + time.sleep(150) + + def test_gsi_with_just_hash_key(self): + # GSI allows for querying off of different keys. 
This is behavior we + # previously disallowed (due to standard & LSI queries). + # See https://forums.aws.amazon.com/thread.jspa?threadID=146212&tstart=0 + users = Table.create('gsi_query_users', schema=[ + HashKey('user_id') + ], throughput={ + 'read': 5, + 'write': 3, + }, + global_indexes=[ + GlobalIncludeIndex('UsernameIndex', parts=[ + HashKey('username'), + ], includes=['user_id', 'username'], throughput={ + 'read': 3, + 'write': 1, + }) + ]) + self.addCleanup(users.delete) + + # Wait for it. + time.sleep(60) + + users.put_item(data={ + 'user_id': '7', + 'username': 'johndoe', + 'first_name': 'John', + 'last_name': 'Doe', + }) + users.put_item(data={ + 'user_id': '24', + 'username': 'alice', + 'first_name': 'Alice', + 'last_name': 'Expert', + }) + users.put_item(data={ + 'user_id': '35', + 'username': 'jane', + 'first_name': 'Jane', + 'last_name': 'Doe', + }) + + # Try the main key. Should be fine. + rs = users.query_2( + user_id__eq='24' + ) + results = sorted([user['username'] for user in rs]) + self.assertEqual(results, ['alice']) + + # Now try the GSI. Also should work. + rs = users.query_2( + username__eq='johndoe', + index='UsernameIndex' + ) + results = sorted([user['username'] for user in rs]) + self.assertEqual(results, ['johndoe']) + + def test_query_with_limits(self): + # Per the DDB team, it's recommended to do many smaller gets with a + # reduced page size. + # Clamp down the page size while ensuring that the correct number of + # results are still returned. + posts = Table.create('posts', schema=[ + HashKey('thread'), + RangeKey('posted_on') + ], throughput={ + 'read': 5, + 'write': 5, + }) + self.addCleanup(posts.delete) + + # Wait for it. + time.sleep(60) + + # Add some data. + test_data_path = os.path.join( + os.path.dirname(__file__), + 'forum_test_data.json' + ) + with open(test_data_path, 'r') as test_data: + data = json.load(test_data) + + with posts.batch_write() as batch: + for post in data: + batch.put_item(post) + + time.sleep(5) + + # Test the reduced page size. + results = posts.query_2( + thread__eq='Favorite chiptune band?', + posted_on__gte='2013-12-24T00:00:00', + max_page_size=2 + ) + + all_posts = list(results) + self.assertEqual( + [post['posted_by'] for post in all_posts], + ['joe', 'jane', 'joe', 'joe', 'jane', 'joe'] + ) + self.assertTrue(results._fetches >= 3) + + def test_query_with_reverse(self): + posts = Table.create('more-posts', schema=[ + HashKey('thread'), + RangeKey('posted_on') + ], throughput={ + 'read': 5, + 'write': 5, + }) + self.addCleanup(posts.delete) + + # Wait for it. + time.sleep(60) + + # Add some data. + test_data_path = os.path.join( + os.path.dirname(__file__), + 'forum_test_data.json' + ) + with open(test_data_path, 'r') as test_data: + data = json.load(test_data) + + with posts.batch_write() as batch: + for post in data: + batch.put_item(post) + + time.sleep(5) + + # Test the default order (ascending). + results = posts.query_2( + thread__eq='Favorite chiptune band?', + posted_on__gte='2013-12-24T00:00:00' + ) + self.assertEqual( + [post['posted_on'] for post in results], + [ + '2013-12-24T12:30:54', + '2013-12-24T12:35:40', + '2013-12-24T13:45:30', + '2013-12-24T14:15:14', + '2013-12-24T14:25:33', + '2013-12-24T15:22:22', + ] + ) + + # Test the explicit ascending order. 
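+        # (reverse=False should reproduce the default, oldest-first order
+        # verified above.)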
+        results = posts.query_2(
+            thread__eq='Favorite chiptune band?',
+            posted_on__gte='2013-12-24T00:00:00',
+            reverse=False
+        )
+        self.assertEqual(
+            [post['posted_on'] for post in results],
+            [
+                '2013-12-24T12:30:54',
+                '2013-12-24T12:35:40',
+                '2013-12-24T13:45:30',
+                '2013-12-24T14:15:14',
+                '2013-12-24T14:25:33',
+                '2013-12-24T15:22:22',
+            ]
+        )
+
+        # Test the explicit descending order.
+        results = posts.query_2(
+            thread__eq='Favorite chiptune band?',
+            posted_on__gte='2013-12-24T00:00:00',
+            reverse=True
+        )
+        self.assertEqual(
+            [post['posted_on'] for post in results],
+            [
+                '2013-12-24T15:22:22',
+                '2013-12-24T14:25:33',
+                '2013-12-24T14:15:14',
+                '2013-12-24T13:45:30',
+                '2013-12-24T12:35:40',
+                '2013-12-24T12:30:54',
+            ]
+        )
+
+        # Test the old, broken style.
+        results = posts.query(
+            thread__eq='Favorite chiptune band?',
+            posted_on__gte='2013-12-24T00:00:00'
+        )
+        self.assertEqual(
+            [post['posted_on'] for post in results],
+            [
+                '2013-12-24T15:22:22',
+                '2013-12-24T14:25:33',
+                '2013-12-24T14:15:14',
+                '2013-12-24T13:45:30',
+                '2013-12-24T12:35:40',
+                '2013-12-24T12:30:54',
+            ]
+        )
+        results = posts.query(
+            thread__eq='Favorite chiptune band?',
+            posted_on__gte='2013-12-24T00:00:00',
+            reverse=True
+        )
+        self.assertEqual(
+            [post['posted_on'] for post in results],
+            [
+                '2013-12-24T12:30:54',
+                '2013-12-24T12:35:40',
+                '2013-12-24T13:45:30',
+                '2013-12-24T14:15:14',
+                '2013-12-24T14:25:33',
+                '2013-12-24T15:22:22',
+            ]
+        )
+
+    def test_query_after_describe_with_gsi(self):
+        # Create a table using a GSI to reproduce the error mentioned in
+        # https://github.com/boto/boto/issues/2828
+        users = Table.create('more_gsi_query_users', schema=[
+            HashKey('user_id')
+        ], throughput={
+            'read': 5,
+            'write': 5
+        }, global_indexes=[
+            GlobalAllIndex('EmailGSIIndex', parts=[
+                HashKey('email')
+            ], throughput={
+                'read': 1,
+                'write': 1
+            })
+        ])
+
+        # Add this function to be called after tearDown()
+        self.addCleanup(users.delete)
+
+        # Wait for it.
+        time.sleep(60)
+
+        # Populate a couple of items.
+        users.put_item(data={
+            'user_id': '7',
+            'username': 'johndoe',
+            'first_name': 'John',
+            'last_name': 'Doe',
+            'email': 'johndoe@johndoe.com',
+        })
+        users.put_item(data={
+            'user_id': '24',
+            'username': 'alice',
+            'first_name': 'Alice',
+            'last_name': 'Expert',
+            'email': 'alice@alice.com',
+        })
+        users.put_item(data={
+            'user_id': '35',
+            'username': 'jane',
+            'first_name': 'Jane',
+            'last_name': 'Doe',
+            'email': 'jane@jane.com',
+        })
+
+        # Try the GSI. It should work.
+        rs = users.query_2(
+            email__eq='johndoe@johndoe.com',
+            index='EmailGSIIndex'
+        )
+
+        for rs_item in rs:
+            self.assertEqual(rs_item['username'], 'johndoe')
+
+        # The issue arose when introspecting the table and calling query_2
+        # after describe().
+        users_hit_api = Table('more_gsi_query_users')
+        users_hit_api.describe()
+
+        # Try the GSI. This is what was going wrong in issue #2828. It should
+        # work fine now.
+        rs = users_hit_api.query_2(
+            email__eq='johndoe@johndoe.com',
+            index='EmailGSIIndex'
+        )
+
+        for rs_item in rs:
+            self.assertEqual(rs_item['username'], 'johndoe')
+
+    def test_update_table_online_indexing_support(self):
+        # Create a table using a GSI to test the DynamoDB online indexing
+        # support: https://github.com/boto/boto/pull/2925
+        users = Table.create('online_indexing_support_users', schema=[
+            HashKey('user_id')
+        ], throughput={
+            'read': 5,
+            'write': 5
+        }, global_indexes=[
+            GlobalAllIndex('EmailGSIIndex', parts=[
+                HashKey('email')
+            ], throughput={
+                'read': 2,
+                'write': 2
+            })
+        ])
+
+        # Add this function to be called after tearDown()
+        self.addCleanup(users.delete)
+
+        # Wait for it.
+        time.sleep(60)
+
+        # Fetch a fresh table description from DynamoDB.
+        users.describe()
+
+        # Assert that everything is fine so far.
+        self.assertEqual(len(users.global_indexes), 1)
+        self.assertEqual(users.global_indexes[0].throughput['read'], 2)
+        self.assertEqual(users.global_indexes[0].throughput['write'], 2)
+
+        # Update a GSI throughput. It should work.
+        users.update_global_secondary_index(global_indexes={
+            'EmailGSIIndex': {
+                'read': 2,
+                'write': 1,
+            }
+        })
+
+        # Wait for it.
+        time.sleep(60)
+
+        # Fetch a fresh table description from DynamoDB.
+        users.describe()
+
+        # Assert that everything is fine so far.
+        self.assertEqual(len(users.global_indexes), 1)
+        self.assertEqual(users.global_indexes[0].throughput['read'], 2)
+        self.assertEqual(users.global_indexes[0].throughput['write'], 1)
+
+        # Update a GSI throughput the old-fashioned way, for compatibility
+        # purposes. It should work.
+        users.update(global_indexes={
+            'EmailGSIIndex': {
+                'read': 3,
+                'write': 2,
+            }
+        })
+
+        # Wait for it.
+        time.sleep(60)
+
+        # Fetch a fresh table description from DynamoDB.
+        users.describe()
+
+        # Assert that everything is fine so far.
+        self.assertEqual(len(users.global_indexes), 1)
+        self.assertEqual(users.global_indexes[0].throughput['read'], 3)
+        self.assertEqual(users.global_indexes[0].throughput['write'], 2)
+
+        # Delete a GSI. It should work.
+        users.delete_global_secondary_index('EmailGSIIndex')
+
+        # Wait for it.
+        time.sleep(60)
+
+        # Fetch a fresh table description from DynamoDB.
+        users.describe()
+
+        # Assert that everything is fine so far.
+        self.assertEqual(len(users.global_indexes), 0)
+
+        # Create a GSI. It should work.
+        users.create_global_secondary_index(
+            global_index=GlobalAllIndex(
+                'AddressGSIIndex', parts=[
+                    HashKey('address', data_type=STRING)
+                ], throughput={
+                    'read': 1,
+                    'write': 1,
+                })
+        )
+        # Wait for it. This operation usually takes much longer than the
+        # others.
+        time.sleep(60 * 10)
+
+        # Fetch a fresh table description from DynamoDB.
+        users.describe()
+
+        # Assert that everything is fine so far.
+        self.assertEqual(len(users.global_indexes), 1)
+        self.assertEqual(users.global_indexes[0].throughput['read'], 1)
+        self.assertEqual(users.global_indexes[0].throughput['write'], 1)
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb2/test_layer1.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb2/test_layer1.py
new file mode 100644
index 0000000000000000000000000000000000000000..98e8a3cb1e31d6241333aaad0070b2f2b4e54462
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/dynamodb2/test_layer1.py
@@ -0,0 +1,363 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Tests for Layer1 of DynamoDB v2 +""" +import time + +from tests.unit import unittest +from boto.dynamodb2 import exceptions +from boto.dynamodb2.layer1 import DynamoDBConnection + + +class DynamoDBv2Layer1Test(unittest.TestCase): + dynamodb = True + + def setUp(self): + self.dynamodb = DynamoDBConnection() + self.table_name = 'test-%d' % int(time.time()) + self.hash_key_name = 'username' + self.hash_key_type = 'S' + self.range_key_name = 'date_joined' + self.range_key_type = 'N' + self.read_units = 5 + self.write_units = 5 + self.attributes = [ + { + 'AttributeName': self.hash_key_name, + 'AttributeType': self.hash_key_type, + }, + { + 'AttributeName': self.range_key_name, + 'AttributeType': self.range_key_type, + } + ] + self.schema = [ + { + 'AttributeName': self.hash_key_name, + 'KeyType': 'HASH', + }, + { + 'AttributeName': self.range_key_name, + 'KeyType': 'RANGE', + }, + ] + self.provisioned_throughput = { + 'ReadCapacityUnits': self.read_units, + 'WriteCapacityUnits': self.write_units, + } + self.lsi = [ + { + 'IndexName': 'MostRecentIndex', + 'KeySchema': [ + { + 'AttributeName': self.hash_key_name, + 'KeyType': 'HASH', + }, + { + 'AttributeName': self.range_key_name, + 'KeyType': 'RANGE', + }, + ], + 'Projection': { + 'ProjectionType': 'KEYS_ONLY', + } + } + ] + + def create_table(self, table_name, attributes, schema, + provisioned_throughput, lsi=None, wait=True): + # Note: This is a slightly different ordering that makes less sense. + result = self.dynamodb.create_table( + attributes, + table_name, + schema, + provisioned_throughput, + local_secondary_indexes=lsi + ) + self.addCleanup(self.dynamodb.delete_table, table_name) + if wait: + while True: + description = self.dynamodb.describe_table(table_name) + if description['Table']['TableStatus'].lower() == 'active': + return result + else: + time.sleep(5) + else: + return result + + def test_integrated(self): + result = self.create_table( + self.table_name, + self.attributes, + self.schema, + self.provisioned_throughput, + self.lsi + ) + self.assertEqual( + result['TableDescription']['TableName'], + self.table_name + ) + + description = self.dynamodb.describe_table(self.table_name) + self.assertEqual(description['Table']['ItemCount'], 0) + + # Create some records. 
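+        # (Raw typed values again: numbers are sent as strings, e.g.
+        # {'N': '1366056668'}, and string sets as {'SS': [...]}.)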
+ record_1_data = { + 'username': {'S': 'johndoe'}, + 'first_name': {'S': 'John'}, + 'last_name': {'S': 'Doe'}, + 'date_joined': {'N': '1366056668'}, + 'friend_count': {'N': '3'}, + 'friends': {'SS': ['alice', 'bob', 'jane']}, + } + r1_result = self.dynamodb.put_item(self.table_name, record_1_data) + + # Get the data. + record_1 = self.dynamodb.get_item(self.table_name, key={ + 'username': {'S': 'johndoe'}, + 'date_joined': {'N': '1366056668'}, + }, consistent_read=True) + self.assertEqual(record_1['Item']['username']['S'], 'johndoe') + self.assertEqual(record_1['Item']['first_name']['S'], 'John') + self.assertEqual(record_1['Item']['friends']['SS'], [ + 'alice', 'bob', 'jane' + ]) + + # Now in a batch. + self.dynamodb.batch_write_item({ + self.table_name: [ + { + 'PutRequest': { + 'Item': { + 'username': {'S': 'jane'}, + 'first_name': {'S': 'Jane'}, + 'last_name': {'S': 'Doe'}, + 'date_joined': {'N': '1366056789'}, + 'friend_count': {'N': '1'}, + 'friends': {'SS': ['johndoe']}, + }, + }, + }, + ] + }) + + # Now a query. + lsi_results = self.dynamodb.query( + self.table_name, + index_name='MostRecentIndex', + key_conditions={ + 'username': { + 'AttributeValueList': [ + {'S': 'johndoe'}, + ], + 'ComparisonOperator': 'EQ', + }, + }, + consistent_read=True + ) + self.assertEqual(lsi_results['Count'], 1) + + results = self.dynamodb.query(self.table_name, key_conditions={ + 'username': { + 'AttributeValueList': [ + {'S': 'jane'}, + ], + 'ComparisonOperator': 'EQ', + }, + 'date_joined': { + 'AttributeValueList': [ + {'N': '1366050000'} + ], + 'ComparisonOperator': 'GT', + } + }, consistent_read=True) + self.assertEqual(results['Count'], 1) + + # Now a scan. + results = self.dynamodb.scan(self.table_name) + self.assertEqual(results['Count'], 2) + s_items = sorted([res['username']['S'] for res in results['Items']]) + self.assertEqual(s_items, ['jane', 'johndoe']) + + self.dynamodb.delete_item(self.table_name, key={ + 'username': {'S': 'johndoe'}, + 'date_joined': {'N': '1366056668'}, + }) + + results = self.dynamodb.scan(self.table_name) + self.assertEqual(results['Count'], 1) + + # Parallel scan (minus client-side threading). + self.dynamodb.batch_write_item({ + self.table_name: [ + { + 'PutRequest': { + 'Item': { + 'username': {'S': 'johndoe'}, + 'first_name': {'S': 'Johann'}, + 'last_name': {'S': 'Does'}, + 'date_joined': {'N': '1366058000'}, + 'friend_count': {'N': '1'}, + 'friends': {'SS': ['jane']}, + }, + }, + 'PutRequest': { + 'Item': { + 'username': {'S': 'alice'}, + 'first_name': {'S': 'Alice'}, + 'last_name': {'S': 'Expert'}, + 'date_joined': {'N': '1366056800'}, + 'friend_count': {'N': '2'}, + 'friends': {'SS': ['johndoe', 'jane']}, + }, + }, + }, + ] + }) + time.sleep(20) + results = self.dynamodb.scan(self.table_name, segment=0, total_segments=2) + self.assertTrue(results['Count'] in [1, 2]) + results = self.dynamodb.scan(self.table_name, segment=1, total_segments=2) + self.assertTrue(results['Count'] in [1, 2]) + + def test_without_range_key(self): + result = self.create_table( + self.table_name, + [ + { + 'AttributeName': self.hash_key_name, + 'AttributeType': self.hash_key_type, + }, + ], + [ + { + 'AttributeName': self.hash_key_name, + 'KeyType': 'HASH', + }, + ], + self.provisioned_throughput + ) + self.assertEqual( + result['TableDescription']['TableName'], + self.table_name + ) + + description = self.dynamodb.describe_table(self.table_name) + self.assertEqual(description['Table']['ItemCount'], 0) + + # Create some records. 
+        record_1_data = {
+            'username': {'S': 'johndoe'},
+            'first_name': {'S': 'John'},
+            'last_name': {'S': 'Doe'},
+            'date_joined': {'N': '1366056668'},
+            'friend_count': {'N': '3'},
+            'friends': {'SS': ['alice', 'bob', 'jane']},
+        }
+        r1_result = self.dynamodb.put_item(self.table_name, record_1_data)
+
+        # Now try a range-less get.
+        johndoe = self.dynamodb.get_item(self.table_name, key={
+            'username': {'S': 'johndoe'},
+        }, consistent_read=True)
+        self.assertEqual(johndoe['Item']['username']['S'], 'johndoe')
+        self.assertEqual(johndoe['Item']['first_name']['S'], 'John')
+        self.assertEqual(johndoe['Item']['friends']['SS'], [
+            'alice', 'bob', 'jane'
+        ])
+
+    def test_throughput_exceeded_regression(self):
+        tiny_tablename = 'TinyThroughput'
+        tiny = self.create_table(
+            tiny_tablename,
+            self.attributes,
+            self.schema,
+            {
+                'ReadCapacityUnits': 1,
+                'WriteCapacityUnits': 1,
+            }
+        )
+
+        self.dynamodb.put_item(tiny_tablename, {
+            'username': {'S': 'johndoe'},
+            'first_name': {'S': 'John'},
+            'last_name': {'S': 'Doe'},
+            'date_joined': {'N': '1366056668'},
+        })
+        self.dynamodb.put_item(tiny_tablename, {
+            'username': {'S': 'jane'},
+            'first_name': {'S': 'Jane'},
+            'last_name': {'S': 'Doe'},
+            'date_joined': {'N': '1366056669'},
+        })
+        self.dynamodb.put_item(tiny_tablename, {
+            'username': {'S': 'alice'},
+            'first_name': {'S': 'Alice'},
+            'last_name': {'S': 'Expert'},
+            'date_joined': {'N': '1366057000'},
+        })
+        time.sleep(20)
+
+        for i in range(100):
+            # This used to raise an exception due to a non-existent
+            # instance variable.
+            self.dynamodb.scan(tiny_tablename)
+
+    def test_recursive(self):
+        result = self.create_table(
+            self.table_name,
+            self.attributes,
+            self.schema,
+            self.provisioned_throughput,
+            self.lsi
+        )
+        self.assertEqual(
+            result['TableDescription']['TableName'],
+            self.table_name
+        )
+
+        description = self.dynamodb.describe_table(self.table_name)
+        self.assertEqual(description['Table']['ItemCount'], 0)
+
+        # Create some records with one being a recursive shape.
+        record_1_data = {
+            'username': {'S': 'johndoe'},
+            'first_name': {'S': 'John'},
+            'last_name': {'S': 'Doe'},
+            'date_joined': {'N': '1366056668'},
+            'friend_count': {'N': '3'},
+            'friend_data': {'M': {'username': {'S': 'alice'},
+                                  'friend_count': {'N': '4'}}}
+        }
+        r1_result = self.dynamodb.put_item(self.table_name, record_1_data)
+
+        # Get the data.
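+        # The 'M' (map) type nests the wire format recursively: each value
+        # inside the map is itself a one-key {type_code: value} dict, and
+        # get_item returns it in the same shape.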
+ record_1 = self.dynamodb.get_item(self.table_name, key={ + 'username': {'S': 'johndoe'}, + 'date_joined': {'N': '1366056668'}, + }, consistent_read=True) + self.assertEqual(record_1['Item']['username']['S'], 'johndoe') + self.assertEqual(record_1['Item']['first_name']['S'], 'John') + recursive_data = record_1['Item']['friend_data']['M'] + self.assertEqual(recursive_data['username']['S'], 'alice') + self.assertEqual(recursive_data['friend_count']['N'], '4') diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..771ca94b9d140f4b66c789a2339b5b4f3f54a61d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/autoscale/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/autoscale/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..53b3484c4eed3d0161398629fed0fa73cc876774 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/autoscale/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) 2011 Reza Lotun http://reza.lotun.name +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/autoscale/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/autoscale/test_cert_verification.py new file mode 100644 index 0000000000000000000000000000000000000000..c56574fe155390f56c5fb4ab653345a2d3b687f7 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/autoscale/test_cert_verification.py @@ -0,0 +1,39 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on all service endpoints validate. +""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.ec2.autoscale + + +class AutoscaleCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + autoscale = True + regions = boto.ec2.autoscale.regions() + + def sample_service_call(self, conn): + conn.get_all_groups() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/autoscale/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/autoscale/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..ae266cb8eca0d8417ab9c39d6766a32fd366c771 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/autoscale/test_connection.py @@ -0,0 +1,182 @@ +# Copyright (c) 2011 Reza Lotun http://reza.lotun.name +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Some unit tests for the AutoscaleConnection
+"""
+
+import time
+from boto.ec2.autoscale import AutoScaleConnection
+from boto.ec2.autoscale.activity import Activity
+from boto.ec2.autoscale.group import AutoScalingGroup, ProcessType
+from boto.ec2.autoscale.launchconfig import LaunchConfiguration
+from boto.ec2.autoscale.policy import AdjustmentType, MetricCollectionTypes, ScalingPolicy
+from boto.ec2.autoscale.scheduled import ScheduledUpdateGroupAction
+from boto.ec2.autoscale.instance import Instance
+from boto.ec2.autoscale.tag import Tag
+from tests.compat import unittest
+
+
+class AutoscaleConnectionTest(unittest.TestCase):
+    ec2 = True
+    autoscale = True
+
+    def test_basic(self):
+        # NB: As it says on the tin, these are really basic tests that only
+        # (lightly) exercise read-only behaviour - and that's only if you
+        # have any autoscale groups to introspect. They are useful, however,
+        # for catching simple errors.
+
+        print('--- running %s tests ---' % self.__class__.__name__)
+        c = AutoScaleConnection()
+
+        self.assertTrue(repr(c).startswith('AutoScaleConnection'))
+
+        groups = c.get_all_groups()
+        for group in groups:
+            self.assertIsInstance(group, AutoScalingGroup)
+
+            # get activities
+            activities = group.get_activities()
+
+            for activity in activities:
+                self.assertIsInstance(activity, Activity)
+
+        # get launch configs
+        configs = c.get_all_launch_configurations()
+        for config in configs:
+            self.assertIsInstance(config, LaunchConfiguration)
+
+        # get policies
+        policies = c.get_all_policies()
+        for policy in policies:
+            self.assertIsInstance(policy, ScalingPolicy)
+
+        # get scheduled actions
+        actions = c.get_all_scheduled_actions()
+        for action in actions:
+            self.assertIsInstance(action, ScheduledUpdateGroupAction)
+
+        # get instances
+        instances = c.get_all_autoscaling_instances()
+        for instance in instances:
+            self.assertIsInstance(instance, Instance)
+
+        # get all scaling process types
+        ptypes = c.get_all_scaling_process_types()
+        for ptype in ptypes:
+            self.assertIsInstance(ptype, ProcessType)
+
+        # get adjustment types
+        adjustments = c.get_all_adjustment_types()
+        for adjustment in adjustments:
+            self.assertIsInstance(adjustment, AdjustmentType)
+
+        # get metrics collection types
+        types = c.get_all_metric_collection_types()
+        self.assertIsInstance(types, MetricCollectionTypes)
+
+        # create the simplest possible AutoScale group
+        # first create the launch configuration
+        time_string = '%d' % int(time.time())
+        lc_name = 'lc-%s' % time_string
+        lc = LaunchConfiguration(name=lc_name, image_id='ami-2272864b',
+                                 instance_type='t1.micro')
+        c.create_launch_configuration(lc)
+        found = False
+        lcs = c.get_all_launch_configurations()
+        for lc in lcs:
+            if lc.name == lc_name:
+                found = True
+                break
+        assert found
+
+        # now create autoscaling group
+        group_name = 'group-%s' % time_string
+        group = AutoScalingGroup(name=group_name, launch_config=lc,
+                                 availability_zones=['us-east-1a'],
+                                 min_size=1, max_size=1)
+        c.create_auto_scaling_group(group)
+        found = False
+        groups = c.get_all_groups()
+        for group in groups:
+            if group.name == group_name:
+                found = True
+                break
+        assert found
+
+        # now create a tag
+        tag = Tag(key='foo', value='bar', resource_id=group_name,
+                  propagate_at_launch=True)
+        c.create_or_update_tags([tag])
+
+        found = False
+        tags = c.get_all_tags()
+        for tag in tags:
+            if tag.resource_id == group_name and tag.key == 'foo':
+                found = True
+                break
+        assert found
+
+        c.delete_tags([tag])
+
+        # shutdown instances and wait for them to disappear
+        group.shutdown_instances()
+        instances = True
+        while instances:
+            time.sleep(5)
+            groups = c.get_all_groups()
+            for group in groups:
+                if group.name == group_name:
+                    if not group.instances:
+                        instances = False
+
+        group.delete()
+        lc.delete()
+
+        found = True
+        while found:
+            found = False
+            time.sleep(5)
+            tags = c.get_all_tags()
+            for tag in tags:
+                if tag.resource_id == group_name and tag.key == 'foo':
+                    found = True
+
+        assert not found
+
+        print('--- tests completed ---')
+
+    def test_ebs_optimized_regression(self):
+        c = AutoScaleConnection()
+        time_string = '%d' % int(time.time())
+        lc_name = 'lc-%s' % time_string
+        lc = LaunchConfiguration(
+            name=lc_name,
+            image_id='ami-2272864b',
+            instance_type='t1.micro',
+            ebs_optimized=True
+        )
+        # This failed due to the difference between native Python
+        # ``True``/``False`` & the expected string variants.
+        c.create_launch_configuration(lc)
+        self.addCleanup(c.delete_launch_configuration, lc_name)
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/cloudwatch/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/cloudwatch/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..771ca94b9d140f4b66c789a2339b5b4f3f54a61d
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/cloudwatch/__init__.py
@@ -0,0 +1,20 @@
+# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/cloudwatch/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/cloudwatch/test_cert_verification.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a5c90d6cbcefd56f37da8a87ba2996a6e2f98e4
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/cloudwatch/test_cert_verification.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on all service endpoints validate. +""" +from tests.integration import ServiceCertVerificationTest + +import boto.ec2.cloudwatch +from tests.compat import unittest + + +class CloudWatchCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + cloudwatch = True + regions = boto.ec2.cloudwatch.regions() + + def sample_service_call(self, conn): + conn.describe_alarms() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/cloudwatch/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/cloudwatch/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..a03e7f34b5a591644c654af0b064a8d733d9ba36 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/cloudwatch/test_connection.py @@ -0,0 +1,273 @@ +# Copyright (c) 2010 Hunter Blanks http://artifex.org/~hblanks/ +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Initial, and very limited, unit tests for CloudWatchConnection. 
+""" + +import datetime + +from boto.ec2.cloudwatch import CloudWatchConnection +from tests.compat import unittest, OrderedDict + +# HTTP response body for CloudWatchConnection.describe_alarms +DESCRIBE_ALARMS_BODY = """ + + mynexttoken + + + 2011-11-18T23:43:59.111Z + + {"version":"1.0","queryDate":"2011-11-18T23:43:59.089+0000","startDate":"2011-11-18T23:30:00.000+0000","statistic":"Maximum","period":60,"recentDatapoints":[1.0,null,null,null,null,null,null,null,null,null,1.0],"threshold":1.0} + arn:aws:cloudwatch:us-east-1:1234:alarm:FancyAlarm + 2011-11-18T23:43:58.489Z + FancyAlarm + OK + 60 + + true + AcmeCo/Cronjobs + 15 + 1.0 + Maximum + + arn:aws:sns:us-east-1:1234:Alerts + + Threshold Crossed: 2 datapoints were not less than the threshold (1.0). The most recent datapoints: [1.0, 1.0]. + + + Job + ANiceCronJob + + + LessThanThreshold + Success + + + 2011-11-19T08:09:20.655Z + + {"version":"1.0","queryDate":"2011-11-19T08:09:20.633+0000","startDate":"2011-11-19T08:07:00.000+0000","statistic":"Maximum","period":60,"recentDatapoints":[1.0],"threshold":1.0} + arn:aws:cloudwatch:us-east-1:1234:alarm:SuprtFancyAlarm + 2011-11-19T16:20:19.687Z + SuperFancyAlarm + OK + 60 + + true + AcmeCo/CronJobs + 60 + 1.0 + Maximum + + arn:aws:sns:us-east-1:1234:alerts + + Threshold Crossed: 1 datapoint (1.0) was not less than the threshold (1.0). + + + Job + ABadCronJob + + + GreaterThanThreshold + Success + + + + + f621311-1463-11e1-95c3-312389123 + +""" + + +class CloudWatchConnectionTest(unittest.TestCase): + ec2 = True + + def test_build_list_params(self): + c = CloudWatchConnection() + params = {} + c.build_list_params( + params, ['thing1', 'thing2', 'thing3'], 'ThingName%d') + expected_params = { + 'ThingName1': 'thing1', + 'ThingName2': 'thing2', + 'ThingName3': 'thing3' + } + self.assertEqual(params, expected_params) + + def test_build_put_params_one(self): + c = CloudWatchConnection() + params = {} + c.build_put_params(params, name="N", value=1, dimensions={"D": "V"}) + expected_params = { + 'MetricData.member.1.MetricName': 'N', + 'MetricData.member.1.Value': 1, + 'MetricData.member.1.Dimensions.member.1.Name': 'D', + 'MetricData.member.1.Dimensions.member.1.Value': 'V', + } + self.assertEqual(params, expected_params) + + def test_build_put_params_multiple_metrics(self): + c = CloudWatchConnection() + params = {} + c.build_put_params(params, name=["N", "M"], value=[1, 2], dimensions={"D": "V"}) + expected_params = { + 'MetricData.member.1.MetricName': 'N', + 'MetricData.member.1.Value': 1, + 'MetricData.member.1.Dimensions.member.1.Name': 'D', + 'MetricData.member.1.Dimensions.member.1.Value': 'V', + 'MetricData.member.2.MetricName': 'M', + 'MetricData.member.2.Value': 2, + 'MetricData.member.2.Dimensions.member.1.Name': 'D', + 'MetricData.member.2.Dimensions.member.1.Value': 'V', + } + self.assertEqual(params, expected_params) + + def test_build_put_params_multiple_dimensions(self): + c = CloudWatchConnection() + params = {} + c.build_put_params(params, name="N", value=[1, 2], dimensions=[{"D": "V"}, {"D": "W"}]) + expected_params = { + 'MetricData.member.1.MetricName': 'N', + 'MetricData.member.1.Value': 1, + 'MetricData.member.1.Dimensions.member.1.Name': 'D', + 'MetricData.member.1.Dimensions.member.1.Value': 'V', + 'MetricData.member.2.MetricName': 'N', + 'MetricData.member.2.Value': 2, + 'MetricData.member.2.Dimensions.member.1.Name': 'D', + 'MetricData.member.2.Dimensions.member.1.Value': 'W', + } + self.assertEqual(params, expected_params) + + def 
test_build_put_params_multiple_parameter_dimension(self): + self.maxDiff = None + c = CloudWatchConnection() + params = {} + dimensions = [OrderedDict((("D1", "V"), ("D2", "W")))] + c.build_put_params(params, + name="N", + value=[1], + dimensions=dimensions) + expected_params = { + 'MetricData.member.1.MetricName': 'N', + 'MetricData.member.1.Value': 1, + 'MetricData.member.1.Dimensions.member.1.Name': 'D1', + 'MetricData.member.1.Dimensions.member.1.Value': 'V', + 'MetricData.member.1.Dimensions.member.2.Name': 'D2', + 'MetricData.member.1.Dimensions.member.2.Value': 'W', + } + self.assertEqual(params, expected_params) + + def test_build_get_params_multiple_parameter_dimension1(self): + self.maxDiff = None + c = CloudWatchConnection() + params = {} + dimensions = OrderedDict((("D1", "V"), ("D2", "W"))) + c.build_dimension_param(dimensions, params) + expected_params = { + 'Dimensions.member.1.Name': 'D1', + 'Dimensions.member.1.Value': 'V', + 'Dimensions.member.2.Name': 'D2', + 'Dimensions.member.2.Value': 'W', + } + self.assertEqual(params, expected_params) + + def test_build_get_params_multiple_parameter_dimension2(self): + self.maxDiff = None + c = CloudWatchConnection() + params = {} + dimensions = OrderedDict((("D1", ["V1", "V2"]), ("D2", "W"), ("D3", None))) + c.build_dimension_param(dimensions, params) + expected_params = { + 'Dimensions.member.1.Name': 'D1', + 'Dimensions.member.1.Value': 'V1', + 'Dimensions.member.2.Name': 'D1', + 'Dimensions.member.2.Value': 'V2', + 'Dimensions.member.3.Name': 'D2', + 'Dimensions.member.3.Value': 'W', + 'Dimensions.member.4.Name': 'D3', + } + self.assertEqual(params, expected_params) + + def test_build_put_params_invalid(self): + c = CloudWatchConnection() + params = {} + try: + c.build_put_params(params, name=["N", "M"], value=[1, 2, 3]) + except: + pass + else: + self.fail("Should not accept lists of different lengths.") + + def test_get_metric_statistics(self): + c = CloudWatchConnection() + m = c.list_metrics()[0] + end = datetime.datetime.utcnow() + start = end - datetime.timedelta(hours=24 * 14) + c.get_metric_statistics( + 3600 * 24, start, end, m.name, m.namespace, ['Average', 'Sum']) + + def test_put_metric_data(self): + c = CloudWatchConnection() + now = datetime.datetime.utcnow() + name, namespace = 'unit-test-metric', 'boto-unit-test' + c.put_metric_data(namespace, name, 5, now, 'Bytes') + + # Uncomment the following lines for a slower but more thorough + # test. (Hurrah for eventual consistency...) 
+ # + # metric = Metric(connection=c) + # metric.name = name + # metric.namespace = namespace + # time.sleep(60) + # l = metric.query( + # now - datetime.timedelta(seconds=60), + # datetime.datetime.utcnow(), + # 'Average') + # assert l + # for row in l: + # self.assertEqual(row['Unit'], 'Bytes') + # self.assertEqual(row['Average'], 5.0) + + def test_describe_alarms(self): + c = CloudWatchConnection() + + def make_request(*args, **kwargs): + class Body(object): + def __init__(self): + self.status = 200 + + def read(self): + return DESCRIBE_ALARMS_BODY + return Body() + + c.make_request = make_request + alarms = c.describe_alarms() + self.assertEquals(alarms.next_token, 'mynexttoken') + self.assertEquals(alarms[0].name, 'FancyAlarm') + self.assertEquals(alarms[0].comparison, '<') + self.assertEquals(alarms[0].dimensions, {u'Job': [u'ANiceCronJob']}) + self.assertEquals(alarms[1].name, 'SuperFancyAlarm') + self.assertEquals(alarms[1].comparison, '>') + self.assertEquals(alarms[1].dimensions, {u'Job': [u'ABadCronJob']}) + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/elb/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/elb/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..771ca94b9d140f4b66c789a2339b5b4f3f54a61d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/elb/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/elb/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/elb/test_cert_verification.py new file mode 100644 index 0000000000000000000000000000000000000000..1b6e2c62d319d2cd5c18945aa3d0ee61b5c0fab0 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/elb/test_cert_verification.py @@ -0,0 +1,39 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on all service endpoints validate. +""" + +from tests.integration import ServiceCertVerificationTest + +from tests.compat import unittest +import boto.ec2.elb + + +class ELBCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + elb = True + regions = boto.ec2.elb.regions() + + def sample_service_call(self, conn): + conn.get_all_load_balancers() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/elb/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/elb/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..7f1ca80bb84acaa111d826158ab641cd5f4fbc2d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/elb/test_connection.py @@ -0,0 +1,297 @@ +# Copyright (c) 2010 Hunter Blanks http://artifex.org/~hblanks/ +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Initial, and very limited, unit tests for ELBConnection. 
+""" + +import boto +import time +from tests.compat import unittest +from boto.ec2.elb import ELBConnection +import boto.ec2.elb + + +class ELBConnectionTest(unittest.TestCase): + ec2 = True + + def setUp(self): + """Creates a named load balancer that can be safely + deleted at the end of each test""" + self.conn = ELBConnection() + self.name = 'elb-boto-unit-test' + self.availability_zones = ['us-east-1a'] + self.listeners = [(80, 8000, 'HTTP')] + self.balancer = self.conn.create_load_balancer( + self.name, self.availability_zones, self.listeners) + + # S3 bucket for log tests + self.s3 = boto.connect_s3() + self.timestamp = str(int(time.time())) + self.bucket_name = 'boto-elb-%s' % self.timestamp + self.bucket = self.s3.create_bucket(self.bucket_name) + self.bucket.set_canned_acl('public-read-write') + self.addCleanup(self.cleanup_bucket, self.bucket) + + def cleanup_bucket(self, bucket): + for key in bucket.get_all_keys(): + key.delete() + bucket.delete() + + def tearDown(self): + """ Deletes the test load balancer after every test. + It does not delete EVERY load balancer in your account""" + self.balancer.delete() + + def test_build_list_params(self): + params = {} + self.conn.build_list_params( + params, ['thing1', 'thing2', 'thing3'], 'ThingName%d') + expected_params = { + 'ThingName1': 'thing1', + 'ThingName2': 'thing2', + 'ThingName3': 'thing3' + } + self.assertEqual(params, expected_params) + + # TODO: for these next tests, consider sleeping until our load + # balancer comes up, then testing for connectivity to + # balancer.dns_name, along the lines of the existing EC2 unit tests. + + def test_create_load_balancer(self): + self.assertEqual(self.balancer.name, self.name) + self.assertEqual(self.balancer.availability_zones, + self.availability_zones) + self.assertEqual(self.balancer.listeners, self.listeners) + + balancers = self.conn.get_all_load_balancers() + self.assertEqual([lb.name for lb in balancers], [self.name]) + + def test_create_load_balancer_listeners(self): + more_listeners = [(443, 8001, 'HTTP')] + self.conn.create_load_balancer_listeners(self.name, more_listeners) + balancers = self.conn.get_all_load_balancers() + self.assertEqual([lb.name for lb in balancers], [self.name]) + self.assertEqual( + sorted(l.get_tuple() for l in balancers[0].listeners), + sorted(self.listeners + more_listeners) + ) + + def test_delete_load_balancer_listeners(self): + mod_listeners = [(80, 8000, 'HTTP'), (443, 8001, 'HTTP')] + mod_name = self.name + "-mod" + self.mod_balancer = self.conn.create_load_balancer( + mod_name, self.availability_zones, mod_listeners) + + mod_balancers = self.conn.get_all_load_balancers( + load_balancer_names=[mod_name]) + self.assertEqual([lb.name for lb in mod_balancers], [mod_name]) + self.assertEqual( + sorted([l.get_tuple() for l in mod_balancers[0].listeners]), + sorted(mod_listeners)) + + self.conn.delete_load_balancer_listeners(self.mod_balancer.name, [443]) + mod_balancers = self.conn.get_all_load_balancers( + load_balancer_names=[mod_name]) + self.assertEqual([lb.name for lb in mod_balancers], [mod_name]) + self.assertEqual([l.get_tuple() for l in mod_balancers[0].listeners], + mod_listeners[:1]) + self.mod_balancer.delete() + + def test_create_load_balancer_listeners_with_policies(self): + more_listeners = [(443, 8001, 'HTTP')] + self.conn.create_load_balancer_listeners(self.name, more_listeners) + + lb_policy_name = 'lb-policy' + self.conn.create_lb_cookie_stickiness_policy( + 1000, self.name, lb_policy_name) + self.conn.set_lb_policies_of_listener( 
+ self.name, self.listeners[0][0], lb_policy_name) + + app_policy_name = 'app-policy' + self.conn.create_app_cookie_stickiness_policy( + 'appcookie', self.name, app_policy_name) + self.conn.set_lb_policies_of_listener( + self.name, more_listeners[0][0], app_policy_name) + + balancers = self.conn.get_all_load_balancers( + load_balancer_names=[self.name]) + self.assertEqual([lb.name for lb in balancers], [self.name]) + self.assertEqual( + sorted(l.get_tuple() for l in balancers[0].listeners), + sorted(self.listeners + more_listeners) + ) + # Policy names should be checked here once they are supported + # in the Listener object. + + def test_create_load_balancer_backend_with_policies(self): + other_policy_name = 'enable-proxy-protocol' + backend_port = 8081 + self.conn.create_lb_policy( + self.name, other_policy_name, + 'ProxyProtocolPolicyType', {'ProxyProtocol': True}) + self.conn.set_lb_policies_of_backend_server( + self.name, backend_port, [other_policy_name]) + + balancers = self.conn.get_all_load_balancers( + load_balancer_names=[self.name]) + self.assertEqual([lb.name for lb in balancers], [self.name]) + self.assertEqual(len(balancers[0].policies.other_policies), 1) + self.assertEqual(balancers[0].policies.other_policies[0].policy_name, + other_policy_name) + self.assertEqual(len(balancers[0].backends), 1) + self.assertEqual(balancers[0].backends[0].instance_port, backend_port) + self.assertEqual(balancers[0].backends[0].policies[0].policy_name, + other_policy_name) + + self.conn.set_lb_policies_of_backend_server(self.name, backend_port, + []) + + balancers = self.conn.get_all_load_balancers( + load_balancer_names=[self.name]) + self.assertEqual([lb.name for lb in balancers], [self.name]) + self.assertEqual(len(balancers[0].policies.other_policies), 1) + self.assertEqual(len(balancers[0].backends), 0) + + def test_create_load_balancer_complex_listeners(self): + complex_listeners = [ + (8080, 80, 'HTTP', 'HTTP'), + (2525, 25, 'TCP', 'TCP'), + ] + + self.conn.create_load_balancer_listeners( + self.name, + complex_listeners=complex_listeners + ) + + balancers = self.conn.get_all_load_balancers( + load_balancer_names=[self.name] + ) + self.assertEqual([lb.name for lb in balancers], [self.name]) + self.assertEqual( + sorted(l.get_complex_tuple() for l in balancers[0].listeners), + # We need an extra 'HTTP' here over what ``self.listeners`` uses. 
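+            # The four-element tuples from get_complex_tuple() carry both the
+            # load balancer protocol and the instance protocol, hence the
+            # extra 'HTTP' above.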
+ sorted([(80, 8000, 'HTTP', 'HTTP')] + complex_listeners) + ) + + def test_load_balancer_access_log(self): + attributes = self.balancer.get_attributes() + + self.assertEqual(False, attributes.access_log.enabled) + + attributes.access_log.enabled = True + attributes.access_log.s3_bucket_name = self.bucket_name + attributes.access_log.s3_bucket_prefix = 'access-logs' + attributes.access_log.emit_interval = 5 + + self.conn.modify_lb_attribute(self.balancer.name, 'accessLog', + attributes.access_log) + + new_attributes = self.balancer.get_attributes() + + self.assertEqual(True, new_attributes.access_log.enabled) + self.assertEqual(self.bucket_name, + new_attributes.access_log.s3_bucket_name) + self.assertEqual('access-logs', + new_attributes.access_log.s3_bucket_prefix) + self.assertEqual(5, new_attributes.access_log.emit_interval) + + def test_load_balancer_get_attributes(self): + attributes = self.balancer.get_attributes() + connection_draining = self.conn.get_lb_attribute(self.balancer.name, + 'ConnectionDraining') + self.assertEqual(connection_draining.enabled, + attributes.connection_draining.enabled) + self.assertEqual(connection_draining.timeout, + attributes.connection_draining.timeout) + + access_log = self.conn.get_lb_attribute(self.balancer.name, + 'AccessLog') + self.assertEqual(access_log.enabled, attributes.access_log.enabled) + self.assertEqual(access_log.s3_bucket_name, + attributes.access_log.s3_bucket_name) + self.assertEqual(access_log.s3_bucket_prefix, + attributes.access_log.s3_bucket_prefix) + self.assertEqual(access_log.emit_interval, + attributes.access_log.emit_interval) + + cross_zone_load_balancing = self.conn.get_lb_attribute( + self.balancer.name, 'CrossZoneLoadBalancing') + self.assertEqual(cross_zone_load_balancing, + attributes.cross_zone_load_balancing.enabled) + + def change_and_verify_load_balancer_connection_draining( + self, enabled, timeout=None): + attributes = self.balancer.get_attributes() + + attributes.connection_draining.enabled = enabled + if timeout is not None: + attributes.connection_draining.timeout = timeout + + self.conn.modify_lb_attribute( + self.balancer.name, 'ConnectionDraining', + attributes.connection_draining) + + attributes = self.balancer.get_attributes() + self.assertEqual(enabled, attributes.connection_draining.enabled) + if timeout is not None: + self.assertEqual(timeout, attributes.connection_draining.timeout) + + def test_load_balancer_connection_draining_config(self): + self.change_and_verify_load_balancer_connection_draining(True, 128) + self.change_and_verify_load_balancer_connection_draining(True, 256) + self.change_and_verify_load_balancer_connection_draining(False) + self.change_and_verify_load_balancer_connection_draining(True, 64) + + def test_set_load_balancer_policies_of_listeners(self): + more_listeners = [(443, 8001, 'HTTP')] + self.conn.create_load_balancer_listeners(self.name, more_listeners) + + lb_policy_name = 'lb-policy' + self.conn.create_lb_cookie_stickiness_policy( + 1000, + self.name, + lb_policy_name + ) + self.conn.set_lb_policies_of_listener( + self.name, + self.listeners[0][0], + lb_policy_name + ) + + # Try to remove the policy by passing empty list. + # http://docs.aws.amazon.com/ElasticLoadBalancing/latest/APIReference/API_SetLoadBalancerPoliciesOfListener.html + # documents this as the way to remove policies. 
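+        # Per the linked docs, the call replaces the listener's entire
+        # policy set, so an empty list detaches all policies.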
+ self.conn.set_lb_policies_of_listener( + self.name, + self.listeners[0][0], + [] + ) + + def test_can_make_sigv4_call(self): + connection = boto.ec2.elb.connect_to_region('eu-central-1') + lbs = connection.get_all_load_balancers() + self.assertTrue(isinstance(lbs, list)) + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/test_cert_verification.py new file mode 100644 index 0000000000000000000000000000000000000000..5ca8ceca013a222947c0297397123e3a5be4cf86 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/test_cert_verification.py @@ -0,0 +1,39 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on all service endpoints validate. +""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.ec2 + + +class EC2CertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + ec2 = True + regions = boto.ec2.regions() + + def sample_service_call(self, conn): + conn.get_all_reservations() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..f01179879b152ca4e6f19bc61b432b51b55acee4 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/test_connection.py @@ -0,0 +1,246 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2009, Eucalyptus Systems, Inc. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Some unit tests for the EC2Connection
+"""
+
+import unittest
+import time
+import telnetlib
+import socket
+
+from nose.plugins.attrib import attr
+from boto.ec2.connection import EC2Connection
+from boto.exception import EC2ResponseError
+import boto.ec2
+
+
+class EC2ConnectionTest(unittest.TestCase):
+    ec2 = True
+
+    @attr('notdefault')
+    def test_launch_permissions(self):
+        # This is my user_id; if you want to run these tests, replace it
+        # with yours or they won't work.
+        user_id = '963068290131'
+        print('--- running EC2Connection tests ---')
+        c = EC2Connection()
+        # get list of private AMIs
+        rs = c.get_all_images(owners=[user_id])
+        assert len(rs) > 0
+        # now pick the first one
+        image = rs[0]
+        # temporarily make this image runnable by everyone
+        status = image.set_launch_permissions(group_names=['all'])
+        assert status
+        d = image.get_launch_permissions()
+        assert 'groups' in d
+        assert len(d['groups']) > 0
+        # now remove that permission
+        status = image.remove_launch_permissions(group_names=['all'])
+        assert status
+        time.sleep(10)
+        d = image.get_launch_permissions()
+        assert 'groups' not in d
+
+    def test_1_basic(self):
+        # create 2 new security groups
+        c = EC2Connection()
+        group1_name = 'test-%d' % int(time.time())
+        group_desc = 'This is a security group created during unit testing'
+        group1 = c.create_security_group(group1_name, group_desc)
+        time.sleep(2)
+        group2_name = 'test-%d' % int(time.time())
+        group_desc = 'This is a security group created during unit testing'
+        group2 = c.create_security_group(group2_name, group_desc)
+        # now get a listing of all security groups and look for our new one
+        rs = c.get_all_security_groups()
+        found = False
+        for g in rs:
+            if g.name == group1_name:
+                found = True
+        assert found
+        # now pass arg to filter results to only our new group
+        rs = c.get_all_security_groups([group1_name])
+        assert len(rs) == 1
+        # try some group to group authorizations/revocations
+        # first try the old style
+        status = c.authorize_security_group(group1.name,
+                                            group2.name,
+                                            group2.owner_id)
+        assert status
+        status = c.revoke_security_group(group1.name,
+                                         group2.name,
+                                         group2.owner_id)
+        assert status
+        # now try specifying a specific port
+        status = c.authorize_security_group(group1.name,
+                                            group2.name,
+                                            group2.owner_id,
+                                            'tcp', 22, 22)
+        assert status
+        status = c.revoke_security_group(group1.name,
+                                         group2.name,
+                                         group2.owner_id,
+                                         'tcp', 22, 22)
+        assert status
+
+        # now delete the second security group
+        status = c.delete_security_group(group2_name)
+        # now make sure it's really gone
+        rs = c.get_all_security_groups()
+        found = False
+        for g in rs:
+            if g.name == group2_name:
+                found = True
+        assert not found
+
+        group = group1
+
+        # now try to launch apache image with our new security group
+        rs = c.get_all_images()
+        img_loc = 'ec2-public-images/fedora-core4-apache.manifest.xml'
+        for image in rs:
+            if image.location == img_loc:
+                break
+        reservation = image.run(security_groups=[group.name])
+        instance = reservation.instances[0]
+        while instance.state != 'running':
+            print('\tinstance is %s' % instance.state)
+            time.sleep(30)
+            instance.update()
+        # instance is now running, try to telnet to port 80
+        t = telnetlib.Telnet()
+        try:
+            t.open(instance.dns_name, 80)
+        except socket.error:
+            pass
+        # now open up port 80 and try again, it should work
+        group.authorize('tcp', 80, 80, '0.0.0.0/0')
+        t.open(instance.dns_name, 80)
+        t.close()
+        # now revoke authorization and try again
+        group.revoke('tcp', 80, 80, '0.0.0.0/0')
+        try:
+            t.open(instance.dns_name, 80)
+        except socket.error:
+            pass
+        # now kill the instance and delete the security group
+        instance.terminate()
+
+        # check that state and previous_state have updated
+        assert instance.state == 'shutting-down'
+        assert instance.state_code == 32
+        assert instance.previous_state == 'running'
+        assert instance.previous_state_code == 16
+
+        # unfortunately, I can't delete the sg within this script
+        #sg.delete()
+
+        # create a new key pair
+        key_name = 'test-%d' % int(time.time())
+        status = c.create_key_pair(key_name)
+        assert status
+        # now get a listing of all key pairs and look for our new one
+        rs = c.get_all_key_pairs()
+        found = False
+        for k in rs:
+            if k.name == key_name:
+                found = True
+        assert found
+        # now pass arg to filter results to only our new key pair
+        rs = c.get_all_key_pairs([key_name])
+        assert len(rs) == 1
+        key_pair = rs[0]
+        # now delete the key pair
+        status = c.delete_key_pair(key_name)
+        # now make sure it's really gone
+        rs = c.get_all_key_pairs()
+        found = False
+        for k in rs:
+            if k.name == key_name:
+                found = True
+        assert not found
+
+        # short test around Paid AMI capability
+        demo_paid_ami_id = 'ami-bd9d78d4'
+        demo_paid_ami_product_code = 'A79EC0DB'
+        l = c.get_all_images([demo_paid_ami_id])
+        assert len(l) == 1
+        assert len(l[0].product_codes) == 1
+        assert l[0].product_codes[0] == demo_paid_ami_product_code
+
+        print('--- tests completed ---')
+
+    def test_dry_run(self):
+        c = EC2Connection()
+        dry_run_msg = 'Request would have succeeded, but DryRun flag is set.'
+
+        try:
+            rs = c.get_all_images(dry_run=True)
+            self.fail("Should have gotten an exception")
+        except EC2ResponseError as e:
+            self.assertTrue(dry_run_msg in str(e))
+
+        try:
+            rs = c.run_instances(
+                image_id='ami-a0cd60c9',
+                instance_type='m1.small',
+                dry_run=True
+            )
+            self.fail("Should have gotten an exception")
+        except EC2ResponseError as e:
+            self.assertTrue(dry_run_msg in str(e))
+
+        # Need an actual instance for the rest of this...
+        rs = c.run_instances(
+            image_id='ami-a0cd60c9',
+            instance_type='m1.small'
+        )
+        time.sleep(120)
+
+        try:
+            rs = c.stop_instances(
+                instance_ids=[rs.instances[0].id],
+                dry_run=True
+            )
+            self.fail("Should have gotten an exception")
+        except EC2ResponseError as e:
+            self.assertTrue(dry_run_msg in str(e))
+
+        try:
+            rs = c.terminate_instances(
+                instance_ids=[rs.instances[0].id],
+                dry_run=True
+            )
+            self.fail("Should have gotten an exception")
+        except EC2ResponseError as e:
+            self.assertTrue(dry_run_msg in str(e))
+
+        # And kill it.
+ rs.instances[0].terminate() + + def test_can_get_all_instances_sigv4(self): + connection = boto.ec2.connect_to_region('eu-central-1') + self.assertTrue(isinstance(connection.get_all_instances(), list)) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/vpc/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/vpc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/vpc/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/vpc/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..6e7b28896bf779c894a9a7cefdec9e5ff8f85bf8 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2/vpc/test_connection.py @@ -0,0 +1,213 @@ +#!/usr/bin/env python +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import time + +import boto +from boto.compat import six +from tests.compat import unittest +from boto.ec2.networkinterface import NetworkInterfaceCollection +from boto.ec2.networkinterface import NetworkInterfaceSpecification +from boto.ec2.networkinterface import PrivateIPAddress + + +class TestVPCConnection(unittest.TestCase): + + def setUp(self): + # Registry of instances to be removed + self.instances = [] + # Registry for cleaning up the vpc after all instances are terminated + # in the format [ ( func, (arg1, ... argn) ) ] + self.post_terminate_cleanups = [] + + self.api = boto.connect_vpc() + self.vpc = self.api.create_vpc('10.0.0.0/16') + + # Need time for the VPC to be in place. :/ + time.sleep(5) + self.subnet = self.api.create_subnet(self.vpc.id, '10.0.0.0/24') + # Register the subnet to be deleted after instance termination + self.post_terminate_cleanups.append((self.api.delete_subnet, (self.subnet.id,))) + + # Need time for the subnet to be in place. + time.sleep(10) + + def post_terminate_cleanup(self): + """Helper to run clean up tasks after instances are removed.""" + for fn, args in self.post_terminate_cleanups: + fn(*args) + # Give things time to catch up each time + time.sleep(10) + + # Now finally delete the vpc + if self.vpc: + self.api.delete_vpc(self.vpc.id) + + def terminate_instances(self): + """Helper to remove all instances and kick off additional cleanup + once they are terminated. 
+ """ + for instance in self.instances: + self.terminate_instance(instance) + self.post_terminate_cleanup() + + def terminate_instance(self, instance): + instance.terminate() + for i in six.moves.range(300): + instance.update() + if instance.state == 'terminated': + # Give it a litle more time to settle. + time.sleep(30) + return + else: + time.sleep(10) + + def delete_elastic_ip(self, eip): + # Fetch a new copy of the eip so we're up to date + new_eip = self.api.get_all_addresses([eip.public_ip])[0] + if new_eip.association_id: + new_eip.disassociate() + new_eip.release() + time.sleep(10) + + def test_multi_ip_create(self): + interface = NetworkInterfaceSpecification( + device_index=0, subnet_id=self.subnet.id, + private_ip_address='10.0.0.21', + description="This is a test interface using boto.", + delete_on_termination=True, private_ip_addresses=[ + PrivateIPAddress(private_ip_address='10.0.0.22', + primary=False), + PrivateIPAddress(private_ip_address='10.0.0.23', + primary=False), + PrivateIPAddress(private_ip_address='10.0.0.24', + primary=False)]) + interfaces = NetworkInterfaceCollection(interface) + + reservation = self.api.run_instances(image_id='ami-a0cd60c9', instance_type='m1.small', + network_interfaces=interfaces) + # Give it a few seconds to start up. + time.sleep(10) + instance = reservation.instances[0] + self.addCleanup(self.terminate_instance, instance) + retrieved = self.api.get_all_reservations(instance_ids=[instance.id]) + self.assertEqual(len(retrieved), 1) + retrieved_instances = retrieved[0].instances + self.assertEqual(len(retrieved_instances), 1) + retrieved_instance = retrieved_instances[0] + + self.assertEqual(len(retrieved_instance.interfaces), 1) + interface = retrieved_instance.interfaces[0] + + private_ip_addresses = interface.private_ip_addresses + self.assertEqual(len(private_ip_addresses), 4) + self.assertEqual(private_ip_addresses[0].private_ip_address, + '10.0.0.21') + self.assertEqual(private_ip_addresses[0].primary, True) + self.assertEqual(private_ip_addresses[1].private_ip_address, + '10.0.0.22') + self.assertEqual(private_ip_addresses[2].private_ip_address, + '10.0.0.23') + self.assertEqual(private_ip_addresses[3].private_ip_address, + '10.0.0.24') + + def test_associate_public_ip(self): + # Supplying basically nothing ought to work. + interface = NetworkInterfaceSpecification( + associate_public_ip_address=True, + subnet_id=self.subnet.id, + # Just for testing. + delete_on_termination=True + ) + interfaces = NetworkInterfaceCollection(interface) + + reservation = self.api.run_instances( + image_id='ami-a0cd60c9', + instance_type='m1.small', + network_interfaces=interfaces + ) + instance = reservation.instances[0] + self.instances.append(instance) + self.addCleanup(self.terminate_instances) + + # Give it a **LONG** time to start up. + # Because the public IP won't be there right away. + time.sleep(60) + + retrieved = self.api.get_all_reservations( + instance_ids=[ + instance.id + ] + ) + self.assertEqual(len(retrieved), 1) + retrieved_instances = retrieved[0].instances + self.assertEqual(len(retrieved_instances), 1) + retrieved_instance = retrieved_instances[0] + + self.assertEqual(len(retrieved_instance.interfaces), 1) + interface = retrieved_instance.interfaces[0] + + # There ought to be a public IP there. + # We can't reason about the IP itself, so just make sure it vaguely + # resembles an IP (& isn't empty/``None``)... 
+ self.assertTrue(interface.publicIp.count('.') >= 3) + + def test_associate_elastic_ip(self): + interface = NetworkInterfaceSpecification( + associate_public_ip_address=False, + subnet_id=self.subnet.id, + # Just for testing. + delete_on_termination=True + ) + interfaces = NetworkInterfaceCollection(interface) + + reservation = self.api.run_instances( + image_id='ami-a0cd60c9', + instance_type='m1.small', + network_interfaces=interfaces + ) + instance = reservation.instances[0] + # Register instance to be removed + self.instances.append(instance) + # Add terminate instances helper as cleanup command + self.addCleanup(self.terminate_instances) + + # Create an internet gateway so we can attach an eip + igw = self.api.create_internet_gateway() + # Wait on gateway before attaching + time.sleep(5) + # Attach and register clean up tasks + self.api.attach_internet_gateway(igw.id, self.vpc.id) + self.post_terminate_cleanups.append((self.api.detach_internet_gateway, (igw.id, self.vpc.id))) + self.post_terminate_cleanups.append((self.api.delete_internet_gateway, (igw.id,))) + + # Allocate an elastic ip to this vpc + eip = self.api.allocate_address('vpc') + self.post_terminate_cleanups.append((self.delete_elastic_ip, (eip,))) + + # Wait on instance and eip then try to associate directly to instance + time.sleep(60) + eip.associate(instance.id) + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2containerservice/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2containerservice/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8bfcf6afba4fd6b6cbaadf962851c8ff662840bb --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2containerservice/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2containerservice/test_ec2containerservice.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2containerservice/test_ec2containerservice.py new file mode 100644 index 0000000000000000000000000000000000000000..32a677b84bef4ea52fd61e6d60c2bfa6fdbcdba2 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/ec2containerservice/test_ec2containerservice.py @@ -0,0 +1,40 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.ec2containerservice.exceptions import ClientException +from tests.compat import unittest + + +class TestEC2ContainerService(unittest.TestCase): + def setUp(self): + self.ecs = boto.connect_ec2containerservice() + + def test_list_clusters(self): + response = self.ecs.list_clusters() + self.assertIn('clusterArns', + response['ListClustersResponse']['ListClustersResult']) + + def test_handle_not_found_exception(self): + with self.assertRaises(ClientException): + # Try to stop a task with an invalid arn. + self.ecs.stop_task(task='foo') diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/elasticache/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/elasticache/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/elasticache/test_layer1.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/elasticache/test_layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..42e3008bc672fb70c85247cd86bdeb6076a570be --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/elasticache/test_layer1.py @@ -0,0 +1,67 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+#
+import time
+from tests.unit import unittest
+
+from boto.elasticache import layer1
+from boto.exception import BotoServerError
+
+
+class TestElastiCacheConnection(unittest.TestCase):
+    def setUp(self):
+        self.elasticache = layer1.ElastiCacheConnection()
+
+    def wait_until_cluster_available(self, cluster_id):
+        timeout = time.time() + 600
+        while time.time() < timeout:
+            response = self.elasticache.describe_cache_clusters(cluster_id)
+            status = (response['DescribeCacheClustersResponse']
+                      ['DescribeCacheClustersResult']
+                      ['CacheClusters'][0]['CacheClusterStatus'])
+            if status == 'available':
+                break
+            time.sleep(5)
+        else:
+            self.fail('Timeout waiting for cache cluster %r '
+                      'to become available.' % cluster_id)
+
+    def test_create_delete_cache_cluster(self):
+        cluster_id = 'cluster-id2'
+        self.elasticache.create_cache_cluster(
+            cluster_id, 1, 'cache.t1.micro', 'memcached')
+        self.wait_until_cluster_available(cluster_id)
+
+        self.elasticache.delete_cache_cluster(cluster_id)
+        timeout = time.time() + 600
+        while time.time() < timeout:
+            try:
+                self.elasticache.describe_cache_clusters(cluster_id)
+            except BotoServerError:
+                break
+            time.sleep(5)
+        else:
+            self.fail('Timeout waiting for cache cluster %s '
+                      'to be deleted.' % cluster_id)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/elastictranscoder/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/elastictranscoder/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/elastictranscoder/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/elastictranscoder/test_cert_verification.py
new file mode 100644
index 0000000000000000000000000000000000000000..321a8ecad147d54bc8c360a19fd3cc4d0f3bf4a2
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/elastictranscoder/test_cert_verification.py
@@ -0,0 +1,34 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+# + +from tests.integration import ServiceCertVerificationTest + +import boto.elastictranscoder +from tests.compat import unittest + + +class ElasticTranscoderCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + elastictranscoder = True + regions = boto.elastictranscoder.regions() + + def sample_service_call(self, conn): + conn.list_pipelines() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/elastictranscoder/test_layer1.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/elastictranscoder/test_layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..3e7690335a87064cf8fb9153c2e548d42cb106bf --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/elastictranscoder/test_layer1.py @@ -0,0 +1,115 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+#
+import time
+
+from boto.elastictranscoder.layer1 import ElasticTranscoderConnection
+from boto.elastictranscoder.exceptions import ValidationException
+from tests.compat import unittest
+import boto.s3
+import boto.sns
+import boto.iam
+
+
+class TestETSLayer1PipelineManagement(unittest.TestCase):
+    def setUp(self):
+        self.api = ElasticTranscoderConnection()
+        self.s3 = boto.connect_s3()
+        self.sns = boto.connect_sns()
+        self.iam = boto.connect_iam()
+        self.timestamp = str(int(time.time()))
+        self.input_bucket = 'boto-pipeline-%s' % self.timestamp
+        self.output_bucket = 'boto-pipeline-out-%s' % self.timestamp
+        self.role_name = 'boto-ets-role-%s' % self.timestamp
+        self.pipeline_name = 'boto-pipeline-%s' % self.timestamp
+        self.s3.create_bucket(self.input_bucket)
+        self.s3.create_bucket(self.output_bucket)
+        self.addCleanup(self.s3.delete_bucket, self.input_bucket)
+        self.addCleanup(self.s3.delete_bucket, self.output_bucket)
+        self.role = self.iam.create_role(self.role_name)
+        self.role_arn = self.role['create_role_response']['create_role_result']\
+                ['role']['arn']
+        self.addCleanup(self.iam.delete_role, self.role_name)
+
+    def create_pipeline(self):
+        pipeline = self.api.create_pipeline(
+            self.pipeline_name, self.input_bucket,
+            self.output_bucket, self.role_arn,
+            {'Progressing': '', 'Completed': '', 'Warning': '', 'Error': ''})
+        pipeline_id = pipeline['Pipeline']['Id']
+
+        self.addCleanup(self.api.delete_pipeline, pipeline_id)
+        return pipeline_id
+
+    def test_create_delete_pipeline(self):
+        pipeline = self.api.create_pipeline(
+            self.pipeline_name, self.input_bucket,
+            self.output_bucket, self.role_arn,
+            {'Progressing': '', 'Completed': '', 'Warning': '', 'Error': ''})
+        pipeline_id = pipeline['Pipeline']['Id']
+
+        self.api.delete_pipeline(pipeline_id)
+
+    def test_can_retrieve_pipeline_information(self):
+        pipeline_id = self.create_pipeline()
+
+        # The pipeline shows up in list_pipelines
+        pipelines = self.api.list_pipelines()['Pipelines']
+        pipeline_names = [p['Name'] for p in pipelines]
+        self.assertIn(self.pipeline_name, pipeline_names)
+
+        # The pipeline shows up in read_pipeline
+        response = self.api.read_pipeline(pipeline_id)
+        self.assertEqual(response['Pipeline']['Id'], pipeline_id)
+
+    def test_update_pipeline(self):
+        pipeline_id = self.create_pipeline()
+        self.api.update_pipeline_status(pipeline_id, 'Paused')
+
+        response = self.api.read_pipeline(pipeline_id)
+        self.assertEqual(response['Pipeline']['Status'], 'Paused')
+
+    def test_update_pipeline_notification(self):
+        pipeline_id = self.create_pipeline()
+        response = self.sns.create_topic('pipeline-errors')
+        topic_arn = response['CreateTopicResponse']['CreateTopicResult']\
+                ['TopicArn']
+        self.addCleanup(self.sns.delete_topic, topic_arn)
+
+        self.api.update_pipeline_notifications(
+            pipeline_id,
+            {'Progressing': '', 'Completed': '',
+             'Warning': '', 'Error': topic_arn})
+
+        response = self.api.read_pipeline(pipeline_id)
+        self.assertEqual(response['Pipeline']['Notifications']['Error'],
+                         topic_arn)
+
+    def test_list_jobs_by_pipeline(self):
+        pipeline_id = self.create_pipeline()
+        response = self.api.list_jobs_by_pipeline(pipeline_id)
+        self.assertEqual(response['Jobs'], [])
+
+    def test_proper_error_when_pipeline_does_not_exist(self):
+        with self.assertRaises(ValidationException):
+            self.api.read_pipeline('badpipelineid')
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/emr/__init__.py 
b/desktop/core/ext-py/boto-2.38.0/tests/integration/emr/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b3fc3a0c31bff21cc9c8e0a0189f02cee820e64e --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/emr/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/emr/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/emr/test_cert_verification.py new file mode 100644 index 0000000000000000000000000000000000000000..b29b52574767244f59df5c031710452714b7a27d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/emr/test_cert_verification.py @@ -0,0 +1,39 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on all EMR endpoints validate. 
+""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.emr + + +class EMRCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + emr = True + regions = boto.emr.regions() + + def sample_service_call(self, conn): + conn.describe_jobflows() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/glacier/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/glacier/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5326afc11bfa6c77cdc0ef5f34f23b87383d1005 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/glacier/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/glacier/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/glacier/test_cert_verification.py new file mode 100644 index 0000000000000000000000000000000000000000..fea1a64af4812fb51fdc3180a4b87b2e22b24167 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/glacier/test_cert_verification.py @@ -0,0 +1,39 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on all service endpoints validate. 
+""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.glacier + + +class GlacierCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + glacier = True + regions = boto.glacier.regions() + + def sample_service_call(self, conn): + conn.list_vaults() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/glacier/test_layer1.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/glacier/test_layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..0d38da2735db4ec94f34c763b8a6c88342e347e2 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/glacier/test_layer1.py @@ -0,0 +1,44 @@ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from tests.unit import unittest + +from boto.glacier.layer1 import Layer1 + + +class TestGlacierLayer1(unittest.TestCase): + glacier = True + + def delete_vault(self, vault_name): + pass + + def test_initialiate_multipart_upload(self): + # Create a vault, initiate a multipart upload, + # then cancel it. + glacier = Layer1() + glacier.create_vault('l1testvault') + self.addCleanup(glacier.delete_vault, 'l1testvault') + upload_id = glacier.initiate_multipart_upload('l1testvault', 4 * 1024 * 1024, + 'double spaces here')['UploadId'] + self.addCleanup(glacier.abort_multipart_upload, 'l1testvault', upload_id) + response = glacier.list_multipart_uploads('l1testvault')['UploadsList'] + self.assertEqual(len(response), 1) + self.assertEqual(response[0]['MultipartUploadId'], upload_id) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/glacier/test_layer2.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/glacier/test_layer2.py new file mode 100644 index 0000000000000000000000000000000000000000..caa44fa522beffac2a1e1cd0b5d03f222b0702f1 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/glacier/test_layer2.py @@ -0,0 +1,45 @@ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import time +from tests.unit import unittest + +from boto.glacier.layer2 import Layer1, Layer2 + + +class TestGlacierLayer2(unittest.TestCase): + glacier = True + + def setUp(self): + self.layer2 = Layer2() + self.vault_name = 'testvault%s' % int(time.time()) + + def test_create_delete_vault(self): + vault = self.layer2.create_vault(self.vault_name) + retrieved_vault = self.layer2.get_vault(self.vault_name) + self.layer2.delete_vault(self.vault_name) + self.assertEqual(vault.name, retrieved_vault.name) + self.assertEqual(vault.arn, retrieved_vault.arn) + self.assertEqual(vault.creation_date, retrieved_vault.creation_date) + self.assertEqual(vault.last_inventory_date, + retrieved_vault.last_inventory_date) + self.assertEqual(vault.number_of_archives, + retrieved_vault.number_of_archives) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/cb_test_harness.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/cb_test_harness.py new file mode 100644 index 0000000000000000000000000000000000000000..024b0cf036aa77d8d05b98b430b1f2776b26a539 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/cb_test_harness.py @@ -0,0 +1,76 @@ +# Copyright 2010 Google Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Test harness that allows us to raise exceptions, change file content, +and record the byte transfer callback sequence, to test various resumable +upload and download cases. The 'call' method of this harness can be passed +as the 'cb' parameter to boto.s3.Key.send_file() and boto.s3.Key.get_file(), +allowing testing of various file upload/download conditions. +""" + +import socket +import time + + +class CallbackTestHarness(object): + + def __init__(self, fail_after_n_bytes=0, num_times_to_fail=1, + exception=socket.error('mock socket error', 0), + fp_to_change=None, fp_change_pos=None, + delay_after_change=None): + self.fail_after_n_bytes = fail_after_n_bytes + self.num_times_to_fail = num_times_to_fail + self.exception = exception + # If fp_to_change and fp_change_pos are specified, 3 bytes will be + # written at that position just before the first exception is thrown. + self.fp_to_change = fp_to_change + self.fp_change_pos = fp_change_pos + self.delay_after_change = delay_after_change + self.num_failures = 0 + self.transferred_seq_before_first_failure = [] + self.transferred_seq_after_first_failure = [] + + def call(self, total_bytes_transferred, unused_total_size): + """ + To use this test harness, pass the 'call' method of the instantiated + object as the cb param to the set_contents_from_file() or + get_contents_to_file() call. + """ + # Record transfer sequence to allow verification. + if self.num_failures: + self.transferred_seq_after_first_failure.append( + total_bytes_transferred) + else: + self.transferred_seq_before_first_failure.append( + total_bytes_transferred) + if (total_bytes_transferred >= self.fail_after_n_bytes and + self.num_failures < self.num_times_to_fail): + self.num_failures += 1 + if self.fp_to_change and self.fp_change_pos is not None: + cur_pos = self.fp_to_change.tell() + self.fp_to_change.seek(self.fp_change_pos) + self.fp_to_change.write('abc') + self.fp_to_change.seek(cur_pos) + if self.delay_after_change: + time.sleep(self.delay_after_change) + self.called = True + raise self.exception \ No newline at end of file diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/test_basic.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/test_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..f2fc9ec07da426c662fdd1ea1824f397328468bc --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/test_basic.py @@ -0,0 +1,434 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# Copyright (c) 2011, Nexenta Systems, Inc. +# Copyright (c) 2012, Google, Inc. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Some integration tests for the GSConnection
+"""
+
+import os
+import re
+import StringIO
+import urllib
+import xml.sax
+
+from boto import handler
+from boto import storage_uri
+from boto.gs.acl import ACL
+from boto.gs.cors import Cors
+from boto.gs.lifecycle import LifecycleConfig
+from tests.integration.gs.testcase import GSTestCase
+
+
+CORS_EMPTY = '<CorsConfig></CorsConfig>'
+CORS_DOC = ('<CorsConfig><Cors><Origins><Origin>origin1.example.com'
+            '</Origin><Origin>origin2.example.com</Origin></Origins>'
+            '<Methods><Method>GET</Method><Method>PUT</Method>'
+            '<Method>POST</Method></Methods><ResponseHeaders>'
+            '<ResponseHeader>foo</ResponseHeader>'
+            '<ResponseHeader>bar</ResponseHeader></ResponseHeaders>'
+            '</Cors></CorsConfig>')
+
+LIFECYCLE_EMPTY = ('<?xml version="1.0" encoding="UTF-8"?>'
+                   '<LifecycleConfiguration></LifecycleConfiguration>')
+LIFECYCLE_DOC = ('<?xml version="1.0" encoding="UTF-8"?>'
+                 '<LifecycleConfiguration><Rule>'
+                 '<Action><Delete/></Action>'
+                 '<Condition><Age>365</Age>'
+                 '<CreatedBefore>2013-01-15</CreatedBefore>'
+                 '<NumberOfNewerVersions>3</NumberOfNewerVersions>'
+                 '<IsLive>true</IsLive></Condition>'
+                 '</Rule></LifecycleConfiguration>')
+LIFECYCLE_CONDITIONS = {'Age': '365',
+                        'CreatedBefore': '2013-01-15',
+                        'NumberOfNewerVersions': '3',
+                        'IsLive': 'true'}
+
+# Regexp for matching project-private default object ACL.
+PROJECT_PRIVATE_RE = ('\s*<AccessControlList>\s*<Entries>\s*<Entry>'
+                      '\s*<Scope type="GroupById"><ID>[0-9a-fA-F]+</ID></Scope>'
+                      '\s*<Permission>FULL_CONTROL</Permission>\s*</Entry>\s*<Entry>'
+                      '\s*<Scope type="GroupById"><ID>[0-9a-fA-F]+</ID></Scope>'
+                      '\s*<Permission>FULL_CONTROL</Permission>\s*</Entry>\s*<Entry>'
+                      '\s*<Scope type="GroupById"><ID>[0-9a-fA-F]+</ID></Scope>'
+                      '\s*<Permission>READ</Permission>\s*</Entry>'
+                      '\s*</Entries>\s*</AccessControlList>')
+
+
+class GSBasicTest(GSTestCase):
+    """Tests some basic GCS functionality."""
+
+    def test_read_write(self):
+        """Tests basic read/write to keys."""
+        bucket = self._MakeBucket()
+        bucket_name = bucket.name
+        # now try a get_bucket call and see if it's really there
+        bucket = self._GetConnection().get_bucket(bucket_name)
+        key_name = 'foobar'
+        k = bucket.new_key(key_name)
+        s1 = 'This is a test of file upload and download'
+        k.set_contents_from_string(s1)
+        tmpdir = self._MakeTempDir()
+        fpath = os.path.join(tmpdir, key_name)
+        fp = open(fpath, 'wb')
+        # now get the contents from gcs to a local file
+        k.get_contents_to_file(fp)
+        fp.close()
+        fp = open(fpath)
+        # check to make sure content read from gcs is identical to original
+        self.assertEqual(s1, fp.read())
+        fp.close()
+        # Use generate_url to get the contents
+        url = self._conn.generate_url(900, 'GET', bucket=bucket.name, key=key_name)
+        f = urllib.urlopen(url)
+        self.assertEqual(s1, f.read())
+        f.close()
+        # check to make sure set_contents_from_file is working
+        sfp = StringIO.StringIO('foo')
+        k.set_contents_from_file(sfp)
+        self.assertEqual(k.get_contents_as_string(), 'foo')
+        sfp2 = StringIO.StringIO('foo2')
+        k.set_contents_from_file(sfp2)
+        self.assertEqual(k.get_contents_as_string(), 'foo2')
+
+    def test_get_all_keys(self):
+        """Tests get_all_keys."""
+        phony_mimetype = 'application/x-boto-test'
+        headers = {'Content-Type': phony_mimetype}
+        tmpdir = self._MakeTempDir()
+        fpath = os.path.join(tmpdir, 'foobar1')
+        fpath2 = os.path.join(tmpdir, 'foobar')
+        with open(fpath2, 'w') as f:
+            f.write('test-data')
+        bucket = self._MakeBucket()
+
+        # First load some data for the first one, overriding content type.
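+        # (The calls below reuse one Key object, reassigning k.name before
+        # each upload so that several distinct objects get created.)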
+ k = bucket.new_key('foobar') + s1 = 'test-contents' + s2 = 'test-contents2' + k.name = 'foo/bar' + k.set_contents_from_string(s1, headers) + k.name = 'foo/bas' + k.set_contents_from_filename(fpath2) + k.name = 'foo/bat' + k.set_contents_from_string(s1) + k.name = 'fie/bar' + k.set_contents_from_string(s1) + k.name = 'fie/bas' + k.set_contents_from_string(s1) + k.name = 'fie/bat' + k.set_contents_from_string(s1) + # try resetting the contents to another value + md5 = k.md5 + k.set_contents_from_string(s2) + self.assertNotEqual(k.md5, md5) + + fp2 = open(fpath2, 'rb') + k.md5 = None + k.base64md5 = None + k.set_contents_from_stream(fp2) + fp = open(fpath, 'wb') + k.get_contents_to_file(fp) + fp.close() + fp2.seek(0, 0) + fp = open(fpath, 'rb') + self.assertEqual(fp2.read(), fp.read()) + fp.close() + fp2.close() + all = bucket.get_all_keys() + self.assertEqual(len(all), 6) + rs = bucket.get_all_keys(prefix='foo') + self.assertEqual(len(rs), 3) + rs = bucket.get_all_keys(prefix='', delimiter='/') + self.assertEqual(len(rs), 2) + rs = bucket.get_all_keys(maxkeys=5) + self.assertEqual(len(rs), 5) + + def test_bucket_lookup(self): + """Test the bucket lookup method.""" + bucket = self._MakeBucket() + k = bucket.new_key('foo/bar') + phony_mimetype = 'application/x-boto-test' + headers = {'Content-Type': phony_mimetype} + k.set_contents_from_string('testdata', headers) + + k = bucket.lookup('foo/bar') + self.assertIsInstance(k, bucket.key_class) + self.assertEqual(k.content_type, phony_mimetype) + k = bucket.lookup('notthere') + self.assertIsNone(k) + + def test_metadata(self): + """Test key metadata operations.""" + bucket = self._MakeBucket() + k = self._MakeKey(bucket=bucket) + key_name = k.name + s1 = 'This is a test of file upload and download' + + mdkey1 = 'meta1' + mdval1 = 'This is the first metadata value' + k.set_metadata(mdkey1, mdval1) + mdkey2 = 'meta2' + mdval2 = 'This is the second metadata value' + k.set_metadata(mdkey2, mdval2) + + # Test unicode character. + mdval3 = u'föö' + mdkey3 = 'meta3' + k.set_metadata(mdkey3, mdval3) + k.set_contents_from_string(s1) + + k = bucket.lookup(key_name) + self.assertEqual(k.get_metadata(mdkey1), mdval1) + self.assertEqual(k.get_metadata(mdkey2), mdval2) + self.assertEqual(k.get_metadata(mdkey3), mdval3) + k = bucket.new_key(key_name) + k.get_contents_as_string() + self.assertEqual(k.get_metadata(mdkey1), mdval1) + self.assertEqual(k.get_metadata(mdkey2), mdval2) + self.assertEqual(k.get_metadata(mdkey3), mdval3) + + def test_list_iterator(self): + """Test list and iterator.""" + bucket = self._MakeBucket() + num_iter = len([k for k in bucket.list()]) + rs = bucket.get_all_keys() + num_keys = len(rs) + self.assertEqual(num_iter, num_keys) + + def test_acl(self): + """Test bucket and key ACLs.""" + bucket = self._MakeBucket() + + # try some acl stuff + bucket.set_acl('public-read') + acl = bucket.get_acl() + self.assertEqual(len(acl.entries.entry_list), 2) + bucket.set_acl('private') + acl = bucket.get_acl() + self.assertEqual(len(acl.entries.entry_list), 1) + k = self._MakeKey(bucket=bucket) + k.set_acl('public-read') + acl = k.get_acl() + self.assertEqual(len(acl.entries.entry_list), 2) + k.set_acl('private') + acl = k.get_acl() + self.assertEqual(len(acl.entries.entry_list), 1) + + # Test case-insensitivity of XML ACL parsing. 
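+        # (The mixed-case tag names below, e.g. ACCESSControlList and EntrIes,
+        # are deliberate; the parser should treat them case-insensitively.)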
+        acl_xml = (
+            '<ACCESSControlList><EntrIes><Entry>' +
+            '<Scope type="AllUsers"></Scope><Permission>READ</Permission>' +
+            '</Entry></EntrIes></ACCESSControlList>')
+        acl = ACL()
+        h = handler.XmlHandler(acl, bucket)
+        xml.sax.parseString(acl_xml, h)
+        bucket.set_acl(acl)
+        self.assertEqual(len(acl.entries.entry_list), 1)
+        aclstr = k.get_xml_acl()
+        self.assertGreater(aclstr.count('/Entry', 1), 0)
+
+    def test_logging(self):
+        """Test set/get raw logging subresource."""
+        bucket = self._MakeBucket()
+        empty_logging_str="<?xml version=\"1.0\" encoding=\"UTF-8\"?><Logging/>"
+        logging_str = (
+            "<?xml version=\"1.0\" encoding=\"UTF-8\"?><Logging>"
+            "<LogBucket>log-bucket</LogBucket>" +
+            "<LogObjectPrefix>example</LogObjectPrefix>" +
+            "</Logging>")
+        bucket.set_subresource('logging', logging_str)
+        self.assertEqual(bucket.get_subresource('logging'), logging_str)
+        # try disable/enable logging
+        bucket.disable_logging()
+        self.assertEqual(bucket.get_subresource('logging'), empty_logging_str)
+        bucket.enable_logging('log-bucket', 'example')
+        self.assertEqual(bucket.get_subresource('logging'), logging_str)
+
+    def test_copy_key(self):
+        """Test copying a key from one bucket to another."""
+        # create two new, empty buckets
+        bucket1 = self._MakeBucket()
+        bucket2 = self._MakeBucket()
+        bucket_name_1 = bucket1.name
+        bucket_name_2 = bucket2.name
+        # verify buckets got created
+        bucket1 = self._GetConnection().get_bucket(bucket_name_1)
+        bucket2 = self._GetConnection().get_bucket(bucket_name_2)
+        # create a key in bucket1 and give it some content
+        key_name = 'foobar'
+        k1 = bucket1.new_key(key_name)
+        self.assertIsInstance(k1, bucket1.key_class)
+        k1.name = key_name
+        s = 'This is a test.'
+        k1.set_contents_from_string(s)
+        # copy the new key from bucket1 to bucket2
+        k1.copy(bucket_name_2, key_name)
+        # now copy the contents from bucket2 to a local file
+        k2 = bucket2.lookup(key_name)
+        self.assertIsInstance(k2, bucket2.key_class)
+        tmpdir = self._MakeTempDir()
+        fpath = os.path.join(tmpdir, 'foobar')
+        fp = open(fpath, 'wb')
+        k2.get_contents_to_file(fp)
+        fp.close()
+        fp = open(fpath)
+        # check to make sure content read is identical to original
+        self.assertEqual(s, fp.read())
+        fp.close()
+        # delete keys
+        bucket1.delete_key(k1)
+        bucket2.delete_key(k2)
+
+    def test_default_object_acls(self):
+        """Test default object acls."""
+        # create a new bucket
+        bucket = self._MakeBucket()
+        # get default acl and make sure it's project-private
+        acl = bucket.get_def_acl()
+        self.assertIsNotNone(re.search(PROJECT_PRIVATE_RE, acl.to_xml()))
+        # set default acl to a canned acl and verify it gets set
+        bucket.set_def_acl('public-read')
+        acl = bucket.get_def_acl()
+        # save public-read acl for later test
+        public_read_acl = acl
+        self.assertEqual(acl.to_xml(), ('<AccessControlList><Entries><Entry>'
+                                        '<Scope type="AllUsers"></Scope><Permission>READ</Permission>'
+                                        '</Entry></Entries></AccessControlList>'))
+        # back to private acl
+        bucket.set_def_acl('private')
+        acl = bucket.get_def_acl()
+        self.assertEqual(acl.to_xml(),
+                         '<AccessControlList></AccessControlList>')
+        # set default acl to an xml acl and verify it gets set
+        bucket.set_def_acl(public_read_acl)
+        acl = bucket.get_def_acl()
+        self.assertEqual(acl.to_xml(), ('<AccessControlList><Entries><Entry>'
+                                        '<Scope type="AllUsers"></Scope><Permission>READ</Permission>'
+                                        '</Entry></Entries></AccessControlList>'))
+        # back to private acl
+        bucket.set_def_acl('private')
+        acl = bucket.get_def_acl()
+        self.assertEqual(acl.to_xml(),
+                         '<AccessControlList></AccessControlList>')
+
+    def test_default_object_acls_storage_uri(self):
+        """Test default object acls using storage_uri."""
+        # create a new bucket
+        bucket = self._MakeBucket()
+        bucket_name = bucket.name
+        uri = storage_uri('gs://' + bucket_name)
+        # get default acl and make sure it's project-private
+        acl = uri.get_def_acl()
+        self.assertIsNotNone(re.search(PROJECT_PRIVATE_RE, acl.to_xml()))
+        # set default acl to a canned acl and verify it gets set
+        uri.set_def_acl('public-read')
+        acl = uri.get_def_acl()
+        # save public-read acl for later test
+        public_read_acl = acl
+        self.assertEqual(acl.to_xml(), ('<AccessControlList><Entries><Entry>'
+                                        '<Scope type="AllUsers"></Scope><Permission>READ</Permission>'
+                                        '</Entry></Entries></AccessControlList>'))
+        # back to private acl
+        uri.set_def_acl('private')
+        acl = uri.get_def_acl()
+        self.assertEqual(acl.to_xml(),
+                         '<AccessControlList></AccessControlList>')
+        # set default acl to an xml acl and verify it gets set
+        uri.set_def_acl(public_read_acl)
+        acl = uri.get_def_acl()
+        self.assertEqual(acl.to_xml(), ('<AccessControlList><Entries><Entry>'
+                                        '<Scope type="AllUsers"></Scope><Permission>READ</Permission>'
+                                        '</Entry></Entries></AccessControlList>'))
+        # back to private acl
+        uri.set_def_acl('private')
+        acl = uri.get_def_acl()
+        self.assertEqual(acl.to_xml(),
+                         '<AccessControlList></AccessControlList>')
+
+    def test_cors_xml_bucket(self):
+        """Test setting and getting of CORS XML documents on Bucket."""
+        # create a new bucket
+        bucket = self._MakeBucket()
+        bucket_name = bucket.name
+        # now call get_bucket to see if it's really there
+        bucket = self._GetConnection().get_bucket(bucket_name)
+        # get new bucket cors and make sure it's empty
+        cors = re.sub(r'\s', '', bucket.get_cors().to_xml())
+        self.assertEqual(cors, CORS_EMPTY)
+        # set cors document on new bucket
+        bucket.set_cors(CORS_DOC)
+        cors = re.sub(r'\s', '', bucket.get_cors().to_xml())
+        self.assertEqual(cors, CORS_DOC)
+
+    def test_cors_xml_storage_uri(self):
+        """Test setting and getting of CORS XML documents with storage_uri."""
+        # create a new bucket
+        bucket = self._MakeBucket()
+        bucket_name = bucket.name
+        uri = storage_uri('gs://' + bucket_name)
+        # get new bucket cors and make sure it's empty
+        cors = re.sub(r'\s', '', uri.get_cors().to_xml())
+        self.assertEqual(cors, CORS_EMPTY)
+        # set cors document on new bucket
+        cors_obj = Cors()
+        h = handler.XmlHandler(cors_obj, None)
+        xml.sax.parseString(CORS_DOC, h)
+        uri.set_cors(cors_obj)
+        cors = re.sub(r'\s', '', uri.get_cors().to_xml())
+        self.assertEqual(cors, CORS_DOC)
+
+    def test_lifecycle_config_bucket(self):
+        """Test setting and getting of lifecycle config on Bucket."""
+        # create a new bucket
+        bucket = self._MakeBucket()
+        bucket_name = bucket.name
+        # now call get_bucket to see if it's really there
+        bucket = self._GetConnection().get_bucket(bucket_name)
+        # get lifecycle config and make sure it's empty
+        xml = bucket.get_lifecycle_config().to_xml()
+        self.assertEqual(xml, LIFECYCLE_EMPTY)
+        # set lifecycle config
+        lifecycle_config = LifecycleConfig()
+        lifecycle_config.add_rule('Delete', None, LIFECYCLE_CONDITIONS)
+        bucket.configure_lifecycle(lifecycle_config)
+        xml = bucket.get_lifecycle_config().to_xml()
+        self.assertEqual(xml, LIFECYCLE_DOC)
+
+    def test_lifecycle_config_storage_uri(self):
+        """Test setting and getting of lifecycle config with storage_uri."""
+        # create a new bucket
+        bucket = self._MakeBucket()
+        bucket_name = bucket.name
+        uri = storage_uri('gs://' + bucket_name)
+        # get lifecycle config and make sure it's empty
+        xml = uri.get_lifecycle_config().to_xml()
+        self.assertEqual(xml, LIFECYCLE_EMPTY)
+        # set lifecycle config
+        lifecycle_config = LifecycleConfig()
+        lifecycle_config.add_rule('Delete', None, LIFECYCLE_CONDITIONS)
+        uri.configure_lifecycle(lifecycle_config)
+        xml = uri.get_lifecycle_config().to_xml()
+        self.assertEqual(xml, LIFECYCLE_DOC)
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/test_generation_conditionals.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/test_generation_conditionals.py
new file mode 100644
index 0000000000000000000000000000000000000000..a35c466c1004a2d6ecb360a8d379ddadff4df86c
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/test_generation_conditionals.py
@@ -0,0 +1,399 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2013, Google, Inc.
+# All rights reserved.
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +"""Integration tests for GS versioning support.""" + +import StringIO +import os +import tempfile +from xml import sax + +from boto import handler +from boto.exception import GSResponseError +from boto.gs.acl import ACL +from tests.integration.gs.testcase import GSTestCase + + +# HTTP Error returned when a generation precondition fails. +VERSION_MISMATCH = "412" + + +class GSGenerationConditionalsTest(GSTestCase): + + def testConditionalSetContentsFromFile(self): + b = self._MakeBucket() + k = b.new_key("foo") + s1 = "test1" + fp = StringIO.StringIO(s1) + with self.assertRaisesRegexp(GSResponseError, VERSION_MISMATCH): + k.set_contents_from_file(fp, if_generation=999) + + fp = StringIO.StringIO(s1) + k.set_contents_from_file(fp, if_generation=0) + g1 = k.generation + + s2 = "test2" + fp = StringIO.StringIO(s2) + with self.assertRaisesRegexp(GSResponseError, VERSION_MISMATCH): + k.set_contents_from_file(fp, if_generation=int(g1)+1) + + fp = StringIO.StringIO(s2) + k.set_contents_from_file(fp, if_generation=g1) + self.assertEqual(k.get_contents_as_string(), s2) + + def testConditionalSetContentsFromString(self): + b = self._MakeBucket() + k = b.new_key("foo") + s1 = "test1" + with self.assertRaisesRegexp(GSResponseError, VERSION_MISMATCH): + k.set_contents_from_string(s1, if_generation=999) + + k.set_contents_from_string(s1, if_generation=0) + g1 = k.generation + + s2 = "test2" + with self.assertRaisesRegexp(GSResponseError, VERSION_MISMATCH): + k.set_contents_from_string(s2, if_generation=int(g1)+1) + + k.set_contents_from_string(s2, if_generation=g1) + self.assertEqual(k.get_contents_as_string(), s2) + + def testConditionalSetContentsFromFilename(self): + s1 = "test1" + s2 = "test2" + f1 = tempfile.NamedTemporaryFile(prefix="boto-gs-test", delete=False) + f2 = tempfile.NamedTemporaryFile(prefix="boto-gs-test", delete=False) + fname1 = f1.name + fname2 = f2.name + f1.write(s1) + f1.close() + f2.write(s2) + f2.close() + + try: + b = self._MakeBucket() + k = b.new_key("foo") + + with self.assertRaisesRegexp(GSResponseError, VERSION_MISMATCH): + k.set_contents_from_filename(fname1, if_generation=999) + + k.set_contents_from_filename(fname1, if_generation=0) + g1 = k.generation + + with self.assertRaisesRegexp(GSResponseError, VERSION_MISMATCH): + k.set_contents_from_filename(fname2, if_generation=int(g1)+1) + + k.set_contents_from_filename(fname2, if_generation=g1) + self.assertEqual(k.get_contents_as_string(), s2) + finally: + os.remove(fname1) + 
os.remove(fname2)
+
+    def testBucketConditionalSetAcl(self):
+        b = self._MakeVersionedBucket()
+        k = b.new_key("foo")
+        s1 = "test1"
+        k.set_contents_from_string(s1)
+
+        g1 = k.generation
+        mg1 = k.metageneration
+        self.assertEqual(str(mg1), "1")
+        b.set_acl("public-read", key_name="foo")
+
+        k = b.get_key("foo")
+        g2 = k.generation
+        mg2 = k.metageneration
+
+        self.assertEqual(g2, g1)
+        self.assertGreater(mg2, mg1)
+
+        with self.assertRaisesRegexp(ValueError, ("Received if_metageneration "
+                                                  "argument with no "
+                                                  "if_generation argument")):
+            b.set_acl("bucket-owner-full-control", key_name="foo",
+                      if_metageneration=123)
+
+        with self.assertRaisesRegexp(GSResponseError, VERSION_MISMATCH):
+            b.set_acl("bucket-owner-full-control", key_name="foo",
+                      if_generation=int(g2) + 1)
+
+        with self.assertRaisesRegexp(GSResponseError, VERSION_MISMATCH):
+            b.set_acl("bucket-owner-full-control", key_name="foo",
+                      if_generation=g2, if_metageneration=int(mg2) + 1)
+
+        b.set_acl("bucket-owner-full-control", key_name="foo", if_generation=g2)
+
+        k = b.get_key("foo")
+        g3 = k.generation
+        mg3 = k.metageneration
+        self.assertEqual(g3, g2)
+        self.assertGreater(mg3, mg2)
+
+        b.set_acl("public-read", key_name="foo", if_generation=g3,
+                  if_metageneration=mg3)
+
+    def testConditionalSetContentsFromStream(self):
+        b = self._MakeBucket()
+        k = b.new_key("foo")
+        s1 = "test1"
+        fp = StringIO.StringIO(s1)
+        with self.assertRaisesRegexp(GSResponseError, VERSION_MISMATCH):
+            k.set_contents_from_stream(fp, if_generation=999)
+
+        fp = StringIO.StringIO(s1)
+        k.set_contents_from_stream(fp, if_generation=0)
+        g1 = k.generation
+
+        k = b.get_key("foo")
+        s2 = "test2"
+        fp = StringIO.StringIO(s2)
+        with self.assertRaisesRegexp(GSResponseError, VERSION_MISMATCH):
+            k.set_contents_from_stream(fp, if_generation=int(g1)+1)
+
+        fp = StringIO.StringIO(s2)
+        k.set_contents_from_stream(fp, if_generation=g1)
+        self.assertEqual(k.get_contents_as_string(), s2)
+
+    def testBucketConditionalSetCannedAcl(self):
+        b = self._MakeVersionedBucket()
+        k = b.new_key("foo")
+        s1 = "test1"
+        k.set_contents_from_string(s1)
+
+        g1 = k.generation
+        mg1 = k.metageneration
+        self.assertEqual(str(mg1), "1")
+        b.set_canned_acl("public-read", key_name="foo")
+
+        k = b.get_key("foo")
+        g2 = k.generation
+        mg2 = k.metageneration
+
+        self.assertEqual(g2, g1)
+        self.assertGreater(mg2, mg1)
+
+        with self.assertRaisesRegexp(ValueError, ("Received if_metageneration "
+                                                  "argument with no "
+                                                  "if_generation argument")):
+            b.set_canned_acl("bucket-owner-full-control", key_name="foo",
+                             if_metageneration=123)
+
+        with self.assertRaisesRegexp(GSResponseError, VERSION_MISMATCH):
+            b.set_canned_acl("bucket-owner-full-control", key_name="foo",
+                             if_generation=int(g2) + 1)
+
+        with self.assertRaisesRegexp(GSResponseError, VERSION_MISMATCH):
+            b.set_canned_acl("bucket-owner-full-control", key_name="foo",
+                             if_generation=g2, if_metageneration=int(mg2) + 1)
+
+        b.set_canned_acl("bucket-owner-full-control", key_name="foo",
+                         if_generation=g2)
+
+        k = b.get_key("foo")
+        g3 = k.generation
+        mg3 = k.metageneration
+        self.assertEqual(g3, g2)
+        self.assertGreater(mg3, mg2)
+
+        b.set_canned_acl("public-read", key_name="foo", if_generation=g3,
+                         if_metageneration=mg3)
+
+    def testBucketConditionalSetXmlAcl(self):
+        b = self._MakeVersionedBucket()
+        k = b.new_key("foo")
+        s1 = "test1"
+        k.set_contents_from_string(s1)
+
+        g1 = k.generation
+        mg1 = k.metageneration
+        self.assertEqual(str(mg1), "1")
+
+        acl_xml = (
+            '<ACCESSControlList><EntrIes><Entry>' +
+            '<Scope type="AllUsers"></Scope><Permission>READ</Permission>' +
+            '</Entry></EntrIes></ACCESSControlList>')
+        acl = ACL()
+        h = handler.XmlHandler(acl, b)
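+        # (XmlHandler is a SAX content handler that feeds parse events into
+        # the ACL object, so the parseString call below populates acl.)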
+        sax.parseString(acl_xml, h)
+        acl = acl.to_xml()
+
+        b.set_xml_acl(acl, key_name="foo")
+
+        k = b.get_key("foo")
+        g2 = k.generation
+        mg2 = k.metageneration
+
+        self.assertEqual(g2, g1)
+        self.assertGreater(mg2, mg1)
+
+        with self.assertRaisesRegexp(ValueError, ("Received if_metageneration "
+                                                  "argument with no "
+                                                  "if_generation argument")):
+            b.set_xml_acl(acl, key_name="foo", if_metageneration=123)
+
+        with self.assertRaisesRegexp(GSResponseError, VERSION_MISMATCH):
+            b.set_xml_acl(acl, key_name="foo", if_generation=int(g2) + 1)
+
+        with self.assertRaisesRegexp(GSResponseError, VERSION_MISMATCH):
+            b.set_xml_acl(acl, key_name="foo", if_generation=g2,
+                          if_metageneration=int(mg2) + 1)
+
+        b.set_xml_acl(acl, key_name="foo", if_generation=g2)
+
+        k = b.get_key("foo")
+        g3 = k.generation
+        mg3 = k.metageneration
+        self.assertEqual(g3, g2)
+        self.assertGreater(mg3, mg2)
+
+        b.set_xml_acl(acl, key_name="foo", if_generation=g3,
+                      if_metageneration=mg3)
+
+    def testObjectConditionalSetAcl(self):
+        b = self._MakeVersionedBucket()
+        k = b.new_key("foo")
+        k.set_contents_from_string("test1")
+
+        g1 = k.generation
+        mg1 = k.metageneration
+        self.assertEqual(str(mg1), "1")
+        k.set_acl("public-read")
+
+        k = b.get_key("foo")
+        g2 = k.generation
+        mg2 = k.metageneration
+
+        self.assertEqual(g2, g1)
+        self.assertGreater(mg2, mg1)
+
+        with self.assertRaisesRegexp(ValueError, ("Received if_metageneration "
+                                                  "argument with no "
+                                                  "if_generation argument")):
+            k.set_acl("bucket-owner-full-control", if_metageneration=123)
+
+        with self.assertRaisesRegexp(GSResponseError, VERSION_MISMATCH):
+            k.set_acl("bucket-owner-full-control", if_generation=int(g2) + 1)
+
+        with self.assertRaisesRegexp(GSResponseError, VERSION_MISMATCH):
+            k.set_acl("bucket-owner-full-control", if_generation=g2,
+                      if_metageneration=int(mg2) + 1)
+
+        k.set_acl("bucket-owner-full-control", if_generation=g2)
+
+        k = b.get_key("foo")
+        g3 = k.generation
+        mg3 = k.metageneration
+        self.assertEqual(g3, g2)
+        self.assertGreater(mg3, mg2)
+
+        k.set_acl("public-read", if_generation=g3, if_metageneration=mg3)
+
+    def testObjectConditionalSetCannedAcl(self):
+        b = self._MakeVersionedBucket()
+        k = b.new_key("foo")
+        k.set_contents_from_string("test1")
+
+        g1 = k.generation
+        mg1 = k.metageneration
+        self.assertEqual(str(mg1), "1")
+        k.set_canned_acl("public-read")
+
+        k = b.get_key("foo")
+        g2 = k.generation
+        mg2 = k.metageneration
+
+        self.assertEqual(g2, g1)
+        self.assertGreater(mg2, mg1)
+
+        with self.assertRaisesRegexp(ValueError, ("Received if_metageneration "
+                                                  "argument with no "
+                                                  "if_generation argument")):
+            k.set_canned_acl("bucket-owner-full-control",
+                             if_metageneration=123)
+
+        with self.assertRaisesRegexp(GSResponseError, VERSION_MISMATCH):
+            k.set_canned_acl("bucket-owner-full-control",
+                             if_generation=int(g2) + 1)
+
+        with self.assertRaisesRegexp(GSResponseError, VERSION_MISMATCH):
+            k.set_canned_acl("bucket-owner-full-control", if_generation=g2,
+                             if_metageneration=int(mg2) + 1)
+
+        k.set_canned_acl("bucket-owner-full-control", if_generation=g2)
+
+        k = b.get_key("foo")
+        g3 = k.generation
+        mg3 = k.metageneration
+        self.assertEqual(g3, g2)
+        self.assertGreater(mg3, mg2)
+
+        k.set_canned_acl("public-read", if_generation=g3, if_metageneration=mg3)
+
+    def testObjectConditionalSetXmlAcl(self):
+        b = self._MakeVersionedBucket()
+        k = b.new_key("foo")
+        s1 = "test1"
+        k.set_contents_from_string(s1)
+
+        g1 = k.generation
+        mg1 = k.metageneration
+        self.assertEqual(str(mg1), "1")
+
+        acl_xml = (
+            '<ACCESSControlList><EntrIes><Entry>' +
+            '<Scope type="AllUsers"></Scope><Permission>READ</Permission>' +
+            '</Entry></EntrIes></ACCESSControlList>')
+        acl = 
ACL() + h = handler.XmlHandler(acl, b) + sax.parseString(acl_xml, h) + acl = acl.to_xml() + + k.set_xml_acl(acl) + + k = b.get_key("foo") + g2 = k.generation + mg2 = k.metageneration + + self.assertEqual(g2, g1) + self.assertGreater(mg2, mg1) + + with self.assertRaisesRegexp(ValueError, ("Received if_metageneration " + "argument with no " + "if_generation argument")): + k.set_xml_acl(acl, if_metageneration=123) + + with self.assertRaisesRegexp(GSResponseError, VERSION_MISMATCH): + k.set_xml_acl(acl, if_generation=int(g2) + 1) + + with self.assertRaisesRegexp(GSResponseError, VERSION_MISMATCH): + k.set_xml_acl(acl, if_generation=g2, if_metageneration=int(mg2) + 1) + + k.set_xml_acl(acl, if_generation=g2) + + k = b.get_key("foo") + g3 = k.generation + mg3 = k.metageneration + self.assertEqual(g3, g2) + self.assertGreater(mg3, mg2) + + k.set_xml_acl(acl, if_generation=g3, if_metageneration=mg3) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/test_resumable_downloads.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/test_resumable_downloads.py new file mode 100644 index 0000000000000000000000000000000000000000..ba5d9830833e1284f7fed9c497304a175bb43e1b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/test_resumable_downloads.py @@ -0,0 +1,354 @@ +# Copyright 2010 Google Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Tests of resumable downloads. +""" + +import errno +import os +import re + +import boto +from boto.s3.resumable_download_handler import get_cur_file_size +from boto.s3.resumable_download_handler import ResumableDownloadHandler +from boto.exception import ResumableTransferDisposition +from boto.exception import ResumableDownloadException +from cb_test_harness import CallbackTestHarness +from tests.integration.gs.testcase import GSTestCase + + +SMALL_KEY_SIZE = 2 * 1024 # 2 KB. +LARGE_KEY_SIZE = 500 * 1024 # 500 KB. 
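+
+# A minimal sketch of the handler these tests exercise (hypothetical bucket,
+# key, and file names; assumes GS credentials are already configured for boto):
+#
+#     import boto
+#     from boto.s3.resumable_download_handler import ResumableDownloadHandler
+#
+#     key = boto.connect_gs().get_bucket('some-bucket').get_key('some-key')
+#     handler = ResumableDownloadHandler(tracker_file_name='/tmp/tracker',
+#                                        num_retries=3)
+#     fp = open('/tmp/out', 'wb')
+#     key.get_contents_to_file(fp, res_download_handler=handler)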
+ + +class ResumableDownloadTests(GSTestCase): + """Resumable download test suite.""" + + def make_small_key(self): + small_src_key_as_string = os.urandom(SMALL_KEY_SIZE) + small_src_key = self._MakeKey(data=small_src_key_as_string) + return small_src_key_as_string, small_src_key + + def make_tracker_file(self, tmpdir=None): + if not tmpdir: + tmpdir = self._MakeTempDir() + tracker_file = os.path.join(tmpdir, 'tracker') + return tracker_file + + def make_dst_fp(self, tmpdir=None): + if not tmpdir: + tmpdir = self._MakeTempDir() + dst_file = os.path.join(tmpdir, 'dstfile') + return open(dst_file, 'w') + + def test_non_resumable_download(self): + """ + Tests that non-resumable downloads work + """ + dst_fp = self.make_dst_fp() + small_src_key_as_string, small_src_key = self.make_small_key() + small_src_key.get_contents_to_file(dst_fp) + self.assertEqual(SMALL_KEY_SIZE, + get_cur_file_size(dst_fp)) + self.assertEqual(small_src_key_as_string, + small_src_key.get_contents_as_string()) + + def test_download_without_persistent_tracker(self): + """ + Tests a single resumable download, with no tracker persistence + """ + res_download_handler = ResumableDownloadHandler() + dst_fp = self.make_dst_fp() + small_src_key_as_string, small_src_key = self.make_small_key() + small_src_key.get_contents_to_file( + dst_fp, res_download_handler=res_download_handler) + self.assertEqual(SMALL_KEY_SIZE, + get_cur_file_size(dst_fp)) + self.assertEqual(small_src_key_as_string, + small_src_key.get_contents_as_string()) + + def test_failed_download_with_persistent_tracker(self): + """ + Tests that failed resumable download leaves a correct tracker file + """ + harness = CallbackTestHarness() + tmpdir = self._MakeTempDir() + tracker_file_name = self.make_tracker_file(tmpdir) + dst_fp = self.make_dst_fp(tmpdir) + res_download_handler = ResumableDownloadHandler( + tracker_file_name=tracker_file_name, num_retries=0) + small_src_key_as_string, small_src_key = self.make_small_key() + try: + small_src_key.get_contents_to_file( + dst_fp, cb=harness.call, + res_download_handler=res_download_handler) + self.fail('Did not get expected ResumableDownloadException') + except ResumableDownloadException, e: + # We'll get a ResumableDownloadException at this point because + # of CallbackTestHarness (above). Check that the tracker file was + # created correctly. + self.assertEqual(e.disposition, + ResumableTransferDisposition.ABORT_CUR_PROCESS) + self.assertTrue(os.path.exists(tracker_file_name)) + f = open(tracker_file_name) + etag_line = f.readline() + self.assertEquals(etag_line.rstrip('\n'), small_src_key.etag.strip('"\'')) + + def test_retryable_exception_recovery(self): + """ + Tests handling of a retryable exception + """ + # Test one of the RETRYABLE_EXCEPTIONS. + exception = ResumableDownloadHandler.RETRYABLE_EXCEPTIONS[0] + harness = CallbackTestHarness(exception=exception) + res_download_handler = ResumableDownloadHandler(num_retries=1) + dst_fp = self.make_dst_fp() + small_src_key_as_string, small_src_key = self.make_small_key() + small_src_key.get_contents_to_file( + dst_fp, cb=harness.call, + res_download_handler=res_download_handler) + # Ensure downloaded object has correct content. 
+ self.assertEqual(SMALL_KEY_SIZE, + get_cur_file_size(dst_fp)) + self.assertEqual(small_src_key_as_string, + small_src_key.get_contents_as_string()) + + def test_broken_pipe_recovery(self): + """ + Tests handling of a Broken Pipe (which interacts with an httplib bug) + """ + exception = IOError(errno.EPIPE, "Broken pipe") + harness = CallbackTestHarness(exception=exception) + res_download_handler = ResumableDownloadHandler(num_retries=1) + dst_fp = self.make_dst_fp() + small_src_key_as_string, small_src_key = self.make_small_key() + small_src_key.get_contents_to_file( + dst_fp, cb=harness.call, + res_download_handler=res_download_handler) + # Ensure downloaded object has correct content. + self.assertEqual(SMALL_KEY_SIZE, + get_cur_file_size(dst_fp)) + self.assertEqual(small_src_key_as_string, + small_src_key.get_contents_as_string()) + + def test_non_retryable_exception_handling(self): + """ + Tests resumable download that fails with a non-retryable exception + """ + harness = CallbackTestHarness( + exception=OSError(errno.EACCES, 'Permission denied')) + res_download_handler = ResumableDownloadHandler(num_retries=1) + dst_fp = self.make_dst_fp() + small_src_key_as_string, small_src_key = self.make_small_key() + try: + small_src_key.get_contents_to_file( + dst_fp, cb=harness.call, + res_download_handler=res_download_handler) + self.fail('Did not get expected OSError') + except OSError, e: + # Ensure the error was re-raised. + self.assertEqual(e.errno, 13) + + def test_failed_and_restarted_download_with_persistent_tracker(self): + """ + Tests resumable download that fails once and then completes, + with tracker file + """ + harness = CallbackTestHarness() + tmpdir = self._MakeTempDir() + tracker_file_name = self.make_tracker_file(tmpdir) + dst_fp = self.make_dst_fp(tmpdir) + small_src_key_as_string, small_src_key = self.make_small_key() + res_download_handler = ResumableDownloadHandler( + tracker_file_name=tracker_file_name, num_retries=1) + small_src_key.get_contents_to_file( + dst_fp, cb=harness.call, + res_download_handler=res_download_handler) + # Ensure downloaded object has correct content. + self.assertEqual(SMALL_KEY_SIZE, + get_cur_file_size(dst_fp)) + self.assertEqual(small_src_key_as_string, + small_src_key.get_contents_as_string()) + # Ensure tracker file deleted. + self.assertFalse(os.path.exists(tracker_file_name)) + + def test_multiple_in_process_failures_then_succeed(self): + """ + Tests resumable download that fails twice in one process, then completes + """ + res_download_handler = ResumableDownloadHandler(num_retries=3) + dst_fp = self.make_dst_fp() + small_src_key_as_string, small_src_key = self.make_small_key() + small_src_key.get_contents_to_file( + dst_fp, res_download_handler=res_download_handler) + # Ensure downloaded object has correct content. + self.assertEqual(SMALL_KEY_SIZE, + get_cur_file_size(dst_fp)) + self.assertEqual(small_src_key_as_string, + small_src_key.get_contents_as_string()) + + def test_multiple_in_process_failures_then_succeed_with_tracker_file(self): + """ + Tests resumable download that fails completely in one process, + then when restarted completes, using a tracker file + """ + # Set up test harness that causes more failures than a single + # ResumableDownloadHandler instance will handle, writing enough data + # before the first failure that some of it survives that process run. 
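+        # With num_retries=0, the first harness-forced failure exhausts this
+        # handler's retries, so it raises ResumableDownloadException with
+        # ABORT_CUR_PROCESS; the tracker file it wrote is left behind so a
+        # later attempt can resume instead of starting over.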
+ harness = CallbackTestHarness( + fail_after_n_bytes=LARGE_KEY_SIZE/2, num_times_to_fail=2) + larger_src_key_as_string = os.urandom(LARGE_KEY_SIZE) + larger_src_key = self._MakeKey(data=larger_src_key_as_string) + tmpdir = self._MakeTempDir() + tracker_file_name = self.make_tracker_file(tmpdir) + dst_fp = self.make_dst_fp(tmpdir) + res_download_handler = ResumableDownloadHandler( + tracker_file_name=tracker_file_name, num_retries=0) + try: + larger_src_key.get_contents_to_file( + dst_fp, cb=harness.call, + res_download_handler=res_download_handler) + self.fail('Did not get expected ResumableDownloadException') + except ResumableDownloadException, e: + self.assertEqual(e.disposition, + ResumableTransferDisposition.ABORT_CUR_PROCESS) + # Ensure a tracker file survived. + self.assertTrue(os.path.exists(tracker_file_name)) + # Try it one more time; this time should succeed. + larger_src_key.get_contents_to_file( + dst_fp, cb=harness.call, + res_download_handler=res_download_handler) + self.assertEqual(LARGE_KEY_SIZE, + get_cur_file_size(dst_fp)) + self.assertEqual(larger_src_key_as_string, + larger_src_key.get_contents_as_string()) + self.assertFalse(os.path.exists(tracker_file_name)) + # Ensure some of the file was downloaded both before and after failure. + self.assertTrue( + len(harness.transferred_seq_before_first_failure) > 1 and + len(harness.transferred_seq_after_first_failure) > 1) + + def test_download_with_inital_partial_download_before_failure(self): + """ + Tests resumable download that successfully downloads some content + before it fails, then restarts and completes + """ + # Set up harness to fail download after several hundred KB so download + # server will have saved something before we retry. + harness = CallbackTestHarness( + fail_after_n_bytes=LARGE_KEY_SIZE/2) + larger_src_key_as_string = os.urandom(LARGE_KEY_SIZE) + larger_src_key = self._MakeKey(data=larger_src_key_as_string) + res_download_handler = ResumableDownloadHandler(num_retries=1) + dst_fp = self.make_dst_fp() + larger_src_key.get_contents_to_file( + dst_fp, cb=harness.call, + res_download_handler=res_download_handler) + # Ensure downloaded object has correct content. + self.assertEqual(LARGE_KEY_SIZE, + get_cur_file_size(dst_fp)) + self.assertEqual(larger_src_key_as_string, + larger_src_key.get_contents_as_string()) + # Ensure some of the file was downloaded both before and after failure. + self.assertTrue( + len(harness.transferred_seq_before_first_failure) > 1 and + len(harness.transferred_seq_after_first_failure) > 1) + + def test_zero_length_object_download(self): + """ + Tests downloading a zero-length object (exercises boundary conditions). 
+ """ + res_download_handler = ResumableDownloadHandler() + dst_fp = self.make_dst_fp() + k = self._MakeKey() + k.get_contents_to_file(dst_fp, + res_download_handler=res_download_handler) + self.assertEqual(0, get_cur_file_size(dst_fp)) + + def test_download_with_invalid_tracker_etag(self): + """ + Tests resumable download with a tracker file containing an invalid etag + """ + tmp_dir = self._MakeTempDir() + dst_fp = self.make_dst_fp(tmp_dir) + small_src_key_as_string, small_src_key = self.make_small_key() + invalid_etag_tracker_file_name = os.path.join(tmp_dir, + 'invalid_etag_tracker') + f = open(invalid_etag_tracker_file_name, 'w') + f.write('3.14159\n') + f.close() + res_download_handler = ResumableDownloadHandler( + tracker_file_name=invalid_etag_tracker_file_name) + # An error should be printed about the invalid tracker, but then it + # should run the update successfully. + small_src_key.get_contents_to_file( + dst_fp, res_download_handler=res_download_handler) + self.assertEqual(SMALL_KEY_SIZE, get_cur_file_size(dst_fp)) + self.assertEqual(small_src_key_as_string, + small_src_key.get_contents_as_string()) + + def test_download_with_inconsistent_etag_in_tracker(self): + """ + Tests resumable download with an inconsistent etag in tracker file + """ + tmp_dir = self._MakeTempDir() + dst_fp = self.make_dst_fp(tmp_dir) + small_src_key_as_string, small_src_key = self.make_small_key() + inconsistent_etag_tracker_file_name = os.path.join(tmp_dir, + 'inconsistent_etag_tracker') + f = open(inconsistent_etag_tracker_file_name, 'w') + good_etag = small_src_key.etag.strip('"\'') + new_val_as_list = [] + for c in reversed(good_etag): + new_val_as_list.append(c) + f.write('%s\n' % ''.join(new_val_as_list)) + f.close() + res_download_handler = ResumableDownloadHandler( + tracker_file_name=inconsistent_etag_tracker_file_name) + # An error should be printed about the expired tracker, but then it + # should run the update successfully. + small_src_key.get_contents_to_file( + dst_fp, res_download_handler=res_download_handler) + self.assertEqual(SMALL_KEY_SIZE, + get_cur_file_size(dst_fp)) + self.assertEqual(small_src_key_as_string, + small_src_key.get_contents_as_string()) + + def test_download_with_unwritable_tracker_file(self): + """ + Tests resumable download with an unwritable tracker file + """ + # Make dir where tracker_file lives temporarily unwritable. + tmp_dir = self._MakeTempDir() + tracker_file_name = os.path.join(tmp_dir, 'tracker') + save_mod = os.stat(tmp_dir).st_mode + try: + os.chmod(tmp_dir, 0) + res_download_handler = ResumableDownloadHandler( + tracker_file_name=tracker_file_name) + except ResumableDownloadException, e: + self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT) + self.assertNotEqual( + e.message.find('Couldn\'t write URI tracker file'), -1) + finally: + # Restore original protection of dir where tracker_file lives. + os.chmod(tmp_dir, save_mod) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/test_resumable_uploads.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/test_resumable_uploads.py new file mode 100644 index 0000000000000000000000000000000000000000..605937f3aba9baa36b253470428d60f4c2893cef --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/test_resumable_uploads.py @@ -0,0 +1,552 @@ +# Copyright 2010 Google Inc. 
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Tests of Google Cloud Storage resumable uploads.
+"""
+
+import StringIO
+import errno
+import random
+import os
+import time
+
+import boto
+from boto import storage_uri
+from boto.gs.resumable_upload_handler import ResumableUploadHandler
+from boto.exception import InvalidUriError
+from boto.exception import ResumableTransferDisposition
+from boto.exception import ResumableUploadException
+from cb_test_harness import CallbackTestHarness
+from tests.integration.gs.testcase import GSTestCase
+
+
+SMALL_KEY_SIZE = 2 * 1024  # 2 KB.
+LARGE_KEY_SIZE = 500 * 1024  # 500 KB.
+LARGEST_KEY_SIZE = 1024 * 1024  # 1 MB.
+
+
+class ResumableUploadTests(GSTestCase):
+    """Resumable upload test suite."""
+
+    def build_input_file(self, size):
+        buf = []
+        # I manually construct the random data here instead of calling
+        # os.urandom() because I want to constrain the range of data (in
+        # this case to '0'..'9') so the test code can easily overwrite part
+        # of the StringIO file with known-to-be-different values.
+        for i in range(size):
+            buf.append(str(random.randint(0, 9)))
+        file_as_string = ''.join(buf)
+        return (file_as_string, StringIO.StringIO(file_as_string))
+
+    def make_small_file(self):
+        return self.build_input_file(SMALL_KEY_SIZE)
+
+    def make_large_file(self):
+        return self.build_input_file(LARGE_KEY_SIZE)
+
+    def make_tracker_file(self, tmpdir=None):
+        if not tmpdir:
+            tmpdir = self._MakeTempDir()
+        tracker_file = os.path.join(tmpdir, 'tracker')
+        return tracker_file
+
+    def test_non_resumable_upload(self):
+        """
+        Tests that non-resumable uploads work
+        """
+        small_src_file_as_string, small_src_file = self.make_small_file()
+        # Seek to the end in case it's the first test.
+        small_src_file.seek(0, os.SEEK_END)
+        dst_key = self._MakeKey(set_contents=False)
+        try:
+            dst_key.set_contents_from_file(small_src_file)
+            self.fail("should fail because the file pointer needs rewinding")
+        except AttributeError:
+            pass
+        # Now try calling with a proper rewind.
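+        # (rewind=True seeks the source file back to position 0 before
+        # reading, so the upload sees the full contents even though the file
+        # pointer was left at EOF above.)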
+ dst_key.set_contents_from_file(small_src_file, rewind=True) + self.assertEqual(SMALL_KEY_SIZE, dst_key.size) + self.assertEqual(small_src_file_as_string, + dst_key.get_contents_as_string()) + + def test_upload_without_persistent_tracker(self): + """ + Tests a single resumable upload, with no tracker URI persistence + """ + res_upload_handler = ResumableUploadHandler() + small_src_file_as_string, small_src_file = self.make_small_file() + small_src_file.seek(0) + dst_key = self._MakeKey(set_contents=False) + dst_key.set_contents_from_file( + small_src_file, res_upload_handler=res_upload_handler) + self.assertEqual(SMALL_KEY_SIZE, dst_key.size) + self.assertEqual(small_src_file_as_string, + dst_key.get_contents_as_string()) + + def test_failed_upload_with_persistent_tracker(self): + """ + Tests that failed resumable upload leaves a correct tracker URI file + """ + harness = CallbackTestHarness() + tracker_file_name = self.make_tracker_file() + res_upload_handler = ResumableUploadHandler( + tracker_file_name=tracker_file_name, num_retries=0) + small_src_file_as_string, small_src_file = self.make_small_file() + small_src_file.seek(0) + dst_key = self._MakeKey(set_contents=False) + try: + dst_key.set_contents_from_file( + small_src_file, cb=harness.call, + res_upload_handler=res_upload_handler) + self.fail('Did not get expected ResumableUploadException') + except ResumableUploadException, e: + # We'll get a ResumableUploadException at this point because + # of CallbackTestHarness (above). Check that the tracker file was + # created correctly. + self.assertEqual(e.disposition, + ResumableTransferDisposition.ABORT_CUR_PROCESS) + self.assertTrue(os.path.exists(tracker_file_name)) + f = open(tracker_file_name) + uri_from_file = f.readline().strip() + f.close() + self.assertEqual(uri_from_file, + res_upload_handler.get_tracker_uri()) + + def test_retryable_exception_recovery(self): + """ + Tests handling of a retryable exception + """ + # Test one of the RETRYABLE_EXCEPTIONS. + exception = ResumableUploadHandler.RETRYABLE_EXCEPTIONS[0] + harness = CallbackTestHarness(exception=exception) + res_upload_handler = ResumableUploadHandler(num_retries=1) + small_src_file_as_string, small_src_file = self.make_small_file() + small_src_file.seek(0) + dst_key = self._MakeKey(set_contents=False) + dst_key.set_contents_from_file( + small_src_file, cb=harness.call, + res_upload_handler=res_upload_handler) + # Ensure uploaded object has correct content. + self.assertEqual(SMALL_KEY_SIZE, dst_key.size) + self.assertEqual(small_src_file_as_string, + dst_key.get_contents_as_string()) + + def test_broken_pipe_recovery(self): + """ + Tests handling of a Broken Pipe (which interacts with an httplib bug) + """ + exception = IOError(errno.EPIPE, "Broken pipe") + harness = CallbackTestHarness(exception=exception) + res_upload_handler = ResumableUploadHandler(num_retries=1) + small_src_file_as_string, small_src_file = self.make_small_file() + small_src_file.seek(0) + dst_key = self._MakeKey(set_contents=False) + dst_key.set_contents_from_file( + small_src_file, cb=harness.call, + res_upload_handler=res_upload_handler) + # Ensure uploaded object has correct content. 
+ self.assertEqual(SMALL_KEY_SIZE, dst_key.size) + self.assertEqual(small_src_file_as_string, + dst_key.get_contents_as_string()) + + def test_non_retryable_exception_handling(self): + """ + Tests a resumable upload that fails with a non-retryable exception + """ + harness = CallbackTestHarness( + exception=OSError(errno.EACCES, 'Permission denied')) + res_upload_handler = ResumableUploadHandler(num_retries=1) + small_src_file_as_string, small_src_file = self.make_small_file() + small_src_file.seek(0) + dst_key = self._MakeKey(set_contents=False) + try: + dst_key.set_contents_from_file( + small_src_file, cb=harness.call, + res_upload_handler=res_upload_handler) + self.fail('Did not get expected OSError') + except OSError, e: + # Ensure the error was re-raised. + self.assertEqual(e.errno, 13) + + def test_failed_and_restarted_upload_with_persistent_tracker(self): + """ + Tests resumable upload that fails once and then completes, with tracker + file + """ + harness = CallbackTestHarness() + tracker_file_name = self.make_tracker_file() + res_upload_handler = ResumableUploadHandler( + tracker_file_name=tracker_file_name, num_retries=1) + small_src_file_as_string, small_src_file = self.make_small_file() + small_src_file.seek(0) + dst_key = self._MakeKey(set_contents=False) + dst_key.set_contents_from_file( + small_src_file, cb=harness.call, + res_upload_handler=res_upload_handler) + # Ensure uploaded object has correct content. + self.assertEqual(SMALL_KEY_SIZE, dst_key.size) + self.assertEqual(small_src_file_as_string, + dst_key.get_contents_as_string()) + # Ensure tracker file deleted. + self.assertFalse(os.path.exists(tracker_file_name)) + + def test_multiple_in_process_failures_then_succeed(self): + """ + Tests resumable upload that fails twice in one process, then completes + """ + res_upload_handler = ResumableUploadHandler(num_retries=3) + small_src_file_as_string, small_src_file = self.make_small_file() + small_src_file.seek(0) + dst_key = self._MakeKey(set_contents=False) + dst_key.set_contents_from_file( + small_src_file, res_upload_handler=res_upload_handler) + # Ensure uploaded object has correct content. + self.assertEqual(SMALL_KEY_SIZE, dst_key.size) + self.assertEqual(small_src_file_as_string, + dst_key.get_contents_as_string()) + + def test_multiple_in_process_failures_then_succeed_with_tracker_file(self): + """ + Tests resumable upload that fails completely in one process, + then when restarted completes, using a tracker file + """ + # Set up test harness that causes more failures than a single + # ResumableUploadHandler instance will handle, writing enough data + # before the first failure that some of it survives that process run. + harness = CallbackTestHarness( + fail_after_n_bytes=LARGE_KEY_SIZE/2, num_times_to_fail=2) + tracker_file_name = self.make_tracker_file() + res_upload_handler = ResumableUploadHandler( + tracker_file_name=tracker_file_name, num_retries=1) + larger_src_file_as_string, larger_src_file = self.make_large_file() + larger_src_file.seek(0) + dst_key = self._MakeKey(set_contents=False) + try: + dst_key.set_contents_from_file( + larger_src_file, cb=harness.call, + res_upload_handler=res_upload_handler) + self.fail('Did not get expected ResumableUploadException') + except ResumableUploadException, e: + self.assertEqual(e.disposition, + ResumableTransferDisposition.ABORT_CUR_PROCESS) + # Ensure a tracker file survived. + self.assertTrue(os.path.exists(tracker_file_name)) + # Try it one more time; this time should succeed. 
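+        # Because the tracker file persisted the upload URI, this attempt
+        # resumes the server-side session rather than starting a new upload;
+        # the assertions below check that bytes were transferred both before
+        # and after the forced failure.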
+        larger_src_file.seek(0)
+        dst_key.set_contents_from_file(
+            larger_src_file, cb=harness.call,
+            res_upload_handler=res_upload_handler)
+        self.assertEqual(LARGE_KEY_SIZE, dst_key.size)
+        self.assertEqual(larger_src_file_as_string,
+                         dst_key.get_contents_as_string())
+        self.assertFalse(os.path.exists(tracker_file_name))
+        # Ensure some of the file was uploaded both before and after failure.
+        self.assertTrue(len(harness.transferred_seq_before_first_failure) > 1
+                        and
+                        len(harness.transferred_seq_after_first_failure) > 1)
+
+    def test_upload_with_inital_partial_upload_before_failure(self):
+        """
+        Tests resumable upload that successfully uploads some content
+        before it fails, then restarts and completes
+        """
+        # Set up harness to fail upload after several hundred KB so upload
+        # server will have saved something before we retry.
+        harness = CallbackTestHarness(
+            fail_after_n_bytes=LARGE_KEY_SIZE/2)
+        res_upload_handler = ResumableUploadHandler(num_retries=1)
+        larger_src_file_as_string, larger_src_file = self.make_large_file()
+        larger_src_file.seek(0)
+        dst_key = self._MakeKey(set_contents=False)
+        dst_key.set_contents_from_file(
+            larger_src_file, cb=harness.call,
+            res_upload_handler=res_upload_handler)
+        # Ensure uploaded object has correct content.
+        self.assertEqual(LARGE_KEY_SIZE, dst_key.size)
+        self.assertEqual(larger_src_file_as_string,
+                         dst_key.get_contents_as_string())
+        # Ensure some of the file was uploaded both before and after failure.
+        self.assertTrue(len(harness.transferred_seq_before_first_failure) > 1
+                        and
+                        len(harness.transferred_seq_after_first_failure) > 1)
+
+    def test_empty_file_upload(self):
+        """
+        Tests uploading an empty file (exercises boundary conditions).
+        """
+        res_upload_handler = ResumableUploadHandler()
+        empty_src_file = StringIO.StringIO('')
+        empty_src_file.seek(0)
+        dst_key = self._MakeKey(set_contents=False)
+        dst_key.set_contents_from_file(
+            empty_src_file, res_upload_handler=res_upload_handler)
+        self.assertEqual(0, dst_key.size)
+
+    def test_upload_retains_metadata(self):
+        """
+        Tests that resumable upload correctly sets passed metadata
+        """
+        res_upload_handler = ResumableUploadHandler()
+        headers = {'Content-Type' : 'text/plain', 'x-goog-meta-abc' : 'my meta',
+                   'x-goog-acl' : 'public-read'}
+        small_src_file_as_string, small_src_file = self.make_small_file()
+        small_src_file.seek(0)
+        dst_key = self._MakeKey(set_contents=False)
+        dst_key.set_contents_from_file(
+            small_src_file, headers=headers,
+            res_upload_handler=res_upload_handler)
+        self.assertEqual(SMALL_KEY_SIZE, dst_key.size)
+        self.assertEqual(small_src_file_as_string,
+                         dst_key.get_contents_as_string())
+        dst_key.open_read()
+        self.assertEqual('text/plain', dst_key.content_type)
+        self.assertTrue('abc' in dst_key.metadata)
+        self.assertEqual('my meta', str(dst_key.metadata['abc']))
+        acl = dst_key.get_acl()
+        for entry in acl.entries.entry_list:
+            if str(entry.scope) == '<AllUsers>':
+                self.assertEqual('READ', str(acl.entries.entry_list[1].permission))
+                return
+        self.fail('No scope found')
+
+    def test_upload_with_file_size_change_between_starts(self):
+        """
+        Tests resumable upload on a file that changes sizes between initial
+        upload start and restart
+        """
+        harness = CallbackTestHarness(
+            fail_after_n_bytes=LARGE_KEY_SIZE/2)
+        tracker_file_name = self.make_tracker_file()
+        # Set up first process' ResumableUploadHandler not to do any
+        # retries (initial upload request will establish expected size to
+        # upload server).
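+        # A restart with a different-sized source file against this same
+        # tracker URI should then be rejected, which the handler reports as a
+        # hard ABORT ('file size changed') rather than a retryable error.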
+ res_upload_handler = ResumableUploadHandler( + tracker_file_name=tracker_file_name, num_retries=0) + larger_src_file_as_string, larger_src_file = self.make_large_file() + larger_src_file.seek(0) + dst_key = self._MakeKey(set_contents=False) + try: + dst_key.set_contents_from_file( + larger_src_file, cb=harness.call, + res_upload_handler=res_upload_handler) + self.fail('Did not get expected ResumableUploadException') + except ResumableUploadException, e: + # First abort (from harness-forced failure) should be + # ABORT_CUR_PROCESS. + self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT_CUR_PROCESS) + # Ensure a tracker file survived. + self.assertTrue(os.path.exists(tracker_file_name)) + # Try it again, this time with different size source file. + # Wait 1 second between retry attempts, to give upload server a + # chance to save state so it can respond to changed file size with + # 500 response in the next attempt. + time.sleep(1) + try: + largest_src_file = self.build_input_file(LARGEST_KEY_SIZE)[1] + largest_src_file.seek(0) + dst_key.set_contents_from_file( + largest_src_file, res_upload_handler=res_upload_handler) + self.fail('Did not get expected ResumableUploadException') + except ResumableUploadException, e: + # This abort should be a hard abort (file size changing during + # transfer). + self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT) + self.assertNotEqual(e.message.find('file size changed'), -1, e.message) + + def test_upload_with_file_size_change_during_upload(self): + """ + Tests resumable upload on a file that changes sizes while upload + in progress + """ + # Create a file we can change during the upload. + test_file_size = 500 * 1024 # 500 KB. + test_file = self.build_input_file(test_file_size)[1] + harness = CallbackTestHarness(fp_to_change=test_file, + fp_change_pos=test_file_size) + res_upload_handler = ResumableUploadHandler(num_retries=1) + dst_key = self._MakeKey(set_contents=False) + try: + dst_key.set_contents_from_file( + test_file, cb=harness.call, + res_upload_handler=res_upload_handler) + self.fail('Did not get expected ResumableUploadException') + except ResumableUploadException, e: + self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT) + self.assertNotEqual( + e.message.find('File changed during upload'), -1) + + def test_upload_with_file_content_change_during_upload(self): + """ + Tests resumable upload on a file that changes one byte of content + (so, size stays the same) while upload in progress. + """ + def Execute(): + res_upload_handler = ResumableUploadHandler(num_retries=1) + dst_key = self._MakeKey(set_contents=False) + bucket_uri = storage_uri('gs://' + dst_key.bucket.name) + dst_key_uri = bucket_uri.clone_replace_name(dst_key.name) + try: + dst_key.set_contents_from_file( + test_file, cb=harness.call, + res_upload_handler=res_upload_handler) + return False + except ResumableUploadException, e: + self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT) + # Ensure the file size didn't change. + test_file.seek(0, os.SEEK_END) + self.assertEqual(test_file_size, test_file.tell()) + self.assertNotEqual( + e.message.find('md5 signature doesn\'t match etag'), -1) + # Ensure the bad data wasn't left around. + try: + dst_key_uri.get_key() + self.fail('Did not get expected InvalidUriError') + except InvalidUriError, e: + pass + return True + + test_file_size = 500 * 1024 # 500 KB + # The sizes of all the blocks written, except the final block, must be a + # multiple of 256K bytes. 
We need to trigger a failure after the first + # 256K bytes have been uploaded so that at least one block of data is + # written on the server. + # See https://developers.google.com/storage/docs/concepts-techniques#resumable + # for more information about chunking of uploads. + n_bytes = 300 * 1024 # 300 KB + delay = 0 + # First, try the test without a delay. If that fails, try it with a + # 15-second delay. The first attempt may fail to recognize that the + # server has a block if the server hasn't yet committed that block + # when we resume the transfer. This would cause a restarted upload + # instead of a resumed upload. + for attempt in range(2): + test_file = self.build_input_file(test_file_size)[1] + harness = CallbackTestHarness( + fail_after_n_bytes=n_bytes, + fp_to_change=test_file, + # Write to byte 1, as the CallbackTestHarness writes + # 3 bytes. This will result in the data on the server + # being different than the local file. + fp_change_pos=1, + delay_after_change=delay) + if Execute(): + break + if (attempt == 0 and + 0 in harness.transferred_seq_after_first_failure): + # We can confirm the upload was restarted instead of resumed + # by determining if there is an entry of 0 in the + # transferred_seq_after_first_failure list. + # In that case, try again with a 15 second delay. + delay = 15 + continue + self.fail('Did not get expected ResumableUploadException') + + def test_upload_with_content_length_header_set(self): + """ + Tests resumable upload on a file when the user supplies a + Content-Length header. This is used by gsutil, for example, + to set the content length when gzipping a file. + """ + res_upload_handler = ResumableUploadHandler() + small_src_file_as_string, small_src_file = self.make_small_file() + small_src_file.seek(0) + dst_key = self._MakeKey(set_contents=False) + try: + dst_key.set_contents_from_file( + small_src_file, res_upload_handler=res_upload_handler, + headers={'Content-Length' : SMALL_KEY_SIZE}) + self.fail('Did not get expected ResumableUploadException') + except ResumableUploadException, e: + self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT) + self.assertNotEqual( + e.message.find('Attempt to specify Content-Length header'), -1) + + def test_upload_with_syntactically_invalid_tracker_uri(self): + """ + Tests resumable upload with a syntactically invalid tracker URI + """ + tmp_dir = self._MakeTempDir() + syntactically_invalid_tracker_file_name = os.path.join(tmp_dir, + 'synt_invalid_uri_tracker') + with open(syntactically_invalid_tracker_file_name, 'w') as f: + f.write('ftp://example.com') + res_upload_handler = ResumableUploadHandler( + tracker_file_name=syntactically_invalid_tracker_file_name) + small_src_file_as_string, small_src_file = self.make_small_file() + # An error should be printed about the invalid URI, but then it + # should run the update successfully. 
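+        # In other words, the handler is expected to discard the unparsable
+        # tracker URI, start a fresh resumable upload, and still complete
+        # with the correct contents.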
+        small_src_file.seek(0)
+        dst_key = self._MakeKey(set_contents=False)
+        dst_key.set_contents_from_file(
+            small_src_file, res_upload_handler=res_upload_handler)
+        self.assertEqual(SMALL_KEY_SIZE, dst_key.size)
+        self.assertEqual(small_src_file_as_string,
+                         dst_key.get_contents_as_string())
+
+    def test_upload_with_invalid_upload_id_in_tracker_file(self):
+        """
+        Tests resumable upload with invalid upload ID
+        """
+        invalid_upload_id = ('http://pub.storage.googleapis.com/?upload_id='
+                             'AyzB2Uo74W4EYxyi5dp_-r68jz8rtbvshsv4TX7srJVkJ57CxTY5Dw2')
+        tmpdir = self._MakeTempDir()
+        invalid_upload_id_tracker_file_name = os.path.join(tmpdir,
+            'invalid_upload_id_tracker')
+        with open(invalid_upload_id_tracker_file_name, 'w') as f:
+            f.write(invalid_upload_id)
+
+        res_upload_handler = ResumableUploadHandler(
+            tracker_file_name=invalid_upload_id_tracker_file_name)
+        small_src_file_as_string, small_src_file = self.make_small_file()
+        # An error should occur, but then the tracker URI should be
+        # regenerated and the update should succeed.
+        small_src_file.seek(0)
+        dst_key = self._MakeKey(set_contents=False)
+        dst_key.set_contents_from_file(
+            small_src_file, res_upload_handler=res_upload_handler)
+        self.assertEqual(SMALL_KEY_SIZE, dst_key.size)
+        self.assertEqual(small_src_file_as_string,
+                         dst_key.get_contents_as_string())
+        self.assertNotEqual(invalid_upload_id,
+                            res_upload_handler.get_tracker_uri())
+
+    def test_upload_with_unwritable_tracker_file(self):
+        """
+        Tests resumable upload with an unwritable tracker file
+        """
+        # Make dir where tracker_file lives temporarily unwritable.
+        tmp_dir = self._MakeTempDir()
+        tracker_file_name = self.make_tracker_file(tmp_dir)
+        save_mod = os.stat(tmp_dir).st_mode
+        try:
+            os.chmod(tmp_dir, 0)
+            res_upload_handler = ResumableUploadHandler(
+                tracker_file_name=tracker_file_name)
+        except ResumableUploadException, e:
+            self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
+            self.assertNotEqual(
+                e.message.find('Couldn\'t write URI tracker file'), -1)
+        finally:
+            # Restore original protection of dir where tracker_file lives.
+            os.chmod(tmp_dir, save_mod)
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/test_storage_uri.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/test_storage_uri.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8ed3b6283261c3ba4dec6629134dc15aa62c34f
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/test_storage_uri.py
@@ -0,0 +1,161 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2013, Google, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""Integration tests for StorageUri interface."""
+
+import binascii
+import re
+import StringIO
+
+from boto import storage_uri
+from boto.exception import BotoClientError
+from boto.gs.acl import SupportedPermissions as perms
+from tests.integration.gs.testcase import GSTestCase
+
+
+class GSStorageUriTest(GSTestCase):
+
+    def testHasVersion(self):
+        uri = storage_uri("gs://bucket/obj")
+        self.assertFalse(uri.has_version())
+        uri.version_id = "versionid"
+        self.assertTrue(uri.has_version())
+
+        uri = storage_uri("gs://bucket/obj")
+        # Generation triggers versioning.
+        uri.generation = 12345
+        self.assertTrue(uri.has_version())
+        uri.generation = None
+        self.assertFalse(uri.has_version())
+
+        # Zero-generation counts as a version.
+        uri = storage_uri("gs://bucket/obj")
+        uri.generation = 0
+        self.assertTrue(uri.has_version())
+
+    def testCloneReplaceKey(self):
+        b = self._MakeBucket()
+        k = b.new_key("obj")
+        k.set_contents_from_string("stringdata")
+
+        orig_uri = storage_uri("gs://%s/" % b.name)
+
+        uri = orig_uri.clone_replace_key(k)
+        self.assertTrue(uri.has_version())
+        self.assertRegexpMatches(str(uri.generation), r"[0-9]+")
+
+    def testSetAclXml(self):
+        """Ensures that calls to the set_xml_acl functions succeed."""
+        b = self._MakeBucket()
+        k = b.new_key("obj")
+        k.set_contents_from_string("stringdata")
+        bucket_uri = storage_uri("gs://%s/" % b.name)
+
+        # Get a valid ACL for an object.
+        bucket_uri.object_name = "obj"
+        bucket_acl = bucket_uri.get_acl()
+        bucket_uri.object_name = None
+
+        # Add a permission to the ACL.
+        all_users_read_permission = ("<Entry><Scope type='AllUsers'/>"
+                                     "<Permission>READ</Permission></Entry>")
+        acl_string = re.sub(r"</Entries>",
+                            all_users_read_permission + "</Entries>",
+                            bucket_acl.to_xml())
+
+        # Test-generated owner IDs are not currently valid for buckets
+        acl_no_owner_string = re.sub(r"<Owner>.*</Owner>", "", acl_string)
+
+        # Set ACL on an object.
+        bucket_uri.set_xml_acl(acl_string, "obj")
+        # Set ACL on a bucket.
+        bucket_uri.set_xml_acl(acl_no_owner_string)
+        # Set the default ACL for a bucket.
+        bucket_uri.set_def_xml_acl(acl_no_owner_string)
+
+        # Verify all the ACLs were successfully applied.
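+        # (A bucket's default object ACL applies to objects created later,
+        # so it is read back here with get_def_acl() rather than from the
+        # existing key.)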
+ new_obj_acl_string = k.get_acl().to_xml() + new_bucket_acl_string = bucket_uri.get_acl().to_xml() + new_bucket_def_acl_string = bucket_uri.get_def_acl().to_xml() + self.assertRegexpMatches(new_obj_acl_string, r"AllUsers") + self.assertRegexpMatches(new_bucket_acl_string, r"AllUsers") + self.assertRegexpMatches(new_bucket_def_acl_string, r"AllUsers") + + def testPropertiesUpdated(self): + b = self._MakeBucket() + bucket_uri = storage_uri("gs://%s" % b.name) + key_uri = bucket_uri.clone_replace_name("obj") + key_uri.set_contents_from_string("data1") + + self.assertRegexpMatches(str(key_uri.generation), r"[0-9]+") + k = b.get_key("obj") + self.assertEqual(k.generation, key_uri.generation) + self.assertEquals(k.get_contents_as_string(), "data1") + + key_uri.set_contents_from_stream(StringIO.StringIO("data2")) + self.assertRegexpMatches(str(key_uri.generation), r"[0-9]+") + self.assertGreater(key_uri.generation, k.generation) + k = b.get_key("obj") + self.assertEqual(k.generation, key_uri.generation) + self.assertEquals(k.get_contents_as_string(), "data2") + + key_uri.set_contents_from_file(StringIO.StringIO("data3")) + self.assertRegexpMatches(str(key_uri.generation), r"[0-9]+") + self.assertGreater(key_uri.generation, k.generation) + k = b.get_key("obj") + self.assertEqual(k.generation, key_uri.generation) + self.assertEquals(k.get_contents_as_string(), "data3") + + def testCompose(self): + data1 = 'hello ' + data2 = 'world!' + expected_crc = 1238062967 + + b = self._MakeBucket() + bucket_uri = storage_uri("gs://%s" % b.name) + key_uri1 = bucket_uri.clone_replace_name("component1") + key_uri1.set_contents_from_string(data1) + key_uri2 = bucket_uri.clone_replace_name("component2") + key_uri2.set_contents_from_string(data2) + + # Simple compose. + key_uri_composite = bucket_uri.clone_replace_name("composite") + components = [key_uri1, key_uri2] + key_uri_composite.compose(components, content_type='text/plain') + self.assertEquals(key_uri_composite.get_contents_as_string(), + data1 + data2) + composite_key = key_uri_composite.get_key() + cloud_crc32c = binascii.hexlify( + composite_key.cloud_hashes['crc32c']) + self.assertEquals(cloud_crc32c, hex(expected_crc)[2:]) + self.assertEquals(composite_key.content_type, 'text/plain') + + # Compose disallowed between buckets. + key_uri1.bucket_name += '2' + try: + key_uri_composite.compose(components) + self.fail('Composing between buckets didn\'t fail as expected.') + except BotoClientError as err: + self.assertEquals( + err.reason, 'GCS does not support inter-bucket composing') + diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/test_versioning.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/test_versioning.py new file mode 100644 index 0000000000000000000000000000000000000000..6d1aedde4c4c364f33bd9e8bf56759c42d469fcd --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/test_versioning.py @@ -0,0 +1,267 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2012, Google, Inc. +# All rights reserved. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +"""Integration tests for GS versioning support.""" + +from xml import sax + +from boto import handler +from boto.gs import acl +from tests.integration.gs.testcase import GSTestCase + + +class GSVersioningTest(GSTestCase): + + def testVersioningToggle(self): + b = self._MakeBucket() + self.assertFalse(b.get_versioning_status()) + b.configure_versioning(True) + self.assertTrue(b.get_versioning_status()) + b.configure_versioning(False) + self.assertFalse(b.get_versioning_status()) + + def testDeleteVersionedKey(self): + b = self._MakeVersionedBucket() + k = b.new_key("foo") + s1 = "test1" + k.set_contents_from_string(s1) + + k = b.get_key("foo") + g1 = k.generation + + s2 = "test2" + k.set_contents_from_string(s2) + k = b.get_key("foo") + g2 = k.generation + + versions = list(b.list_versions()) + self.assertEqual(len(versions), 2) + self.assertEqual(versions[0].name, "foo") + self.assertEqual(versions[1].name, "foo") + generations = [k.generation for k in versions] + self.assertIn(g1, generations) + self.assertIn(g2, generations) + + # Delete "current" version and make sure that version is no longer + # visible from a basic GET call. + b.delete_key("foo", generation=None) + self.assertIsNone(b.get_key("foo")) + + # Both old versions should still be there when listed using the versions + # query parameter. + versions = list(b.list_versions()) + self.assertEqual(len(versions), 2) + self.assertEqual(versions[0].name, "foo") + self.assertEqual(versions[1].name, "foo") + generations = [k.generation for k in versions] + self.assertIn(g1, generations) + self.assertIn(g2, generations) + + # Delete generation 2 and make sure it's gone. + b.delete_key("foo", generation=g2) + versions = list(b.list_versions()) + self.assertEqual(len(versions), 1) + self.assertEqual(versions[0].name, "foo") + self.assertEqual(versions[0].generation, g1) + + # Delete generation 1 and make sure it's gone. 
+        b.delete_key("foo", generation=g1)
+        versions = list(b.list_versions())
+        self.assertEqual(len(versions), 0)
+
+    def testGetVersionedKey(self):
+        b = self._MakeVersionedBucket()
+        k = b.new_key("foo")
+        s1 = "test1"
+        k.set_contents_from_string(s1)
+
+        k = b.get_key("foo")
+        g1 = k.generation
+        o1 = k.get_contents_as_string()
+        self.assertEqual(o1, s1)
+
+        s2 = "test2"
+        k.set_contents_from_string(s2)
+        k = b.get_key("foo")
+        g2 = k.generation
+        self.assertNotEqual(g2, g1)
+        o2 = k.get_contents_as_string()
+        self.assertEqual(o2, s2)
+
+        k = b.get_key("foo", generation=g1)
+        self.assertEqual(k.get_contents_as_string(), s1)
+        k = b.get_key("foo", generation=g2)
+        self.assertEqual(k.get_contents_as_string(), s2)
+
+    def testVersionedBucketCannedAcl(self):
+        b = self._MakeVersionedBucket()
+        k = b.new_key("foo")
+        s1 = "test1"
+        k.set_contents_from_string(s1)
+
+        k = b.get_key("foo")
+        g1 = k.generation
+
+        s2 = "test2"
+        k.set_contents_from_string(s2)
+        k = b.get_key("foo")
+        g2 = k.generation
+
+        acl1g1 = b.get_acl("foo", generation=g1)
+        acl1g2 = b.get_acl("foo", generation=g2)
+        owner1g1 = acl1g1.owner.id
+        owner1g2 = acl1g2.owner.id
+        self.assertEqual(owner1g1, owner1g2)
+        entries1g1 = acl1g1.entries.entry_list
+        entries1g2 = acl1g2.entries.entry_list
+        self.assertEqual(len(entries1g1), len(entries1g2))
+
+        b.set_acl("public-read", key_name="foo", generation=g1)
+
+        acl2g1 = b.get_acl("foo", generation=g1)
+        acl2g2 = b.get_acl("foo", generation=g2)
+        entries2g1 = acl2g1.entries.entry_list
+        entries2g2 = acl2g2.entries.entry_list
+        self.assertEqual(len(entries2g2), len(entries1g2))
+        public_read_entries1 = [e for e in entries2g1 if e.permission == "READ"
+                                and e.scope.type == acl.ALL_USERS]
+        public_read_entries2 = [e for e in entries2g2 if e.permission == "READ"
+                                and e.scope.type == acl.ALL_USERS]
+        self.assertEqual(len(public_read_entries1), 1)
+        self.assertEqual(len(public_read_entries2), 0)
+
+    def testVersionedBucketXmlAcl(self):
+        b = self._MakeVersionedBucket()
+        k = b.new_key("foo")
+        s1 = "test1"
+        k.set_contents_from_string(s1)
+
+        k = b.get_key("foo")
+        g1 = k.generation
+
+        s2 = "test2"
+        k.set_contents_from_string(s2)
+        k = b.get_key("foo")
+        g2 = k.generation
+
+        acl1g1 = b.get_acl("foo", generation=g1)
+        acl1g2 = b.get_acl("foo", generation=g2)
+        owner1g1 = acl1g1.owner.id
+        owner1g2 = acl1g2.owner.id
+        self.assertEqual(owner1g1, owner1g2)
+        entries1g1 = acl1g1.entries.entry_list
+        entries1g2 = acl1g2.entries.entry_list
+        self.assertEqual(len(entries1g1), len(entries1g2))
+
+        acl_xml = (
+            '<ACL><Entries><Entry><Scope type="AllUsers"/>' +
+            '<Permission>READ</Permission>' +
+            '</Entry></Entries></ACL>')
+        aclo = acl.ACL()
+        h = handler.XmlHandler(aclo, b)
+        sax.parseString(acl_xml, h)
+
+        b.set_acl(aclo, key_name="foo", generation=g1)
+
+        acl2g1 = b.get_acl("foo", generation=g1)
+        acl2g2 = b.get_acl("foo", generation=g2)
+        entries2g1 = acl2g1.entries.entry_list
+        entries2g2 = acl2g2.entries.entry_list
+        self.assertEqual(len(entries2g2), len(entries1g2))
+        public_read_entries1 = [e for e in entries2g1 if e.permission == "READ"
+                                and e.scope.type == acl.ALL_USERS]
+        public_read_entries2 = [e for e in entries2g2 if e.permission == "READ"
+                                and e.scope.type == acl.ALL_USERS]
+        self.assertEqual(len(public_read_entries1), 1)
+        self.assertEqual(len(public_read_entries2), 0)
+
+    def testVersionedObjectCannedAcl(self):
+        b = self._MakeVersionedBucket()
+        k = b.new_key("foo")
+        s1 = "test1"
+        k.set_contents_from_string(s1)
+
+        k = b.get_key("foo")
+        g1 = k.generation
+
+        s2 = "test2"
+        k.set_contents_from_string(s2)
+        k = b.get_key("foo")
+        g2 = k.generation
+
+        acl1g1 = b.get_acl("foo", generation=g1)
+        acl1g2 = b.get_acl("foo", generation=g2)
+        owner1g1 = acl1g1.owner.id
+        owner1g2 = acl1g2.owner.id
+        self.assertEqual(owner1g1, owner1g2)
+        entries1g1 = acl1g1.entries.entry_list
+        entries1g2 = acl1g2.entries.entry_list
+        self.assertEqual(len(entries1g1), len(entries1g2))
+
+        b.set_acl("public-read", key_name="foo", generation=g1)
+
+        acl2g1 = b.get_acl("foo", generation=g1)
+        acl2g2 = b.get_acl("foo", generation=g2)
+        entries2g1 = acl2g1.entries.entry_list
+        entries2g2 = acl2g2.entries.entry_list
+        self.assertEqual(len(entries2g2), len(entries1g2))
+        public_read_entries1 = [e for e in entries2g1 if e.permission == "READ"
+                                and e.scope.type == acl.ALL_USERS]
+        public_read_entries2 = [e for e in entries2g2 if e.permission == "READ"
+                                and e.scope.type == acl.ALL_USERS]
+        self.assertEqual(len(public_read_entries1), 1)
+        self.assertEqual(len(public_read_entries2), 0)
+
+    def testCopyVersionedKey(self):
+        b = self._MakeVersionedBucket()
+        k = b.new_key("foo")
+        s1 = "test1"
+        k.set_contents_from_string(s1)
+
+        k = b.get_key("foo")
+        g1 = k.generation
+
+        s2 = "test2"
+        k.set_contents_from_string(s2)
+
+        b2 = self._MakeVersionedBucket()
+        b2.copy_key("foo2", b.name, "foo", src_generation=g1)
+
+        k2 = b2.get_key("foo2")
+        s3 = k2.get_contents_as_string()
+        self.assertEqual(s3, s1)
+
+    def testKeyGenerationUpdatesOnSet(self):
+        b = self._MakeVersionedBucket()
+        k = b.new_key("foo")
+        self.assertIsNone(k.generation)
+        k.set_contents_from_string("test1")
+        g1 = k.generation
+        self.assertRegexpMatches(g1, r'[0-9]+')
+        self.assertEqual(k.metageneration, '1')
+        k.set_contents_from_string("test2")
+        g2 = k.generation
+        self.assertNotEqual(g1, g2)
+        self.assertRegexpMatches(g2, r'[0-9]+')
+        self.assertGreater(int(g2), int(g1))
+        self.assertEqual(k.metageneration, '1')
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/testcase.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/testcase.py
new file mode 100644
index 0000000000000000000000000000000000000000..b16ea8f8bf94921ae2b5158d62e968143e6d39ca
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/testcase.py
@@ -0,0 +1,116 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2013, Google, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+ +"""Base TestCase class for gs integration tests.""" + +import shutil +import tempfile +import time + +from boto.exception import GSResponseError +from boto.gs.connection import GSConnection +from tests.integration.gs import util +from tests.integration.gs.util import retry +from tests.unit import unittest + +@unittest.skipUnless(util.has_google_credentials(), + "Google credentials are required to run the Google " + "Cloud Storage tests. Update your boto.cfg to run " + "these tests.") +class GSTestCase(unittest.TestCase): + gs = True + + def setUp(self): + self._conn = GSConnection() + self._buckets = [] + self._tempdirs = [] + + # Retry with an exponential backoff if a server error is received. This + # ensures that we try *really* hard to clean up after ourselves. + @retry(GSResponseError) + def tearDown(self): + while len(self._tempdirs): + tmpdir = self._tempdirs.pop() + shutil.rmtree(tmpdir, ignore_errors=True) + + while(len(self._buckets)): + b = self._buckets[-1] + bucket = self._conn.get_bucket(b) + while len(list(bucket.list_versions())) > 0: + for k in bucket.list_versions(): + bucket.delete_key(k.name, generation=k.generation) + bucket.delete() + self._buckets.pop() + + def _GetConnection(self): + """Returns the GSConnection object used to connect to GCS.""" + return self._conn + + def _MakeTempName(self): + """Creates and returns a temporary name for testing that is likely to be + unique.""" + return "boto-gs-test-%s" % repr(time.time()).replace(".", "-") + + def _MakeBucketName(self): + """Creates and returns a temporary bucket name for testing that is + likely to be unique.""" + b = self._MakeTempName() + self._buckets.append(b) + return b + + def _MakeBucket(self): + """Creates and returns temporary bucket for testing. After the test, the + contents of the bucket and the bucket itself will be deleted.""" + b = self._conn.create_bucket(self._MakeBucketName()) + return b + + def _MakeKey(self, data='', bucket=None, set_contents=True): + """Creates and returns a Key with provided data. If no bucket is given, + a temporary bucket is created.""" + if data and not set_contents: + # The data and set_contents parameters are mutually exclusive. + raise ValueError('MakeKey called with a non-empty data parameter ' + 'but set_contents was set to False.') + if not bucket: + bucket = self._MakeBucket() + key_name = self._MakeTempName() + k = bucket.new_key(key_name) + if set_contents: + k.set_contents_from_string(data) + return k + + def _MakeVersionedBucket(self): + """Creates and returns temporary versioned bucket for testing. After the + test, the contents of the bucket and the bucket itself will be + deleted.""" + b = self._MakeBucket() + b.configure_versioning(True) + return b + + def _MakeTempDir(self): + """Creates and returns a temporary directory on disk. After the test, + the contents of the directory and the directory itself will be + deleted.""" + tmpdir = tempfile.mkdtemp(prefix=self._MakeTempName()) + self._tempdirs.append(tmpdir) + return tmpdir diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/util.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/util.py new file mode 100644 index 0000000000000000000000000000000000000000..2b76078cd3a90dbc1d5d7676d41b22dcb0f8b813 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/gs/util.py @@ -0,0 +1,86 @@ +# Copyright (c) 2012, Google, Inc. +# All rights reserved. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import time + +from boto.provider import Provider + + +_HAS_GOOGLE_CREDENTIALS = None + + +def has_google_credentials(): + global _HAS_GOOGLE_CREDENTIALS + if _HAS_GOOGLE_CREDENTIALS is None: + provider = Provider('google') + if (provider.get_access_key() is None or + provider.get_secret_key() is None): + _HAS_GOOGLE_CREDENTIALS = False + else: + _HAS_GOOGLE_CREDENTIALS = True + return _HAS_GOOGLE_CREDENTIALS + + +def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None): + """Retry calling the decorated function using an exponential backoff. + + Taken from: + https://github.com/saltycrane/retry-decorator + Licensed under BSD: + https://github.com/saltycrane/retry-decorator/blob/master/LICENSE + + :param ExceptionToCheck: the exception to check. may be a tuple of + exceptions to check + :type ExceptionToCheck: Exception or tuple + :param tries: number of times to try (not retry) before giving up + :type tries: int + :param delay: initial delay between retries in seconds + :type delay: int + :param backoff: backoff multiplier e.g. value of 2 will double the delay + each retry + :type backoff: int + :param logger: logger to use. If None, print + :type logger: logging.Logger instance + """ + def deco_retry(f): + def f_retry(*args, **kwargs): + mtries, mdelay = tries, delay + try_one_last_time = True + while mtries > 1: + try: + return f(*args, **kwargs) + try_one_last_time = False + break + except ExceptionToCheck, e: + msg = "%s, Retrying in %d seconds..." 
+                    if logger:
+                        logger.warning(msg)
+                    else:
+                        print(msg)
+                    time.sleep(mdelay)
+                    mtries -= 1
+                    mdelay *= backoff
+            # Retries exhausted; make one final attempt and let any
+            # exception propagate to the caller.
+            return f(*args, **kwargs)
+        return f_retry  # true decorator
+    return deco_retry
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/iam/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/iam/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc0f80de606af632e63e289a77f78c5eee5ad5e8
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/iam/__init__.py
@@ -0,0 +1,20 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/iam/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/iam/test_cert_verification.py
new file mode 100644
index 0000000000000000000000000000000000000000..53349de63ab20363f9999be0d90429205ab3d0bd
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/iam/test_cert_verification.py
@@ -0,0 +1,39 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on all service endpoints validate.
+""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.iam + + +class IAMCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + iam = True + regions = boto.iam.regions() + + def sample_service_call(self, conn): + conn.get_all_users() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/iam/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/iam/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..6e536413d7c914904269b82a33c0a6606598ec5c --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/iam/test_connection.py @@ -0,0 +1,47 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import boto +import time + +from tests.compat import unittest + + +class TestIAM(unittest.TestCase): + iam = True + + def test_group_users(self): + # A very basic test to create a group, a user, add the user + # to the group and then delete everything + iam = boto.connect_iam() + + name = 'boto-test-%d' % time.time() + username = 'boto-test-user-%d' % time.time() + + iam.create_group(name) + iam.create_user(username) + + iam.add_user_to_group(name, username) + + iam.remove_user_from_group(name, username) + iam.delete_user(username) + iam.delete_group(name) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/iam/test_password_policy.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/iam/test_password_policy.py new file mode 100644 index 0000000000000000000000000000000000000000..aa86fc57bb9cbcb72431760a235d1040fbb44bbf --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/iam/test_password_policy.py @@ -0,0 +1,80 @@ +# Copyright (c) 2014 Rocket Internet AG. +# Luca Bruno +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import boto
+import time
+
+from tests.compat import unittest
+
+class IAMAccountPasswordPolicy(unittest.TestCase):
+    iam = True
+
+    def test_password_policy(self):
+        # A series of tests to check the password policy API
+        iam = boto.connect_iam()
+
+        # First, preserve the current password policy
+        try:
+            initial_policy = iam.get_account_password_policy()
+        except boto.exception.BotoServerError as srv_error:
+            initial_policy = None
+            if srv_error.status != 404:
+                raise srv_error
+
+        # Update the policy and read it back
+        test_min_length = 88
+        iam.update_account_password_policy(minimum_password_length=test_min_length)
+        new_policy = iam.get_account_password_policy()
+        new_min_length = new_policy['get_account_password_policy_response']\
+            ['get_account_password_policy_result']['password_policy']\
+            ['minimum_password_length']
+
+        if test_min_length != int(new_min_length):
+            raise Exception("Failed to update account password policy")
+
+        # Delete the policy and verify the deletion
+        test_policy = ''
+        iam.delete_account_password_policy()
+        try:
+            test_policy = iam.get_account_password_policy()
+        except boto.exception.BotoServerError as srv_error:
+            test_policy = None
+            if srv_error.status != 404:
+                raise srv_error
+
+        if test_policy is not None:
+            raise Exception("Failed to delete account password policy")
+
+        # Restore initial account password policy
+        if initial_policy:
+            p = initial_policy['get_account_password_policy_response']\
+                ['get_account_password_policy_result']['password_policy']
+            iam.update_account_password_policy(minimum_password_length=int(p['minimum_password_length']),
+                                               allow_users_to_change_password=bool(p['allow_users_to_change_password']),
+                                               hard_expiry=bool(p['hard_expiry']),
+                                               max_password_age=int(p['max_password_age']),
+                                               password_reuse_prevention=int(p['password_reuse_prevention']),
+                                               require_lowercase_characters=bool(p['require_lowercase_characters']),
+                                               require_numbers=bool(p['require_numbers']),
+                                               require_symbols=bool(p['require_symbols']),
+                                               require_uppercase_characters=bool(p['require_uppercase_characters']))
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/kinesis/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/kinesis/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/kinesis/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/kinesis/test_cert_verification.py
new file mode 100644
index 0000000000000000000000000000000000000000..522778f8c09b89741812076702b0171a5abcac49
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/kinesis/test_cert_verification.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on all service endpoints validate. +""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.kinesis + + +class KinesisCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + kinesis = True + regions = boto.kinesis.regions() + + def sample_service_call(self, conn): + conn.list_streams() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/kinesis/test_kinesis.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/kinesis/test_kinesis.py new file mode 100644 index 0000000000000000000000000000000000000000..3c61feeba53ab72662273d7b347f0168750275cc --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/kinesis/test_kinesis.py @@ -0,0 +1,116 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
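+#
+# The test below follows a common Kinesis bring-up pattern: create a
+# stream, poll describe_stream until StreamStatus reports ACTIVE, then
+# write and read records through a shard iterator. A minimal sketch of
+# the polling idiom, assuming an already-connected `kinesis` client and a
+# stream named 'test' (a hypothetical standalone helper, not part of the
+# test itself):
+#
+#     for _ in range(10):
+#         time.sleep(15)
+#         desc = kinesis.describe_stream('test')['StreamDescription']
+#         if desc['StreamStatus'] == 'ACTIVE':
+#             break
+#     else:
+#         raise TimeoutError('Stream is still not active, aborting...')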
+ +import time + +import boto +from tests.compat import unittest +from boto.kinesis.exceptions import ResourceNotFoundException + + +class TimeoutError(Exception): + pass + + +class TestKinesis(unittest.TestCase): + def setUp(self): + self.kinesis = boto.connect_kinesis() + + def test_kinesis(self): + kinesis = self.kinesis + + # Create a new stream + kinesis.create_stream('test', 1) + self.addCleanup(self.kinesis.delete_stream, 'test') + + # Wait for the stream to be ready + tries = 0 + while tries < 10: + tries += 1 + time.sleep(15) + response = kinesis.describe_stream('test') + + if response['StreamDescription']['StreamStatus'] == 'ACTIVE': + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + break + else: + raise TimeoutError('Stream is still not active, aborting...') + + # Make a tag. + kinesis.add_tags_to_stream(stream_name='test', tags={'foo': 'bar'}) + + # Check that the correct tag is there. + response = kinesis.list_tags_for_stream(stream_name='test') + self.assertEqual(len(response['Tags']), 1) + self.assertEqual(response['Tags'][0], + {'Key':'foo', 'Value': 'bar'}) + + # Remove the tag and ensure it is removed. + kinesis.remove_tags_from_stream(stream_name='test', tag_keys=['foo']) + response = kinesis.list_tags_for_stream(stream_name='test') + self.assertEqual(len(response['Tags']), 0) + + # Get ready to process some data from the stream + response = kinesis.get_shard_iterator('test', shard_id, 'TRIM_HORIZON') + shard_iterator = response['ShardIterator'] + + # Write some data to the stream + data = 'Some data ...' + record = { + 'Data': data, + 'PartitionKey': data, + } + response = kinesis.put_record('test', data, data) + response = kinesis.put_records([record, record.copy()], 'test') + + # Wait for the data to show up + tries = 0 + num_collected = 0 + num_expected_records = 3 + collected_records = [] + while tries < 100: + tries += 1 + time.sleep(1) + + response = kinesis.get_records(shard_iterator) + shard_iterator = response['NextShardIterator'] + for record in response['Records']: + if 'Data' in record: + collected_records.append(record['Data']) + num_collected += 1 + if num_collected >= num_expected_records: + self.assertEqual(num_expected_records, num_collected) + break + else: + raise TimeoutError('No records found, aborting...') + + # Read the data, which should be the same as what we wrote + for record in collected_records: + self.assertEqual(data, record) + + def test_describe_non_existent_stream(self): + with self.assertRaises(ResourceNotFoundException) as cm: + self.kinesis.describe_stream('this-stream-shouldnt-exist') + + # Assert things about the data we passed along. + self.assertEqual(cm.exception.error_code, None) + self.assertTrue('not found' in cm.exception.message) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/kms/test_kms.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/kms/test_kms.py new file mode 100644 index 0000000000000000000000000000000000000000..10238a037d2a3e93ce9631b3b807384021074768 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/kms/test_kms.py @@ -0,0 +1,41 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. 
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import boto
+from boto.kms.exceptions import NotFoundException
+from tests.compat import unittest
+
+
+class TestKMS(unittest.TestCase):
+    def setUp(self):
+        self.kms = boto.connect_kms()
+
+    def test_list_keys(self):
+        response = self.kms.list_keys()
+        self.assertIn('Keys', response)
+
+    def test_handle_not_found_exception(self):
+        with self.assertRaises(NotFoundException):
+            # Describe a key that does not exist.
+            self.kms.describe_key(
+                key_id='nonexistant_key',
+            )
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/logs/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/logs/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/logs/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/logs/test_cert_verification.py
new file mode 100644
index 0000000000000000000000000000000000000000..2bca7529e5ef46e15d9aeee242da6bdf64d4bd3c
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/logs/test_cert_verification.py
@@ -0,0 +1,37 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on all service endpoints validate.
+""" + +from tests.compat import unittest +from tests.integration import ServiceCertVerificationTest + +import boto.logs + + +class CloudWatchLogsCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + regions = boto.logs.regions() + + def sample_service_call(self, conn): + conn.describe_log_groups() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/logs/test_layer1.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/logs/test_layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..6e826ec75184dd9f918a53a71a0f3173ab9a8094 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/logs/test_layer1.py @@ -0,0 +1,43 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import boto +from tests.compat import unittest + + +class TestCloudWatchLogs(unittest.TestCase): + def setUp(self): + self.logs = boto.connect_logs() + + def test_logs(self): + logs = self.logs + + response = logs.describe_log_groups(log_group_name_prefix='test') + self.assertIsInstance(response['logGroups'], list) + + mfilter = '[ip, id, user, ..., status_code=500, size]' + sample = [ + '127.0.0.1 - frank "GET /apache_pb.gif HTTP/1.0" 200 1534', + '127.0.0.1 - frank "GET /apache_pb.gif HTTP/1.0" 500 5324', + ] + response = logs.test_metric_filter(mfilter, sample) + self.assertEqual(len(response['matches']), 1) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/mws/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/mws/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/mws/test.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/mws/test.py new file mode 100644 index 0000000000000000000000000000000000000000..2d3ab96ff08603ce1522775123379b47cad18893 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/mws/test.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python +from __future__ import print_function +import sys +import os +import os.path +from datetime import datetime, timedelta + + +simple = os.environ.get('MWS_MERCHANT', None) +if not simple: + print(""" + Please set the MWS_MERCHANT environmental variable + to your Merchant or SellerId to enable MWS tests. 
+ """) + + +advanced = False +isolator = True +if __name__ == "__main__": + devpath = os.path.relpath(os.path.join('..', '..', '..'), + start=os.path.dirname(__file__)) + sys.path = [devpath] + sys.path + advanced = simple and True or False + if advanced: + print('>>> advanced MWS tests; using local boto sources') + +from boto.mws.connection import MWSConnection +from tests.compat import unittest + + +class MWSTestCase(unittest.TestCase): + + def setUp(self): + self.mws = MWSConnection(Merchant=simple, debug=0) + + @unittest.skipUnless(simple and isolator, "skipping simple test") + def test_feedlist(self): + self.mws.get_feed_submission_list() + + @unittest.skipUnless(simple and isolator, "skipping simple test") + def test_inbound_status(self): + response = self.mws.get_inbound_service_status() + status = response.GetServiceStatusResult.Status + self.assertIn(status, ('GREEN', 'GREEN_I', 'YELLOW', 'RED')) + + @property + def marketplace(self): + try: + return self._marketplace + except AttributeError: + response = self.mws.list_marketplace_participations() + result = response.ListMarketplaceParticipationsResult + self._marketplace = result.ListMarketplaces.Marketplace[0] + return self.marketplace + + @property + def marketplace_id(self): + return self.marketplace.MarketplaceId + + @unittest.skipUnless(simple and isolator, "skipping simple test") + def test_marketplace_participations(self): + response = self.mws.list_marketplace_participations() + result = response.ListMarketplaceParticipationsResult + self.assertTrue(result.ListMarketplaces.Marketplace[0].MarketplaceId) + + @unittest.skipUnless(simple and isolator, "skipping simple test") + def test_get_product_categories_for_asin(self): + asin = '144930544X' + response = self.mws.get_product_categories_for_asin( + MarketplaceId=self.marketplace_id, + ASIN=asin) + self.assertEqual(len(response._result.Self), 3) + categoryids = [x.ProductCategoryId for x in response._result.Self] + self.assertSequenceEqual(categoryids, ['285856', '21', '491314']) + + @unittest.skipUnless(simple and isolator, "skipping simple test") + def test_list_matching_products(self): + response = self.mws.list_matching_products( + MarketplaceId=self.marketplace_id, + Query='boto') + products = response._result.Products + self.assertTrue(len(products)) + + @unittest.skipUnless(simple and isolator, "skipping simple test") + def test_get_matching_product(self): + asin = 'B001UDRNHO' + response = self.mws.get_matching_product( + MarketplaceId=self.marketplace_id, + ASINList=[asin]) + attributes = response._result[0].Product.AttributeSets.ItemAttributes + self.assertEqual(attributes[0].Label, 'Serengeti') + + @unittest.skipUnless(simple and isolator, "skipping simple test") + def test_get_matching_product_for_id(self): + asins = ['B001UDRNHO', '144930544X'] + response = self.mws.get_matching_product_for_id( + MarketplaceId=self.marketplace_id, + IdType='ASIN', + IdList=asins) + self.assertEqual(len(response._result), 2) + for result in response._result: + self.assertEqual(len(result.Products.Product), 1) + + @unittest.skipUnless(simple and isolator, "skipping simple test") + def test_get_lowest_offer_listings_for_asin(self): + asin = '144930544X' + response = self.mws.get_lowest_offer_listings_for_asin( + MarketplaceId=self.marketplace_id, + ItemCondition='New', + ASINList=[asin]) + listings = response._result[0].Product.LowestOfferListings + self.assertTrue(len(listings.LowestOfferListing)) + + @unittest.skipUnless(simple and isolator, "skipping simple test") + def 
test_list_inventory_supply(self): + asof = (datetime.today() - timedelta(days=30)).isoformat() + response = self.mws.list_inventory_supply(QueryStartDateTime=asof, + ResponseGroup='Basic') + self.assertTrue(hasattr(response._result, 'InventorySupplyList')) + +if __name__ == "__main__": + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/opsworks/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/opsworks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/opsworks/test_layer1.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/opsworks/test_layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..caf0857dc0dfecf4f618afff0bee87acc0b30f4c --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/opsworks/test_layer1.py @@ -0,0 +1,54 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.exception import JSONResponseError +from boto.opsworks import connect_to_region, regions, RegionInfo +from boto.opsworks.layer1 import OpsWorksConnection +from tests.compat import unittest + + +class TestOpsWorksConnection(unittest.TestCase): + opsworks = True + + def setUp(self): + self.api = OpsWorksConnection() + + def test_describe_stacks(self): + response = self.api.describe_stacks() + self.assertIn('Stacks', response) + + def test_validation_errors(self): + with self.assertRaises(JSONResponseError): + self.api.create_stack('testbotostack', 'us-east-1', + 'badarn', 'badarn2') + + +class TestOpsWorksHelpers(unittest.TestCase): + opsworks = True + + def test_regions(self): + response = regions() + self.assertIsInstance(response[0], RegionInfo) + + def test_connect_to_region(self): + connection = connect_to_region('us-east-1') + self.assertIsInstance(connection, OpsWorksConnection) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/rds/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/rds/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b7fe4c2259e950fde9bf243fe43614898f197076 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/rds/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. 
+# All Rights Reserved +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/rds/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/rds/test_cert_verification.py new file mode 100644 index 0000000000000000000000000000000000000000..0cef27d72ad298fefac74b3d3b4f073f279d1a2c --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/rds/test_cert_verification.py @@ -0,0 +1,39 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on all service endpoints validate. +""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.rds + + +class RDSCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + rds = True + regions = boto.rds.regions() + + def sample_service_call(self, conn): + conn.get_all_dbinstances() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/rds/test_db_subnet_group.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/rds/test_db_subnet_group.py new file mode 100644 index 0000000000000000000000000000000000000000..52d63739fe21cbdcaf1a238461c46485f4485905 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/rds/test_db_subnet_group.py @@ -0,0 +1,92 @@ +# Copyright (c) 2013 Franc Carter franc.carter@gmail.com +# All rights reserved. 
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that db_subnet_groups behave sanely
+"""
+
+import time
+import unittest
+import boto.rds
+from boto.vpc import VPCConnection
+from boto.rds import RDSConnection
+
+def _is_ok(subnet_group, vpc_id, description, subnets):
+    if subnet_group.vpc_id != vpc_id:
+        print 'vpc_id is', subnet_group.vpc_id, 'but should be', vpc_id
+        return 0
+    if subnet_group.description != description:
+        print "description is '" + subnet_group.description + "' but should be '" + description + "'"
+        return 0
+    if set(subnet_group.subnet_ids) != set(subnets):
+        subnets_are = ','.join(subnet_group.subnet_ids)
+        should_be = ','.join(subnets)
+        print "subnets are " + subnets_are + " but should be " + should_be
+        return 0
+    return 1
+
+
+class DbSubnetGroupTest(unittest.TestCase):
+    rds = True
+
+    def test_db_subnet_group(self):
+        vpc_api = VPCConnection()
+        rds_api = RDSConnection()
+        vpc = vpc_api.create_vpc('10.0.0.0/16')
+
+        az_list = vpc_api.get_all_zones(filters={'state':'available'})
+        subnet = list()
+        n = 0
+        for az in az_list:
+            try:
+                subnet.append(vpc_api.create_subnet(vpc.id, '10.0.' + str(n) + '.0/24', availability_zone=az.name))
+                n = n + 1
+            except:
+                # Subnet creation is not permitted in every AZ; skip the
+                # ones that reject it.
+                pass
+
+        grp_name = 'db_subnet_group' + str(int(time.time()))
+        subnet_group = rds_api.create_db_subnet_group(grp_name, grp_name, [subnet[0].id, subnet[1].id])
+        if not _is_ok(subnet_group, vpc.id, grp_name, [subnet[0].id, subnet[1].id]):
+            raise Exception("create_db_subnet_group returned bad values")
+
+        rds_api.modify_db_subnet_group(grp_name, description='new description')
+        subnet_grps = rds_api.get_all_db_subnet_groups(name=grp_name)
+        if not _is_ok(subnet_grps[0], vpc.id, 'new description', [subnet[0].id, subnet[1].id]):
+            raise Exception("modifying the subnet group description returned bad values")
+
+        rds_api.modify_db_subnet_group(grp_name, subnet_ids=[subnet[1].id, subnet[2].id])
+        subnet_grps = rds_api.get_all_db_subnet_groups(name=grp_name)
+        if not _is_ok(subnet_grps[0], vpc.id, 'new description', [subnet[1].id, subnet[2].id]):
+            raise Exception("modifying the subnet group subnets returned bad values")
+
+        rds_api.delete_db_subnet_group(subnet_group.name)
+        try:
+            rds_api.get_all_db_subnet_groups(name=grp_name)
+        except:
+            pass
+        else:
+            raise Exception(subnet_group.name + " still accessible after delete_db_subnet_group")
+
+        while n > 0:
+            n = n - 1
+            vpc_api.delete_subnet(subnet[n].id)
+        vpc_api.delete_vpc(vpc.id)
+
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/rds/test_promote_modify.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/rds/test_promote_modify.py
new file mode 100644
index 0000000000000000000000000000000000000000..20963ed2739e3ecafa9c4766a4d40528cec33bc1
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/rds/test_promote_modify.py
@@ -0,0 +1,138 @@
+# Author: Bruce Pennypacker
+#
+# Create a temporary RDS database instance, then create a read-replica of the
+# instance. Once the replica is available, promote it and verify that the
+# promotion succeeds, then rename it. Delete the databases upon completion
+# of the tests.
+#
+# For each step (creating the databases, promoting, etc) we loop for up
+# to 15 minutes to wait for the instance to become available. It should
+# never take that long for any of the steps to complete.
+
+"""
+Check that promotion of read replicas and renaming instances works as expected
+"""
+
+import unittest
+import time
+from boto.rds import RDSConnection
+
+class PromoteReadReplicaTest(unittest.TestCase):
+    rds = True
+
+    def setUp(self):
+        self.conn = RDSConnection()
+        self.masterDB_name = "boto-db-%s" % str(int(time.time()))
+        self.replicaDB_name = "replica-%s" % self.masterDB_name
+        self.renamedDB_name = "renamed-replica-%s" % self.masterDB_name
+
+    def tearDown(self):
+        instances = self.conn.get_all_dbinstances()
+        for db in [self.masterDB_name, self.replicaDB_name, self.renamedDB_name]:
+            for i in instances:
+                if i.id == db:
+                    self.conn.delete_dbinstance(db, skip_final_snapshot=True)
+
+    def test_promote(self):
+        print '--- running RDS promotion & renaming tests ---'
+        self.masterDB = self.conn.create_dbinstance(self.masterDB_name, 5, 'db.t1.micro', 'root', 'bototestpw')
+
+        # Wait up to 15 minutes for the masterDB to become available
+        print '--- waiting for "%s" to become available ---' % self.masterDB_name
+        wait_timeout = time.time() + (15 * 60)
+        time.sleep(60)
+
+        instances = self.conn.get_all_dbinstances(self.masterDB_name)
+        inst = instances[0]
+
+        while wait_timeout > time.time() and inst.status != 'available':
+            time.sleep(15)
+            instances = self.conn.get_all_dbinstances(self.masterDB_name)
+            inst = instances[0]
+
+        self.assertTrue(inst.status == 'available')
+
+        self.replicaDB = self.conn.create_dbinstance_read_replica(self.replicaDB_name, self.masterDB_name)
+
+        # Wait up to 15 minutes for the replicaDB to become available
+        print '--- waiting for "%s" to become available ---' % self.replicaDB_name
+        wait_timeout = time.time() + (15 * 60)
+        time.sleep(60)
+
+        instances = self.conn.get_all_dbinstances(self.replicaDB_name)
+        inst = instances[0]
+
+        while wait_timeout > time.time() and inst.status != 'available':
+            time.sleep(15)
+            instances = self.conn.get_all_dbinstances(self.replicaDB_name)
+            inst = instances[0]
+
+        self.assertTrue(inst.status == 'available')
+
+        # Promote the replicaDB and wait for it to become available
+        self.replicaDB = self.conn.promote_read_replica(self.replicaDB_name)
+
+        # Wait up to 15 minutes for the replicaDB to become available
+        print '--- waiting for "%s" to be promoted and available ---' % self.replicaDB_name
+        wait_timeout = time.time() + (15 * 60)
+        time.sleep(60)
+
+        instances = self.conn.get_all_dbinstances(self.replicaDB_name)
+        inst = instances[0]
+
+        while wait_timeout > time.time() and inst.status != 'available':
+            time.sleep(15)
+            instances = self.conn.get_all_dbinstances(self.replicaDB_name)
+            inst = instances[0]
+
+        # Verify that the replica is now a standalone instance and no longer
+        # functioning as a read replica
+        self.assertTrue(inst)
+        self.assertTrue(inst.status == 'available')
+        self.assertFalse(inst.status_infos)
+
+        # Verify that the master no longer has any read replicas
+        instances = self.conn.get_all_dbinstances(self.masterDB_name)
+        inst = instances[0]
+        self.assertFalse(inst.read_replica_dbinstance_identifiers)
+
+        print '--- renaming "%s" to "%s" ---' % (self.replicaDB_name, self.renamedDB_name)
+
+        self.renamedDB = self.conn.modify_dbinstance(self.replicaDB_name, new_instance_id=self.renamedDB_name, apply_immediately=True)
+
+        # Wait up to 15 minutes for the renamed instance to show up
+        print '--- waiting for "%s" to exist ---' % self.renamedDB_name
+
+        wait_timeout = time.time() + (15 * 60)
+        time.sleep(60)
+
+        # Wait up to 15 minutes until the new name shows up in the instance table
+        found = False
+        while not found and wait_timeout > time.time():
+            instances = self.conn.get_all_dbinstances()
+            for i in instances:
+                if i.id == self.renamedDB_name:
+                    found = True
+            if not found:
+                time.sleep(15)
+
+        self.assertTrue(found)
+
+        print '--- waiting for "%s" to become available ---' % self.renamedDB_name
+
+        instances = self.conn.get_all_dbinstances(self.renamedDB_name)
+        inst = instances[0]
+
+        # Now wait for the renamed instance to become available
+        while wait_timeout > time.time() and inst.status != 'available':
+            time.sleep(15)
+            instances = self.conn.get_all_dbinstances(self.renamedDB_name)
+            inst = instances[0]
+
+        self.assertTrue(inst.status == 'available')
+
+        # Since the replica DB was renamed...
+        self.replicaDB = None
+
+        print '--- tests completed ---'
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/rds2/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/rds2/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7fe4c2259e950fde9bf243fe43614898f197076
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/rds2/__init__.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
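+#
+# The rds2 package tracks a newer revision of the RDS API than boto.rds
+# and returns parsed dictionary responses (see the tests alongside this
+# file). A minimal sketch of opening a connection, assuming boto
+# credentials are already configured:
+#
+#     import boto
+#     conn = boto.connect_rds2()
+#     conn.describe_db_instances()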
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/rds2/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/rds2/test_cert_verification.py new file mode 100644 index 0000000000000000000000000000000000000000..5ad56356bde95bbd15d33b7d27cdbceeb42cfdf7 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/rds2/test_cert_verification.py @@ -0,0 +1,39 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on all service endpoints validate. +""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.rds2 + + +class RDSCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + rds = True + regions = boto.rds2.regions() + + def sample_service_call(self, conn): + conn.describe_db_instances() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/rds2/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/rds2/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..82d8193b5edc18afe78cb432efdc2dd8055f52c9 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/rds2/test_connection.py @@ -0,0 +1,93 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+#
+
+import unittest
+import time
+from boto.rds2.layer1 import RDSConnection
+
+
+class TestRDS2Connection(unittest.TestCase):
+    rds = True
+
+    def setUp(self):
+        self.conn = RDSConnection()
+        self.db_name = "test-db-%s" % str(int(time.time()))
+
+    def test_connect_rds(self):
+        # Upon release, this did not function correctly. Ensure that
+        # args are passed correctly.
+        import boto
+        conn = boto.connect_rds2()
+
+    def test_integration(self):
+        resp = self.conn.create_db_instance(
+            db_instance_identifier=self.db_name,
+            allocated_storage=5,
+            db_instance_class='db.t1.micro',
+            engine='postgres',
+            master_username='bototestuser',
+            master_user_password='testtestt3st',
+            # Try to limit the impact & test options.
+            multi_az=False,
+            backup_retention_period=0
+        )
+        self.addCleanup(
+            self.conn.delete_db_instance,
+            self.db_name,
+            skip_final_snapshot=True
+        )
+
+        # Wait for 6 minutes for it to come up.
+        time.sleep(60 * 6)
+
+        instances = self.conn.describe_db_instances(self.db_name)
+        inst = instances['DescribeDBInstancesResponse']\
+            ['DescribeDBInstancesResult']['DBInstances'][0]
+        self.assertEqual(inst['DBInstanceStatus'], 'available')
+        self.assertEqual(inst['Engine'], 'postgres')
+        self.assertEqual(inst['AllocatedStorage'], 5)
+
+        # Try resizing its storage.
+        resp = self.conn.modify_db_instance(
+            self.db_name,
+            allocated_storage=10,
+            apply_immediately=True
+        )
+
+        # Give it a chance to start modifying...
+        time.sleep(60)
+
+        instances = self.conn.describe_db_instances(self.db_name)
+        inst = instances['DescribeDBInstancesResponse']\
+            ['DescribeDBInstancesResult']['DBInstances'][0]
+        self.assertEqual(inst['DBInstanceStatus'], 'modifying')
+        self.assertEqual(inst['Engine'], 'postgres')
+
+        # ...then finish the remainder of 10 minutes for the change.
+        time.sleep(60 * 9)
+
+        instances = self.conn.describe_db_instances(self.db_name)
+        inst = instances['DescribeDBInstancesResponse']\
+            ['DescribeDBInstancesResult']['DBInstances'][0]
+        self.assertEqual(inst['DBInstanceStatus'], 'available')
+        self.assertEqual(inst['Engine'], 'postgres')
+        self.assertEqual(inst['AllocatedStorage'], 10)
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/redshift/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/redshift/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/redshift/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/redshift/test_cert_verification.py
new file mode 100644
index 0000000000000000000000000000000000000000..3081c5771fc1c4be6c5cb58ca33f18e0b3ea7445
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/redshift/test_cert_verification.py
@@ -0,0 +1,34 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import unittest
+
+from tests.integration import ServiceCertVerificationTest
+
+import boto.redshift
+
+
+class RedshiftCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
+    redshift = True
+    regions = boto.redshift.regions()
+
+    def sample_service_call(self, conn):
+        conn.describe_cluster_versions()
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/redshift/test_layer1.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/redshift/test_layer1.py
new file mode 100644
index 0000000000000000000000000000000000000000..490618e1785d835cc0b6c9b8aabe3eefe0bf0980
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/redshift/test_layer1.py
@@ -0,0 +1,134 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import unittest
+import time
+
+from nose.plugins.attrib import attr
+
+from boto.redshift.layer1 import RedshiftConnection
+from boto.redshift.exceptions import ClusterNotFoundFault
+from boto.redshift.exceptions import ResizeNotFoundFault
+
+
+class TestRedshiftLayer1Management(unittest.TestCase):
+    redshift = True
+
+    def setUp(self):
+        self.api = RedshiftConnection()
+        self.cluster_prefix = 'boto-redshift-cluster-%s'
+        self.node_type = 'dw.hs1.xlarge'
+        self.master_username = 'mrtest'
+        self.master_password = 'P4ssword'
+        self.db_name = 'simon'
+        # Redshift was taking ~20 minutes to bring clusters up in testing.
+        self.wait_time = 60 * 20
+
+    def cluster_id(self):
+        # This needs to be unique per test method.
+        return self.cluster_prefix % str(int(time.time()))
+
+    def create_cluster(self):
+        cluster_id = self.cluster_id()
+        self.api.create_cluster(
+            cluster_id, self.node_type,
+            self.master_username, self.master_password,
+            db_name=self.db_name, number_of_nodes=3
+        )
+
+        # Wait for it to come up.
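+        # Cluster creation was observed to take on the order of 20 minutes
+        # (hence self.wait_time above), so this sleeps for the full window
+        # rather than polling describe_clusters for an 'available' status.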
+ time.sleep(self.wait_time) + + self.addCleanup(self.delete_cluster_the_slow_way, cluster_id) + return cluster_id + + def delete_cluster_the_slow_way(self, cluster_id): + # Because there might be other operations in progress. :( + time.sleep(self.wait_time) + + self.api.delete_cluster(cluster_id, skip_final_cluster_snapshot=True) + + @attr('notdefault') + def test_create_delete_cluster(self): + cluster_id = self.cluster_id() + self.api.create_cluster( + cluster_id, self.node_type, + self.master_username, self.master_password, + db_name=self.db_name, number_of_nodes=3 + ) + + # Wait for it to come up. + time.sleep(self.wait_time) + + self.api.delete_cluster(cluster_id, skip_final_cluster_snapshot=True) + + @attr('notdefault') + def test_as_much_as_possible_before_teardown(self): + # Per @garnaat, for the sake of suite time, we'll test as much as we + # can before we teardown. + + # Test a non-existent cluster ID. + with self.assertRaises(ClusterNotFoundFault): + self.api.describe_clusters('badpipelineid') + + # Now create the cluster & move on. + cluster_id = self.create_cluster() + + # Test never resized. + with self.assertRaises(ResizeNotFoundFault): + self.api.describe_resize(cluster_id) + + # The cluster shows up in describe_clusters + clusters = self.api.describe_clusters()['DescribeClustersResponse']\ + ['DescribeClustersResult']\ + ['Clusters'] + cluster_ids = [c['ClusterIdentifier'] for c in clusters] + self.assertIn(cluster_id, cluster_ids) + + # The cluster shows up in describe_clusters w/ id + response = self.api.describe_clusters(cluster_id) + self.assertEqual(response['DescribeClustersResponse']\ + ['DescribeClustersResult']['Clusters'][0]\ + ['ClusterIdentifier'], cluster_id) + + snapshot_id = "snap-%s" % cluster_id + + # Test creating a snapshot. + response = self.api.create_cluster_snapshot(snapshot_id, cluster_id) + self.assertEqual(response['CreateClusterSnapshotResponse']\ + ['CreateClusterSnapshotResult']['Snapshot']\ + ['SnapshotIdentifier'], snapshot_id) + self.assertEqual(response['CreateClusterSnapshotResponse']\ + ['CreateClusterSnapshotResult']['Snapshot']\ + ['Status'], 'creating') + self.addCleanup(self.api.delete_cluster_snapshot, snapshot_id) + + # More waiting. :( + time.sleep(self.wait_time) + + # Describe the snapshots. 
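+        # The snapshot created above should be the newest one for this
+        # cluster, so it is read from the tail of the returned list and is
+        # expected to report as a 'manual' snapshot.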
+ response = self.api.describe_cluster_snapshots( + cluster_identifier=cluster_id + ) + snap = response['DescribeClusterSnapshotsResponse']\ + ['DescribeClusterSnapshotsResult']['Snapshots'][-1] + self.assertEqual(snap['SnapshotType'], 'manual') + self.assertEqual(snap['DBName'], self.db_name) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/route53/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/route53/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..417a3a082de036b75315f424a9a257a505aaa1cd --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/route53/__init__.py @@ -0,0 +1,39 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2014 Tellybug, Matt Millar +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import time +import unittest +from nose.plugins.attrib import attr +from boto.route53.connection import Route53Connection + + +@attr(route53=True) +class Route53TestCase(unittest.TestCase): + def setUp(self): + super(Route53TestCase, self).setUp() + self.conn = Route53Connection() + self.base_domain = 'boto-test-%s.com' % str(int(time.time())) + self.zone = self.conn.create_zone(self.base_domain) + + def tearDown(self): + self.zone.delete() + super(Route53TestCase, self).tearDown() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/route53/domains/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/route53/domains/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ace4a491fe541d32b5c105ac5e4010d360de9448 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/route53/domains/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/route53/domains/test_route53domains.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/route53/domains/test_route53domains.py new file mode 100644 index 0000000000000000000000000000000000000000..462ee1b0ce9f98722189e6cee27c2b6b6b286ffb --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/route53/domains/test_route53domains.py @@ -0,0 +1,44 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.route53.domains.exceptions import InvalidInput +from tests.compat import unittest + + +class TestRoute53Domains(unittest.TestCase): + def setUp(self): + self.route53domains = boto.connect_route53domains() + + def test_check_domain_availability(self): + response = self.route53domains.check_domain_availability( + domain_name='amazon.com', + idn_lang_code='eng' + ) + self.assertEqual(response, {'Availability': 'UNAVAILABLE'}) + + def test_handle_invalid_input_error(self): + with self.assertRaises(InvalidInput): + # Note the invalid character in the domain name. + self.route53domains.check_domain_availability( + domain_name='!amazon.com', + ) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/route53/test_alias_resourcerecordsets.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/route53/test_alias_resourcerecordsets.py new file mode 100644 index 0000000000000000000000000000000000000000..6a753883d2741d474979c31578c494b1d11df938 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/route53/test_alias_resourcerecordsets.py @@ -0,0 +1,114 @@ +# Copyright (c) 2014 Netflix, Inc. Stefan Praszalowicz +# Copyright (c) 2014 42Lines, Inc. 
Jim Browne +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import time +from tests.compat import unittest +from boto.route53.connection import Route53Connection +from boto.route53.record import ResourceRecordSets +from boto.route53.exception import DNSServerError + + +class TestRoute53AliasResourceRecordSets(unittest.TestCase): + route53 = True + + def setUp(self): + super(TestRoute53AliasResourceRecordSets, self).setUp() + self.conn = Route53Connection() + self.base_domain = 'boto-test-%s.com' % str(int(time.time())) + self.zone = self.conn.create_zone(self.base_domain) + # a standard record to use as the target for our alias + self.zone.add_a('target.%s' % self.base_domain, '102.11.23.1') + + def tearDown(self): + self.zone.delete_a('target.%s' % self.base_domain) + self.zone.delete() + super(TestRoute53AliasResourceRecordSets, self).tearDown() + + def test_incomplete_add_alias_failure(self): + base_record = dict(name="alias.%s." % self.base_domain, + type="A", + alias_dns_name="target.%s" % self.base_domain, + alias_hosted_zone_id=self.zone.id, + identifier="boto:TestRoute53AliasResourceRecordSets") + + rrs = ResourceRecordSets(self.conn, self.zone.id) + rrs.add_change(action="UPSERT", **base_record) + + try: + self.assertRaises(DNSServerError, rrs.commit) + except: + # if the call somehow goes through, delete our unexpected new record before failing test + rrs = ResourceRecordSets(self.conn, self.zone.id) + rrs.add_change(action="DELETE", **base_record) + rrs.commit() + raise + + def test_add_alias(self): + base_record = dict(name="alias.%s." % self.base_domain, + type="A", + alias_evaluate_target_health=False, + alias_dns_name="target.%s" % self.base_domain, + alias_hosted_zone_id=self.zone.id, + identifier="boto:TestRoute53AliasResourceRecordSets") + + rrs = ResourceRecordSets(self.conn, self.zone.id) + rrs.add_change(action="UPSERT", **base_record) + rrs.commit() + + rrs = ResourceRecordSets(self.conn, self.zone.id) + rrs.add_change(action="DELETE", **base_record) + rrs.commit() + + def test_set_alias(self): + base_record = dict(name="alias.%s." 
% self.base_domain, + type="A", + identifier="boto:TestRoute53AliasResourceRecordSets") + + rrs = ResourceRecordSets(self.conn, self.zone.id) + new = rrs.add_change(action="UPSERT", **base_record) + new.set_alias(self.zone.id, "target.%s" % self.base_domain, False) + rrs.commit() + + rrs = ResourceRecordSets(self.conn, self.zone.id) + delete = rrs.add_change(action="DELETE", **base_record) + delete.set_alias(self.zone.id, "target.%s" % self.base_domain, False) + rrs.commit() + + def test_set_alias_backwards_compatability(self): + base_record = dict(name="alias.%s." % self.base_domain, + type="A", + identifier="boto:TestRoute53AliasResourceRecordSets") + + rrs = ResourceRecordSets(self.conn, self.zone.id) + new = rrs.add_change(action="UPSERT", **base_record) + new.set_alias(self.zone.id, "target.%s" % self.base_domain) + rrs.commit() + + rrs = ResourceRecordSets(self.conn, self.zone.id) + delete = rrs.add_change(action="DELETE", **base_record) + delete.set_alias(self.zone.id, "target.%s" % self.base_domain) + rrs.commit() + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/route53/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/route53/test_cert_verification.py new file mode 100644 index 0000000000000000000000000000000000000000..7c90950d24adfe8be15963d5567c66fe64a6487f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/route53/test_cert_verification.py @@ -0,0 +1,41 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on all service endpoints validate. 
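+Each configured region's endpoint is connected to and a lightweight call
+(sample_service_call below) is issued, so a certificate that fails to
+validate aborts the TLS handshake and fails the test.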
+""" +from tests.compat import unittest +from nose.plugins.attrib import attr + +from tests.integration import ServiceCertVerificationTest + +import boto.route53 + + +@attr(route53=True) +class Route53CertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + route53 = True + regions = boto.route53.regions() + + def sample_service_call(self, conn): + conn.get_all_hosted_zones() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/route53/test_health_check.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/route53/test_health_check.py new file mode 100644 index 0000000000000000000000000000000000000000..4b4d2bcf931ad6ab6b51f6f5eadb5fcadcd12f0a --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/route53/test_health_check.py @@ -0,0 +1,175 @@ +# Copyright (c) 2014 Tellybug, Matt Millar +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# + +from tests.integration.route53 import Route53TestCase + +from boto.compat import six +from boto.route53.healthcheck import HealthCheck +from boto.route53.record import ResourceRecordSets + + +class TestRoute53HealthCheck(Route53TestCase): + def test_create_health_check(self): + hc = HealthCheck(ip_addr="54.217.7.118", port=80, hc_type="HTTP", resource_path="/testing") + result = self.conn.create_health_check(hc) + self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Type'], 'HTTP') + self.assertEquals(result[u'CreateHealthCheckResponse'][ + u'HealthCheck'][u'HealthCheckConfig'][u'IPAddress'], '54.217.7.118') + self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Port'], '80') + self.assertEquals(result[u'CreateHealthCheckResponse'][ + u'HealthCheck'][u'HealthCheckConfig'][u'ResourcePath'], '/testing') + self.conn.delete_health_check(result['CreateHealthCheckResponse']['HealthCheck']['Id']) + + def test_create_https_health_check(self): + hc = HealthCheck(ip_addr="54.217.7.118", port=443, hc_type="HTTPS", resource_path="/testing") + result = self.conn.create_health_check(hc) + self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Type'], 'HTTPS') + self.assertEquals(result[u'CreateHealthCheckResponse'][ + u'HealthCheck'][u'HealthCheckConfig'][u'IPAddress'], '54.217.7.118') + self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Port'], '443') + self.assertEquals(result[u'CreateHealthCheckResponse'][ + u'HealthCheck'][u'HealthCheckConfig'][u'ResourcePath'], '/testing') + self.assertFalse('FullyQualifiedDomainName' in result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig']) + self.conn.delete_health_check(result['CreateHealthCheckResponse']['HealthCheck']['Id']) + + def test_create_https_health_check_fqdn(self): + hc = HealthCheck(ip_addr=None, port=443, hc_type="HTTPS", resource_path="/", fqdn="google.com") + result = self.conn.create_health_check(hc) + self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Type'], 'HTTPS') + self.assertEquals(result[u'CreateHealthCheckResponse'][ + u'HealthCheck'][u'HealthCheckConfig'][u'FullyQualifiedDomainName'], 'google.com') + self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Port'], '443') + self.assertEquals(result[u'CreateHealthCheckResponse'][ + u'HealthCheck'][u'HealthCheckConfig'][u'ResourcePath'], '/') + self.assertFalse('IPAddress' in result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig']) + self.conn.delete_health_check(result['CreateHealthCheckResponse']['HealthCheck']['Id']) + + def test_create_and_list_health_check(self): + hc = HealthCheck(ip_addr="54.217.7.118", port=80, hc_type="HTTP", resource_path="/testing") + result1 = self.conn.create_health_check(hc) + hc = HealthCheck(ip_addr="54.217.7.119", port=80, hc_type="HTTP", resource_path="/testing") + result2 = self.conn.create_health_check(hc) + result = self.conn.get_list_health_checks() + self.assertTrue(len(result['ListHealthChecksResponse']['HealthChecks']) > 1) + self.conn.delete_health_check(result1['CreateHealthCheckResponse']['HealthCheck']['Id']) + self.conn.delete_health_check(result2['CreateHealthCheckResponse']['HealthCheck']['Id']) + + def test_delete_health_check(self): + hc = HealthCheck(ip_addr="54.217.7.118", port=80, hc_type="HTTP", resource_path="/testing") + result = 
self.conn.create_health_check(hc) + hc_id = result['CreateHealthCheckResponse']['HealthCheck']['Id'] + result = self.conn.get_list_health_checks() + found = False + for hc in result['ListHealthChecksResponse']['HealthChecks']: + if hc['Id'] == hc_id: + found = True + break + self.assertTrue(found) + result = self.conn.delete_health_check(hc_id) + result = self.conn.get_list_health_checks() + for hc in result['ListHealthChecksResponse']['HealthChecks']: + self.assertFalse(hc['Id'] == hc_id) + + def test_create_health_check_string_match(self): + hc = HealthCheck(ip_addr="54.217.7.118", port=80, hc_type="HTTP_STR_MATCH", resource_path="/testing", string_match="test") + result = self.conn.create_health_check(hc) + self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Type'], 'HTTP_STR_MATCH') + self.assertEquals(result[u'CreateHealthCheckResponse'][ + u'HealthCheck'][u'HealthCheckConfig'][u'IPAddress'], '54.217.7.118') + self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Port'], '80') + self.assertEquals(result[u'CreateHealthCheckResponse'][ + u'HealthCheck'][u'HealthCheckConfig'][u'ResourcePath'], '/testing') + self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'SearchString'], 'test') + self.conn.delete_health_check(result['CreateHealthCheckResponse']['HealthCheck']['Id']) + + def test_create_health_check_https_string_match(self): + hc = HealthCheck(ip_addr="54.217.7.118", port=80, hc_type="HTTPS_STR_MATCH", resource_path="/testing", string_match="test") + result = self.conn.create_health_check(hc) + self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Type'], 'HTTPS_STR_MATCH') + self.assertEquals(result[u'CreateHealthCheckResponse'][ + u'HealthCheck'][u'HealthCheckConfig'][u'IPAddress'], '54.217.7.118') + self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'Port'], '80') + self.assertEquals(result[u'CreateHealthCheckResponse'][ + u'HealthCheck'][u'HealthCheckConfig'][u'ResourcePath'], '/testing') + self.assertEquals(result[u'CreateHealthCheckResponse'][u'HealthCheck'][u'HealthCheckConfig'][u'SearchString'], 'test') + self.conn.delete_health_check(result['CreateHealthCheckResponse']['HealthCheck']['Id']) + + def test_create_resource_record_set(self): + hc = HealthCheck(ip_addr="54.217.7.118", port=80, hc_type="HTTP", resource_path="/testing") + result = self.conn.create_health_check(hc) + records = ResourceRecordSets( + connection=self.conn, hosted_zone_id=self.zone.id, comment='Create DNS entry for test') + change = records.add_change('CREATE', 'unittest.%s.' % self.base_domain, 'A', ttl=30, identifier='test', + weight=1, health_check=result['CreateHealthCheckResponse']['HealthCheck']['Id']) + change.add_value("54.217.7.118") + records.commit() + + records = ResourceRecordSets(self.conn, self.zone.id) + deleted = records.add_change('DELETE', "unittest.%s." % self.base_domain, "A", ttl=30, identifier='test', + weight=1, health_check=result['CreateHealthCheckResponse']['HealthCheck']['Id']) + deleted.add_value('54.217.7.118') + records.commit() + + def test_create_health_check_invalid_request_interval(self): + """Test that health checks cannot be created with an invalid + 'request_interval'. 
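+        (Route 53 only accepts request intervals of 10 or 30 seconds, so the
+        value 5 used below must be rejected client-side.)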
+ + """ + self.assertRaises(AttributeError, lambda: HealthCheck(**self.health_check_params(request_interval=5))) + + def test_create_health_check_invalid_failure_threshold(self): + """ + Test that health checks cannot be created with an invalid + 'failure_threshold'. + """ + self.assertRaises(AttributeError, lambda: HealthCheck(**self.health_check_params(failure_threshold=0))) + self.assertRaises(AttributeError, lambda: HealthCheck(**self.health_check_params(failure_threshold=11))) + + def test_create_health_check_request_interval(self): + hc_params = self.health_check_params(request_interval=10) + hc = HealthCheck(**hc_params) + result = self.conn.create_health_check(hc) + hc_config = (result[u'CreateHealthCheckResponse'] + [u'HealthCheck'][u'HealthCheckConfig']) + self.assertEquals(hc_config[u'RequestInterval'], + six.text_type(hc_params['request_interval'])) + self.conn.delete_health_check(result['CreateHealthCheckResponse']['HealthCheck']['Id']) + + def test_create_health_check_failure_threshold(self): + hc_params = self.health_check_params(failure_threshold=1) + hc = HealthCheck(**hc_params) + result = self.conn.create_health_check(hc) + hc_config = (result[u'CreateHealthCheckResponse'] + [u'HealthCheck'][u'HealthCheckConfig']) + self.assertEquals(hc_config[u'FailureThreshold'], + six.text_type(hc_params['failure_threshold'])) + self.conn.delete_health_check(result['CreateHealthCheckResponse']['HealthCheck']['Id']) + + def health_check_params(self, **kwargs): + params = { + 'ip_addr': "54.217.7.118", + 'port': 80, + 'hc_type': 'HTTP', + 'resource_path': '/testing', + } + params.update(kwargs) + return params diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/route53/test_resourcerecordsets.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/route53/test_resourcerecordsets.py new file mode 100644 index 0000000000000000000000000000000000000000..76a931e829e8da944b6d081c84bdc0dc76ab3c19 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/route53/test_resourcerecordsets.py @@ -0,0 +1,91 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from tests.compat import unittest +from tests.integration.route53 import Route53TestCase + +from boto.route53.record import ResourceRecordSets + + +class TestRoute53ResourceRecordSets(Route53TestCase): + def test_add_change(self): + rrs = ResourceRecordSets(self.conn, self.zone.id) + + created = rrs.add_change("CREATE", "vpn.%s." 
% self.base_domain, "A") + created.add_value('192.168.0.25') + rrs.commit() + + rrs = ResourceRecordSets(self.conn, self.zone.id) + deleted = rrs.add_change('DELETE', "vpn.%s." % self.base_domain, "A") + deleted.add_value('192.168.0.25') + rrs.commit() + + def test_record_count(self): + rrs = ResourceRecordSets(self.conn, self.zone.id) + hosts = 101 + + for hostid in range(hosts): + rec = "test" + str(hostid) + ".%s" % self.base_domain + created = rrs.add_change("CREATE", rec, "A") + ip = '192.168.0.' + str(hostid) + created.add_value(ip) + + # Max 100 changes per commit + if (hostid + 1) % 100 == 0: + rrs.commit() + rrs = ResourceRecordSets(self.conn, self.zone.id) + + rrs.commit() + + all_records = self.conn.get_all_rrsets(self.zone.id) + + # First time around was always fine + i = 0 + for rset in all_records: + i += 1 + + # Second time was a failure + i = 0 + for rset in all_records: + i += 1 + + # Cleanup indivual records + rrs = ResourceRecordSets(self.conn, self.zone.id) + for hostid in range(hosts): + rec = "test" + str(hostid) + ".%s" % self.base_domain + deleted = rrs.add_change("DELETE", rec, "A") + ip = '192.168.0.' + str(hostid) + deleted.add_value(ip) + + # Max 100 changes per commit + if (hostid + 1) % 100 == 0: + rrs.commit() + rrs = ResourceRecordSets(self.conn, self.zone.id) + + rrs.commit() + + # 2nd count should match the number of hosts plus NS/SOA records + records = hosts + 2 + self.assertEqual(i, records) + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/route53/test_zone.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/route53/test_zone.py new file mode 100644 index 0000000000000000000000000000000000000000..d6351dd62c46d1cab4da5b628405fb8f55fbf26e --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/route53/test_zone.py @@ -0,0 +1,196 @@ +# Copyright (c) 2011 Blue Pines Technologies LLC, Brad Carleton +# www.bluepines.org +# Copyright (c) 2012 42 Lines Inc., Jim Browne +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# + +import time +from tests.compat import unittest +from nose.plugins.attrib import attr +from boto.route53.connection import Route53Connection +from boto.exception import TooManyRecordsException +from boto.vpc import VPCConnection + + +@attr(route53=True) +class TestRoute53Zone(unittest.TestCase): + @classmethod + def setUpClass(self): + route53 = Route53Connection() + self.base_domain = 'boto-test-%s.com' % str(int(time.time())) + zone = route53.get_zone(self.base_domain) + if zone is not None: + zone.delete() + self.zone = route53.create_zone(self.base_domain) + + def test_nameservers(self): + self.zone.get_nameservers() + + def test_a(self): + self.zone.add_a(self.base_domain, '102.11.23.1', 80) + record = self.zone.get_a(self.base_domain) + self.assertEquals(record.name, u'%s.' % self.base_domain) + self.assertEquals(record.resource_records, [u'102.11.23.1']) + self.assertEquals(record.ttl, u'80') + self.zone.update_a(self.base_domain, '186.143.32.2', '800') + record = self.zone.get_a(self.base_domain) + self.assertEquals(record.name, u'%s.' % self.base_domain) + self.assertEquals(record.resource_records, [u'186.143.32.2']) + self.assertEquals(record.ttl, u'800') + + def test_cname(self): + self.zone.add_cname( + 'www.%s' % self.base_domain, + 'webserver.%s' % self.base_domain, + 200 + ) + record = self.zone.get_cname('www.%s' % self.base_domain) + self.assertEquals(record.name, u'www.%s.' % self.base_domain) + self.assertEquals(record.resource_records, [ + u'webserver.%s.' % self.base_domain + ]) + self.assertEquals(record.ttl, u'200') + self.zone.update_cname( + 'www.%s' % self.base_domain, + 'web.%s' % self.base_domain, + 45 + ) + record = self.zone.get_cname('www.%s' % self.base_domain) + self.assertEquals(record.name, u'www.%s.' % self.base_domain) + self.assertEquals(record.resource_records, [ + u'web.%s.' % self.base_domain + ]) + self.assertEquals(record.ttl, u'45') + + def test_mx(self): + self.zone.add_mx( + self.base_domain, + [ + '10 mx1.%s' % self.base_domain, + '20 mx2.%s' % self.base_domain, + ], + 1000 + ) + record = self.zone.get_mx(self.base_domain) + self.assertEquals(set(record.resource_records), + set([u'10 mx1.%s.' % self.base_domain, + u'20 mx2.%s.' % self.base_domain])) + self.assertEquals(record.ttl, u'1000') + self.zone.update_mx( + self.base_domain, + [ + '10 mail1.%s' % self.base_domain, + '20 mail2.%s' % self.base_domain, + ], + 50 + ) + record = self.zone.get_mx(self.base_domain) + self.assertEquals(set(record.resource_records), + set([u'10 mail1.%s.' % self.base_domain, + '20 mail2.%s.' 
% self.base_domain])) + self.assertEquals(record.ttl, u'50') + + def test_get_records(self): + self.zone.get_records() + + def test_get_nameservers(self): + self.zone.get_nameservers() + + def test_get_zones(self): + route53 = Route53Connection() + route53.get_zones() + + def test_identifiers_wrrs(self): + self.zone.add_a('wrr.%s' % self.base_domain, '1.2.3.4', + identifier=('foo', '20')) + self.zone.add_a('wrr.%s' % self.base_domain, '5.6.7.8', + identifier=('bar', '10')) + wrrs = self.zone.find_records( + 'wrr.%s' % self.base_domain, + 'A', + all=True + ) + self.assertEquals(len(wrrs), 2) + self.zone.delete_a('wrr.%s' % self.base_domain, all=True) + + def test_identifiers_lbrs(self): + self.zone.add_a('lbr.%s' % self.base_domain, '4.3.2.1', + identifier=('baz', 'us-east-1')) + self.zone.add_a('lbr.%s' % self.base_domain, '8.7.6.5', + identifier=('bam', 'us-west-1')) + lbrs = self.zone.find_records( + 'lbr.%s' % self.base_domain, + 'A', + all=True + ) + self.assertEquals(len(lbrs), 2) + self.zone.delete_a('lbr.%s' % self.base_domain, + identifier=('bam', 'us-west-1')) + self.zone.delete_a('lbr.%s' % self.base_domain, + identifier=('baz', 'us-east-1')) + + def test_toomany_exception(self): + self.zone.add_a('exception.%s' % self.base_domain, '4.3.2.1', + identifier=('baz', 'us-east-1')) + self.zone.add_a('exception.%s' % self.base_domain, '8.7.6.5', + identifier=('bam', 'us-west-1')) + self.assertRaises(TooManyRecordsException, + lambda: self.zone.get_a('exception.%s' % + self.base_domain)) + self.zone.delete_a('exception.%s' % self.base_domain, all=True) + + @classmethod + def tearDownClass(self): + self.zone.delete_a(self.base_domain) + self.zone.delete_cname('www.%s' % self.base_domain) + self.zone.delete_mx(self.base_domain) + self.zone.delete() + + +@attr(route53=True) +class TestRoute53PrivateZone(unittest.TestCase): + @classmethod + def setUpClass(self): + time_str = str(int(time.time())) + self.route53 = Route53Connection() + self.base_domain = 'boto-private-zone-test-%s.com' % time_str + self.vpc = VPCConnection() + self.test_vpc = self.vpc.create_vpc(cidr_block='10.11.0.0/16') + # tag the vpc to make it easily identifiable if things go spang + self.test_vpc.add_tag("Name", self.base_domain) + self.zone = self.route53.get_zone(self.base_domain) + if self.zone is not None: + self.zone.delete() + + def test_create_private_zone(self): + self.zone = self.route53.create_hosted_zone(self.base_domain, + private_zone=True, + vpc_id=self.test_vpc.id, + vpc_region='us-east-1') + + @classmethod + def tearDownClass(self): + if self.zone is not None: + self.zone.delete() + self.test_vpc.delete() + +if __name__ == '__main__': + unittest.main(verbosity=3) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b3fc3a0c31bff21cc9c8e0a0189f02cee820e64e --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- 
+# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/mock_storage_service.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/mock_storage_service.py new file mode 100644 index 0000000000000000000000000000000000000000..8b5ff28d13b6fd77ff66db359c35afddb91dffc6 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/mock_storage_service.py @@ -0,0 +1,606 @@ +# Copyright 2010 Google Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Provides basic mocks of core storage service classes, for unit testing: +ACL, Key, Bucket, Connection, and StorageUri. We implement a subset of +the interfaces defined in the real boto classes, but don't handle most +of the optional params (which we indicate with the constant "NOT_IMPL"). 
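+Everything operates on in-memory state, so tests that exercise storage logic
+can run without network access or AWS credentials.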
+""" + +import copy +import boto +import base64 +import re +from hashlib import md5 + +from boto.utils import compute_md5 +from boto.utils import find_matching_headers +from boto.utils import merge_headers_by_name +from boto.s3.prefix import Prefix +from boto.compat import six + +NOT_IMPL = None + + +class MockAcl(object): + + def __init__(self, parent=NOT_IMPL): + pass + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + pass + + def to_xml(self): + return '' + + +class MockKey(object): + + def __init__(self, bucket=None, name=None): + self.bucket = bucket + self.name = name + self.data = None + self.etag = None + self.size = None + self.closed = True + self.content_encoding = None + self.content_language = None + self.content_type = None + self.last_modified = 'Wed, 06 Oct 2010 05:11:54 GMT' + self.BufferSize = 8192 + + def __repr__(self): + if self.bucket: + return '' % (self.bucket.name, self.name) + else: + return '' % self.name + + def get_contents_as_string(self, headers=NOT_IMPL, + cb=NOT_IMPL, num_cb=NOT_IMPL, + torrent=NOT_IMPL, + version_id=NOT_IMPL): + return self.data + + def get_contents_to_file(self, fp, headers=NOT_IMPL, + cb=NOT_IMPL, num_cb=NOT_IMPL, + torrent=NOT_IMPL, + version_id=NOT_IMPL, + res_download_handler=NOT_IMPL): + fp.write(self.data) + + def get_file(self, fp, headers=NOT_IMPL, cb=NOT_IMPL, num_cb=NOT_IMPL, + torrent=NOT_IMPL, version_id=NOT_IMPL, + override_num_retries=NOT_IMPL): + fp.write(self.data) + + def _handle_headers(self, headers): + if not headers: + return + if find_matching_headers('Content-Encoding', headers): + self.content_encoding = merge_headers_by_name('Content-Encoding', + headers) + if find_matching_headers('Content-Type', headers): + self.content_type = merge_headers_by_name('Content-Type', headers) + if find_matching_headers('Content-Language', headers): + self.content_language = merge_headers_by_name('Content-Language', + headers) + + # Simplistic partial implementation for headers: Just supports range GETs + # of flavor 'Range: bytes=xyz-'. 
+ def open_read(self, headers=None, query_args=NOT_IMPL, + override_num_retries=NOT_IMPL): + if self.closed: + self.read_pos = 0 + self.closed = False + if headers and 'Range' in headers: + match = re.match('bytes=([0-9]+)-$', headers['Range']) + if match: + self.read_pos = int(match.group(1)) + + def close(self, fast=NOT_IMPL): + self.closed = True + + def read(self, size=0): + self.open_read() + if size == 0: + data = self.data[self.read_pos:] + self.read_pos = self.size + else: + data = self.data[self.read_pos:self.read_pos+size] + self.read_pos += size + if not data: + self.close() + return data + + def set_contents_from_file(self, fp, headers=None, replace=NOT_IMPL, + cb=NOT_IMPL, num_cb=NOT_IMPL, + policy=NOT_IMPL, md5=NOT_IMPL, + res_upload_handler=NOT_IMPL): + self.data = fp.read() + self.set_etag() + self.size = len(self.data) + self._handle_headers(headers) + + def set_contents_from_stream(self, fp, headers=None, replace=NOT_IMPL, + cb=NOT_IMPL, num_cb=NOT_IMPL, policy=NOT_IMPL, + reduced_redundancy=NOT_IMPL, query_args=NOT_IMPL, + size=NOT_IMPL): + self.data = '' + chunk = fp.read(self.BufferSize) + while chunk: + self.data += chunk + chunk = fp.read(self.BufferSize) + self.set_etag() + self.size = len(self.data) + self._handle_headers(headers) + + def set_contents_from_string(self, s, headers=NOT_IMPL, replace=NOT_IMPL, + cb=NOT_IMPL, num_cb=NOT_IMPL, policy=NOT_IMPL, + md5=NOT_IMPL, reduced_redundancy=NOT_IMPL): + self.data = copy.copy(s) + self.set_etag() + self.size = len(s) + self._handle_headers(headers) + + def set_contents_from_filename(self, filename, headers=None, + replace=NOT_IMPL, cb=NOT_IMPL, + num_cb=NOT_IMPL, policy=NOT_IMPL, + md5=NOT_IMPL, res_upload_handler=NOT_IMPL): + fp = open(filename, 'rb') + self.set_contents_from_file(fp, headers, replace, cb, num_cb, + policy, md5, res_upload_handler) + fp.close() + + def copy(self, dst_bucket_name, dst_key, metadata=NOT_IMPL, + reduced_redundancy=NOT_IMPL, preserve_acl=NOT_IMPL): + dst_bucket = self.bucket.connection.get_bucket(dst_bucket_name) + return dst_bucket.copy_key(dst_key, self.bucket.name, + self.name, metadata) + + @property + def provider(self): + provider = None + if self.bucket and self.bucket.connection: + provider = self.bucket.connection.provider + return provider + + def set_etag(self): + """ + Set etag attribute by generating hex MD5 checksum on current + contents of mock key. + """ + m = md5() + if not isinstance(self.data, bytes): + m.update(self.data.encode('utf-8')) + else: + m.update(self.data) + hex_md5 = m.hexdigest() + self.etag = hex_md5 + + def compute_md5(self, fp): + """ + :type fp: file + :param fp: File pointer to the file to MD5 hash. The file pointer + will be reset to the beginning of the file before the + method returns. + + :rtype: tuple + :return: A tuple containing the hex digest version of the MD5 hash + as the first element and the base64 encoded version of the + plain digest as the second element. + """ + tup = compute_md5(fp) + # Returned values are MD5 hash, base64 encoded MD5 hash, and file size. + # The internal implementation of compute_md5() needs to return the + # file size but we don't want to return that value to the external + # caller because it changes the class interface (i.e. it might + # break some code) so we consume the third tuple value here and + # return the remainder of the tuple to the caller, thereby preserving + # the existing interface. 
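+        # Recording the size here mirrors what the set_contents_* helpers do,
+        # so read() and listings see a consistent length afterwards.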
+ self.size = tup[2] + return tup[0:2] + +class MockBucket(object): + + def __init__(self, connection=None, name=None, key_class=NOT_IMPL): + self.name = name + self.keys = {} + self.acls = {name: MockAcl()} + # default object ACLs are one per bucket and not supported for keys + self.def_acl = MockAcl() + self.subresources = {} + self.connection = connection + self.logging = False + + def __repr__(self): + return 'MockBucket: %s' % self.name + + def copy_key(self, new_key_name, src_bucket_name, + src_key_name, metadata=NOT_IMPL, src_version_id=NOT_IMPL, + storage_class=NOT_IMPL, preserve_acl=NOT_IMPL, + encrypt_key=NOT_IMPL, headers=NOT_IMPL, query_args=NOT_IMPL): + new_key = self.new_key(key_name=new_key_name) + src_key = self.connection.get_bucket( + src_bucket_name).get_key(src_key_name) + new_key.data = copy.copy(src_key.data) + new_key.size = len(new_key.data) + return new_key + + def disable_logging(self): + self.logging = False + + def enable_logging(self, target_bucket_prefix): + self.logging = True + + def get_logging_config(self): + return {"Logging": {}} + + def get_versioning_status(self, headers=NOT_IMPL): + return False + + def get_acl(self, key_name='', headers=NOT_IMPL, version_id=NOT_IMPL): + if key_name: + # Return ACL for the key. + return self.acls[key_name] + else: + # Return ACL for the bucket. + return self.acls[self.name] + + def get_def_acl(self, key_name=NOT_IMPL, headers=NOT_IMPL, + version_id=NOT_IMPL): + # Return default ACL for the bucket. + return self.def_acl + + def get_subresource(self, subresource, key_name=NOT_IMPL, headers=NOT_IMPL, + version_id=NOT_IMPL): + if subresource in self.subresources: + return self.subresources[subresource] + else: + return '' + + def new_key(self, key_name=None): + mock_key = MockKey(self, key_name) + self.keys[key_name] = mock_key + self.acls[key_name] = MockAcl() + return mock_key + + def delete_key(self, key_name, headers=NOT_IMPL, + version_id=NOT_IMPL, mfa_token=NOT_IMPL): + if key_name not in self.keys: + raise boto.exception.StorageResponseError(404, 'Not Found') + del self.keys[key_name] + + def get_all_keys(self, headers=NOT_IMPL): + return six.itervalues(self.keys) + + def get_key(self, key_name, headers=NOT_IMPL, version_id=NOT_IMPL): + # Emulate behavior of boto when get_key called with non-existent key. + if key_name not in self.keys: + return None + return self.keys[key_name] + + def list(self, prefix='', delimiter='', marker=NOT_IMPL, + headers=NOT_IMPL): + prefix = prefix or '' # Turn None into '' for prefix match. + # Return list instead of using a generator so we don't get + # 'dictionary changed size during iteration' error when performing + # deletions while iterating (e.g., during test cleanup). + result = [] + key_name_set = set() + for k in six.itervalues(self.keys): + if k.name.startswith(prefix): + k_name_past_prefix = k.name[len(prefix):] + if delimiter: + pos = k_name_past_prefix.find(delimiter) + else: + pos = -1 + if (pos != -1): + key_or_prefix = Prefix( + bucket=self, name=k.name[:len(prefix)+pos+1]) + else: + key_or_prefix = MockKey(bucket=self, name=k.name) + if key_or_prefix.name not in key_name_set: + key_name_set.add(key_or_prefix.name) + result.append(key_or_prefix) + return result + + def set_acl(self, acl_or_str, key_name='', headers=NOT_IMPL, + version_id=NOT_IMPL): + # We only handle setting ACL XML here; if you pass a canned ACL + # the get_acl call will just return that string name. + if key_name: + # Set ACL for the key. 
+ self.acls[key_name] = MockAcl(acl_or_str) + else: + # Set ACL for the bucket. + self.acls[self.name] = MockAcl(acl_or_str) + + def set_def_acl(self, acl_or_str, key_name=NOT_IMPL, headers=NOT_IMPL, + version_id=NOT_IMPL): + # We only handle setting ACL XML here; if you pass a canned ACL + # the get_acl call will just return that string name. + # Set default ACL for the bucket. + self.def_acl = acl_or_str + + def set_subresource(self, subresource, value, key_name=NOT_IMPL, + headers=NOT_IMPL, version_id=NOT_IMPL): + self.subresources[subresource] = value + + +class MockProvider(object): + + def __init__(self, provider): + self.provider = provider + + def get_provider_name(self): + return self.provider + + +class MockConnection(object): + + def __init__(self, aws_access_key_id=NOT_IMPL, + aws_secret_access_key=NOT_IMPL, is_secure=NOT_IMPL, + port=NOT_IMPL, proxy=NOT_IMPL, proxy_port=NOT_IMPL, + proxy_user=NOT_IMPL, proxy_pass=NOT_IMPL, + host=NOT_IMPL, debug=NOT_IMPL, + https_connection_factory=NOT_IMPL, + calling_format=NOT_IMPL, + path=NOT_IMPL, provider='s3', + bucket_class=NOT_IMPL): + self.buckets = {} + self.provider = MockProvider(provider) + + def create_bucket(self, bucket_name, headers=NOT_IMPL, location=NOT_IMPL, + policy=NOT_IMPL, storage_class=NOT_IMPL): + if bucket_name in self.buckets: + raise boto.exception.StorageCreateError( + 409, 'BucketAlreadyOwnedByYou', + "Your previous request to create the named bucket " + "succeeded and you already own it.") + mock_bucket = MockBucket(name=bucket_name, connection=self) + self.buckets[bucket_name] = mock_bucket + return mock_bucket + + def delete_bucket(self, bucket, headers=NOT_IMPL): + if bucket not in self.buckets: + raise boto.exception.StorageResponseError( + 404, 'NoSuchBucket', 'no such bucket') + del self.buckets[bucket] + + def get_bucket(self, bucket_name, validate=NOT_IMPL, headers=NOT_IMPL): + if bucket_name not in self.buckets: + raise boto.exception.StorageResponseError(404, 'NoSuchBucket', + 'Not Found') + return self.buckets[bucket_name] + + def get_all_buckets(self, headers=NOT_IMPL): + return six.itervalues(self.buckets) + + +# We only mock a single provider/connection. 
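+#
+# A minimal usage sketch (hypothetical names):
+#
+#     conn = MockConnection()
+#     bucket = conn.create_bucket('test-bucket')
+#     key = bucket.new_key('hello.txt')
+#     key.set_contents_from_string('hi')
+#     assert key.get_contents_as_string() == 'hi'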
+mock_connection = MockConnection() + + +class MockBucketStorageUri(object): + + delim = '/' + + def __init__(self, scheme, bucket_name=None, object_name=None, + debug=NOT_IMPL, suppress_consec_slashes=NOT_IMPL, + version_id=None, generation=None, is_latest=False): + self.scheme = scheme + self.bucket_name = bucket_name + self.object_name = object_name + self.suppress_consec_slashes = suppress_consec_slashes + if self.bucket_name and self.object_name: + self.uri = ('%s://%s/%s' % (self.scheme, self.bucket_name, + self.object_name)) + elif self.bucket_name: + self.uri = ('%s://%s/' % (self.scheme, self.bucket_name)) + else: + self.uri = ('%s://' % self.scheme) + + self.version_id = version_id + self.generation = generation and int(generation) + self.is_version_specific = (bool(self.generation) + or bool(self.version_id)) + self.is_latest = is_latest + if bucket_name and object_name: + self.versionless_uri = '%s://%s/%s' % (scheme, bucket_name, + object_name) + + def __repr__(self): + """Returns string representation of URI.""" + return self.uri + + def acl_class(self): + return MockAcl + + def canned_acls(self): + return boto.provider.Provider('aws').canned_acls + + def clone_replace_name(self, new_name): + return self.__class__(self.scheme, self.bucket_name, new_name) + + def clone_replace_key(self, key): + return self.__class__( + key.provider.get_provider_name(), + bucket_name=key.bucket.name, + object_name=key.name, + suppress_consec_slashes=self.suppress_consec_slashes, + version_id=getattr(key, 'version_id', None), + generation=getattr(key, 'generation', None), + is_latest=getattr(key, 'is_latest', None)) + + def connect(self, access_key_id=NOT_IMPL, secret_access_key=NOT_IMPL): + return mock_connection + + def create_bucket(self, headers=NOT_IMPL, location=NOT_IMPL, + policy=NOT_IMPL, storage_class=NOT_IMPL): + return self.connect().create_bucket(self.bucket_name) + + def delete_bucket(self, headers=NOT_IMPL): + return self.connect().delete_bucket(self.bucket_name) + + def get_versioning_config(self, headers=NOT_IMPL): + self.get_bucket().get_versioning_status(headers) + + def has_version(self): + return (issubclass(type(self), MockBucketStorageUri) + and ((self.version_id is not None) + or (self.generation is not None))) + + def delete_key(self, validate=NOT_IMPL, headers=NOT_IMPL, + version_id=NOT_IMPL, mfa_token=NOT_IMPL): + self.get_bucket().delete_key(self.object_name) + + def disable_logging(self, validate=NOT_IMPL, headers=NOT_IMPL, + version_id=NOT_IMPL): + self.get_bucket().disable_logging() + + def enable_logging(self, target_bucket, target_prefix, validate=NOT_IMPL, + headers=NOT_IMPL, version_id=NOT_IMPL): + self.get_bucket().enable_logging(target_bucket) + + def get_logging_config(self, validate=NOT_IMPL, headers=NOT_IMPL, + version_id=NOT_IMPL): + return self.get_bucket().get_logging_config() + + def equals(self, uri): + return self.uri == uri.uri + + def get_acl(self, validate=NOT_IMPL, headers=NOT_IMPL, version_id=NOT_IMPL): + return self.get_bucket().get_acl(self.object_name) + + def get_def_acl(self, validate=NOT_IMPL, headers=NOT_IMPL, + version_id=NOT_IMPL): + return self.get_bucket().get_def_acl(self.object_name) + + def get_subresource(self, subresource, validate=NOT_IMPL, headers=NOT_IMPL, + version_id=NOT_IMPL): + return self.get_bucket().get_subresource(subresource, self.object_name) + + def get_all_buckets(self, headers=NOT_IMPL): + return self.connect().get_all_buckets() + + def get_all_keys(self, validate=NOT_IMPL, headers=NOT_IMPL): + return 
self.get_bucket().get_all_keys(self)
+
+    def list_bucket(self, prefix='', delimiter='', headers=NOT_IMPL,
+                    all_versions=NOT_IMPL):
+        return self.get_bucket().list(prefix=prefix, delimiter=delimiter)
+
+    def get_bucket(self, validate=NOT_IMPL, headers=NOT_IMPL):
+        return self.connect().get_bucket(self.bucket_name)
+
+    def get_key(self, validate=NOT_IMPL, headers=NOT_IMPL,
+                version_id=NOT_IMPL):
+        return self.get_bucket().get_key(self.object_name)
+
+    def is_file_uri(self):
+        return False
+
+    def is_cloud_uri(self):
+        return True
+
+    def names_container(self):
+        return bool(not self.object_name)
+
+    def names_singleton(self):
+        return bool(self.object_name)
+
+    def names_directory(self):
+        return False
+
+    def names_provider(self):
+        return bool(not self.bucket_name)
+
+    def names_bucket(self):
+        return self.names_container()
+
+    def names_file(self):
+        return False
+
+    def names_object(self):
+        return not self.names_container()
+
+    def is_stream(self):
+        return False
+
+    def new_key(self, validate=NOT_IMPL, headers=NOT_IMPL):
+        bucket = self.get_bucket()
+        return bucket.new_key(self.object_name)
+
+    def set_acl(self, acl_or_str, key_name='', validate=NOT_IMPL,
+                headers=NOT_IMPL, version_id=NOT_IMPL):
+        self.get_bucket().set_acl(acl_or_str, key_name)
+
+    def set_def_acl(self, acl_or_str, key_name=NOT_IMPL, validate=NOT_IMPL,
+                    headers=NOT_IMPL, version_id=NOT_IMPL):
+        self.get_bucket().set_def_acl(acl_or_str)
+
+    def set_subresource(self, subresource, value, validate=NOT_IMPL,
+                        headers=NOT_IMPL, version_id=NOT_IMPL):
+        self.get_bucket().set_subresource(subresource, value, self.object_name)
+
+    def copy_key(self, src_bucket_name, src_key_name, metadata=NOT_IMPL,
+                 src_version_id=NOT_IMPL, storage_class=NOT_IMPL,
+                 preserve_acl=NOT_IMPL, encrypt_key=NOT_IMPL, headers=NOT_IMPL,
+                 query_args=NOT_IMPL, src_generation=NOT_IMPL):
+        dst_bucket = self.get_bucket()
+        return dst_bucket.copy_key(new_key_name=self.object_name,
+                                   src_bucket_name=src_bucket_name,
+                                   src_key_name=src_key_name)
+
+    def set_contents_from_string(self, s, headers=NOT_IMPL, replace=NOT_IMPL,
+                                 cb=NOT_IMPL, num_cb=NOT_IMPL, policy=NOT_IMPL,
+                                 md5=NOT_IMPL, reduced_redundancy=NOT_IMPL):
+        key = self.new_key()
+        key.set_contents_from_string(s)
+
+    def set_contents_from_file(self, fp, headers=None, replace=NOT_IMPL,
+                               cb=NOT_IMPL, num_cb=NOT_IMPL, policy=NOT_IMPL,
+                               md5=NOT_IMPL, size=NOT_IMPL, rewind=NOT_IMPL,
+                               res_upload_handler=NOT_IMPL):
+        key = self.new_key()
+        return key.set_contents_from_file(fp, headers=headers)
+
+    def set_contents_from_stream(self, fp, headers=NOT_IMPL, replace=NOT_IMPL,
+                                 cb=NOT_IMPL, num_cb=NOT_IMPL, policy=NOT_IMPL,
+                                 reduced_redundancy=NOT_IMPL,
+                                 query_args=NOT_IMPL, size=NOT_IMPL):
+        # Write through a freshly created key, as the other writers above do.
+        key = self.new_key()
+        key.set_contents_from_stream(fp)
+
+    def get_contents_to_file(self, fp, headers=NOT_IMPL, cb=NOT_IMPL,
+                             num_cb=NOT_IMPL, torrent=NOT_IMPL,
+                             version_id=NOT_IMPL, res_download_handler=NOT_IMPL,
+                             response_headers=NOT_IMPL):
+        key = self.get_key()
+        key.get_contents_to_file(fp)
+
+    def get_contents_to_stream(self, fp, headers=NOT_IMPL, cb=NOT_IMPL,
+                               num_cb=NOT_IMPL, version_id=NOT_IMPL):
+        key = self.get_key()
+        return key.get_contents_to_file(fp)
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/other_cacerts.txt b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/other_cacerts.txt
new file mode 100644
index 0000000000000000000000000000000000000000..360954a2868212ae1625b65f598c7ed76cdfc78b
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/other_cacerts.txt
@@ -0,0 +1,70 @@
+# Certificate Authority certificates for validating SSL connections.
+#
+# This file contains PEM format certificates generated from
+# http://mxr.mozilla.org/seamonkey/source/security/nss/lib/ckfw/builtins/certdata.txt
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is the Netscape security libraries.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1994-2000
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+# +# ***** END LICENSE BLOCK ***** + + +Comodo CA Limited, CN=Trusted Certificate Services +================================================== + +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIBATANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJHQjEb +MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow +GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDElMCMGA1UEAwwcVHJ1c3RlZCBDZXJ0 +aWZpY2F0ZSBTZXJ2aWNlczAeFw0wNDAxMDEwMDAwMDBaFw0yODEyMzEyMzU5NTla +MH8xCzAJBgNVBAYTAkdCMRswGQYDVQQIDBJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAO +BgNVBAcMB1NhbGZvcmQxGjAYBgNVBAoMEUNvbW9kbyBDQSBMaW1pdGVkMSUwIwYD +VQQDDBxUcnVzdGVkIENlcnRpZmljYXRlIFNlcnZpY2VzMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEA33FvNlhTWvI2VFeAxHQIIO0Yfyod5jWaHiWsnOWW +fnJSoBVC21ndZHoa0Lh73TkVvFVIxO06AOoxEbrycXQaZ7jPM8yoMa+j49d/vzMt +TGo87IvDktJTdyR0nAducPy9C1t2ul/y/9c3S0pgePfw+spwtOpZqqPOSC+pw7IL +fhdyFgymBwwbOM/JYrc/oJOlh0Hyt3BAd9i+FHzjqMB6juljatEPmsbS9Is6FARW +1O24zG71++IsWL1/T2sr92AkWCTOJu80kTrV44HQsvAEAtdbtz6SrGsSivnkBbA7 +kUlcsutT6vifR4buv5XAwAaf0lteERv0xwQ1KdJVXOTt6wIDAQABo4HJMIHGMB0G +A1UdDgQWBBTFe1i97doladL3WRaoszLAeydb9DAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zCBgwYDVR0fBHwwejA8oDqgOIY2aHR0cDovL2NybC5jb21v +ZG9jYS5jb20vVHJ1c3RlZENlcnRpZmljYXRlU2VydmljZXMuY3JsMDqgOKA2hjRo +dHRwOi8vY3JsLmNvbW9kby5uZXQvVHJ1c3RlZENlcnRpZmljYXRlU2VydmljZXMu +Y3JsMA0GCSqGSIb3DQEBBQUAA4IBAQDIk4E7ibSvuIQSTI3S8NtwuleGFTQQuS9/ +HrCoiWChisJ3DFBKmwCL2Iv0QeLQg4pKHBQGsKNoBXAxMKdTmw7pSqBYaWcOrp32 +pSxBvzwGa+RZzG0Q8ZZvH9/0BAKkn0U+yNj6NkZEUD+Cl5EfKNsYEYwq5GWDVxIS +jBc/lDb+XbDABHcTuPQV1T84zJQ6VdCsmPW6AF/ghhmBeC8owH7TzEIK9a5QoNE+ +xqFx7D+gIIxmOom0jtTYsU0lR+4viMi14QVFwL4Ucd56/Y57fU0IlqUSc/Atyjcn +dBInTMu2l+nZrghtWjlA3QVHdWpaIbOjGM9O9y5Xt5hwXsjEeLBi +-----END CERTIFICATE----- diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_bucket.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_bucket.py new file mode 100644 index 0000000000000000000000000000000000000000..849514405445888b37e71c5accb2a10b52ea5c79 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_bucket.py @@ -0,0 +1,301 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
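+#
+# The tests below follow boto's usual integration-test shape: setUp creates a
+# uniquely named bucket, each test exercises one feature, and tearDown deletes
+# every key and then the bucket itself. test_next_marker exercises paging; a
+# condensed sketch of the equivalent application-side loop (hypothetical
+# 'bucket' object):
+#
+#     marker = ''
+#     while True:
+#         rs = bucket.get_all_keys(max_keys=2, delimiter='/', marker=marker)
+#         for element in rs:
+#             print(element.name)
+#         if not rs.is_truncated:
+#             break
+#         # next_marker is only set for truncated, delimited listings;
+#         # otherwise resume from the name of the last element returned.
+#         marker = rs.next_marker or element.name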
+ +""" +Some unit tests for the S3 Bucket +""" + +from mock import patch, Mock +import unittest +import time + +from boto.exception import S3ResponseError +from boto.s3.connection import S3Connection +from boto.s3.bucketlogging import BucketLogging +from boto.s3.lifecycle import Lifecycle +from boto.s3.lifecycle import Transition +from boto.s3.lifecycle import Expiration +from boto.s3.lifecycle import Rule +from boto.s3.acl import Grant +from boto.s3.tagging import Tags, TagSet +from boto.s3.website import RedirectLocation +from boto.compat import urllib + + +class S3BucketTest (unittest.TestCase): + s3 = True + + def setUp(self): + self.conn = S3Connection() + self.bucket_name = 'bucket-%d' % int(time.time()) + self.bucket = self.conn.create_bucket(self.bucket_name) + + def tearDown(self): + for key in self.bucket: + key.delete() + self.bucket.delete() + + def test_next_marker(self): + expected = ["a/", "b", "c"] + for key_name in expected: + key = self.bucket.new_key(key_name) + key.set_contents_from_string(key_name) + + # Normal list of first 2 keys will have + # no NextMarker set, so we use last key to iterate + # last element will be "b" so no issue. + rs = self.bucket.get_all_keys(max_keys=2) + for element in rs: + pass + self.assertEqual(element.name, "b") + self.assertEqual(rs.next_marker, None) + + # list using delimiter of first 2 keys will have + # a NextMarker set (when truncated). As prefixes + # are grouped together at the end, we get "a/" as + # last element, but luckily we have next_marker. + rs = self.bucket.get_all_keys(max_keys=2, delimiter="/") + for element in rs: + pass + self.assertEqual(element.name, "a/") + self.assertEqual(rs.next_marker, "b") + + # ensure bucket.list() still works by just + # popping elements off the front of expected. + rs = self.bucket.list() + for element in rs: + self.assertEqual(element.name, expected.pop(0)) + self.assertEqual(expected, []) + + def test_list_with_url_encoding(self): + expected = ["α", "β", "γ"] + for key_name in expected: + key = self.bucket.new_key(key_name) + key.set_contents_from_string(key_name) + + # ensure bucket.list() still works by just + # popping elements off the front of expected. + orig_getall = self.bucket._get_all + getall = lambda *a, **k: orig_getall(*a, max_keys=2, **k) + with patch.object(self.bucket, '_get_all', getall): + rs = self.bucket.list(encoding_type="url") + for element in rs: + name = urllib.parse.unquote(element.name.encode('utf-8')) + self.assertEqual(name, expected.pop(0)) + self.assertEqual(expected, []) + + def test_logging(self): + # use self.bucket as the target bucket so that teardown + # will delete any log files that make it into the bucket + # automatically and all we have to do is delete the + # source bucket. + sb_name = "src-" + self.bucket_name + sb = self.conn.create_bucket(sb_name) + # grant log write perms to target bucket using canned-acl + self.bucket.set_acl("log-delivery-write") + target_bucket = self.bucket_name + target_prefix = u"jp/ログ/" + # Check existing status is disabled + bls = sb.get_logging_status() + self.assertEqual(bls.target, None) + # Create a logging status and grant auth users READ PERM + authuri = "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" + authr = Grant(permission="READ", type="Group", uri=authuri) + sb.enable_logging(target_bucket, target_prefix=target_prefix, grants=[authr]) + # Check the status and confirm its set. 
+        bls = sb.get_logging_status()
+        self.assertEqual(bls.target, target_bucket)
+        self.assertEqual(bls.prefix, target_prefix)
+        self.assertEqual(len(bls.grants), 1)
+        self.assertEqual(bls.grants[0].type, "Group")
+        self.assertEqual(bls.grants[0].uri, authuri)
+        # finally delete the src bucket
+        sb.delete()
+
+    def test_tagging(self):
+        tagging = """
+            <Tagging>
+              <TagSet>
+                 <Tag>
+                   <Key>tagkey</Key>
+                   <Value>tagvalue</Value>
+                 </Tag>
+              </TagSet>
+            </Tagging>
+        """
+        self.bucket.set_xml_tags(tagging)
+        response = self.bucket.get_tags()
+        self.assertEqual(response[0][0].key, 'tagkey')
+        self.assertEqual(response[0][0].value, 'tagvalue')
+        self.bucket.delete_tags()
+        try:
+            self.bucket.get_tags()
+        except S3ResponseError as e:
+            self.assertEqual(e.code, 'NoSuchTagSet')
+        except Exception as e:
+            self.fail("Wrong exception raised (expected S3ResponseError): %s"
+                      % e)
+        else:
+            self.fail("Expected S3ResponseError, but no exception raised.")
+
+    def test_tagging_from_objects(self):
+        """Create tags from python objects rather than raw xml."""
+        t = Tags()
+        tag_set = TagSet()
+        tag_set.add_tag('akey', 'avalue')
+        tag_set.add_tag('anotherkey', 'anothervalue')
+        t.add_tag_set(tag_set)
+        self.bucket.set_tags(t)
+        response = self.bucket.get_tags()
+        self.assertEqual(response[0][0].key, 'akey')
+        self.assertEqual(response[0][0].value, 'avalue')
+        self.assertEqual(response[0][1].key, 'anotherkey')
+        self.assertEqual(response[0][1].value, 'anothervalue')
+
+    def test_website_configuration(self):
+        response = self.bucket.configure_website('index.html')
+        self.assertTrue(response)
+        config = self.bucket.get_website_configuration()
+        self.assertEqual(config, {'WebsiteConfiguration':
+                                  {'IndexDocument': {'Suffix': 'index.html'}}})
+        config2, xml = self.bucket.get_website_configuration_with_xml()
+        self.assertEqual(config, config2)
+        self.assertTrue('index.html' in xml, xml)
+
+    def test_website_redirect_all_requests(self):
+        response = self.bucket.configure_website(
+            redirect_all_requests_to=RedirectLocation('example.com'))
+        config = self.bucket.get_website_configuration()
+        self.assertEqual(config, {
+            'WebsiteConfiguration': {
+                'RedirectAllRequestsTo': {
+                    'HostName': 'example.com'}}})
+
+        # Can configure the protocol as well.
+        response = self.bucket.configure_website(
+            redirect_all_requests_to=RedirectLocation('example.com', 'https'))
+        config = self.bucket.get_website_configuration()
+        self.assertEqual(config, {
+            'WebsiteConfiguration': {'RedirectAllRequestsTo': {
+                'HostName': 'example.com',
+                'Protocol': 'https',
+            }}}
+        )
+
+    def test_lifecycle(self):
+        lifecycle = Lifecycle()
+        lifecycle.add_rule('myid', '', 'Enabled', 30)
+        self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
+        response = self.bucket.get_lifecycle_config()
+        self.assertEqual(len(response), 1)
+        actual_lifecycle = response[0]
+        self.assertEqual(actual_lifecycle.id, 'myid')
+        self.assertEqual(actual_lifecycle.prefix, '')
+        self.assertEqual(actual_lifecycle.status, 'Enabled')
+        self.assertEqual(actual_lifecycle.transition, None)
+
+    def test_lifecycle_with_glacier_transition(self):
+        lifecycle = Lifecycle()
+        transition = Transition(days=30, storage_class='GLACIER')
+        rule = Rule('myid', prefix='', status='Enabled', expiration=None,
+                    transition=transition)
+        lifecycle.append(rule)
+        self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
+        response = self.bucket.get_lifecycle_config()
+        transition = response[0].transition
+        self.assertEqual(transition.days, 30)
+        self.assertEqual(transition.storage_class, 'GLACIER')
+        self.assertEqual(transition.date, None)
+
+    def test_lifecycle_multi(self):
+        date = '2022-10-12T00:00:00.000Z'
+        sc = 'GLACIER'
+        lifecycle = Lifecycle()
+        lifecycle.add_rule("1", "1/", "Enabled", 1)
+        lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2))
+        lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date))
+        lifecycle.add_rule("4", "4/", "Enabled", None,
+                           Transition(days=4, storage_class=sc))
+        lifecycle.add_rule("5", "5/", "Enabled", None,
+                           Transition(date=date, storage_class=sc))
+        # set the lifecycle
+        self.bucket.configure_lifecycle(lifecycle)
+        # read the lifecycle back
+        readlifecycle = self.bucket.get_lifecycle_config()
+        for rule in readlifecycle:
+            if rule.id == "1":
+                self.assertEqual(rule.prefix, "1/")
+                self.assertEqual(rule.expiration.days, 1)
+            elif rule.id == "2":
+                self.assertEqual(rule.prefix, "2/")
+                self.assertEqual(rule.expiration.days, 2)
+            elif rule.id == "3":
+                self.assertEqual(rule.prefix, "3/")
+                self.assertEqual(rule.expiration.date, date)
+            elif rule.id == "4":
+                self.assertEqual(rule.prefix, "4/")
+                self.assertEqual(rule.transition.days, 4)
+                self.assertEqual(rule.transition.storage_class, sc)
+            elif rule.id == "5":
+                self.assertEqual(rule.prefix, "5/")
+                self.assertEqual(rule.transition.date, date)
+                self.assertEqual(rule.transition.storage_class, sc)
+            else:
+                self.fail("unexpected id %s" % rule.id)
+
+    def test_lifecycle_jp(self):
+        # test lifecycle with Japanese prefix
+        name = "Japanese files"
+        prefix = "日本語/"
+        days = 30
+        lifecycle = Lifecycle()
+        lifecycle.add_rule(name, prefix, "Enabled", days)
+        # set the lifecycle
+        self.bucket.configure_lifecycle(lifecycle)
+        # read the lifecycle back
+        readlifecycle = self.bucket.get_lifecycle_config()
+        for rule in readlifecycle:
+            self.assertEqual(rule.id, name)
+            self.assertEqual(rule.expiration.days, days)
+            # Note: boto appears to send the prefix correctly, but AWS
+            # returns it in a form that fails this check, so the
+            # assertion is disabled:
+            #self.assertEqual(rule.prefix, prefix)
+
+    def test_lifecycle_with_defaults(self):
+        lifecycle = Lifecycle()
+        lifecycle.add_rule(expiration=30)
+        self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
+        response = self.bucket.get_lifecycle_config()
+        self.assertEqual(len(response), 1)
+        actual_lifecycle = response[0]
+        self.assertNotEqual(len(actual_lifecycle.id), 0)
+        self.assertEqual(actual_lifecycle.prefix, '')
+
+    def test_lifecycle_rule_xml(self):
+        # create a rule directly with id, prefix defaults
+        rule = Rule(status='Enabled', expiration=30)
+        s = rule.to_xml()
+        # Confirm no ID is set in the rule.
+        self.assertEqual(s.find("<ID>"), -1)
+        # Confirm Prefix is '' and not set to 'None'
+        self.assertNotEqual(s.find("<Prefix></Prefix>"), -1)
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_cert_verification.py
new file mode 100644
index 0000000000000000000000000000000000000000..7395d10a9127c4a13d592c487b7c1757819e3c08
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_cert_verification.py
@@ -0,0 +1,39 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on S3 endpoints validate.
+"""
+import unittest
+
+from tests.integration import ServiceCertVerificationTest
+
+import boto.s3
+
+
+class S3CertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
+    s3 = True
+    regions = boto.s3.regions()
+
+    def sample_service_call(self, conn):
+        conn.get_all_buckets()
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_connect_to_region.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_connect_to_region.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c76ada9413819a121f3bcb651a240502655780e
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_connect_to_region.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014 Steven Richards
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Unit test for passing in 'host' parameter and overriding the region
+See issue: #2522
+"""
+from tests.compat import unittest
+
+from boto.s3.connection import S3Connection
+from boto.s3 import connect_to_region
+
+class S3SpecifyHost(unittest.TestCase):
+    s3 = True
+
+    def testWithNonAWSHost(self):
+        connect_args = dict({'host': 'www.not-a-website.com'})
+        connection = connect_to_region('us-east-1', **connect_args)
+        self.assertEquals('www.not-a-website.com', connection.host)
+        self.assertIsInstance(connection, S3Connection)
+
+    def testSuccessWithHostOverrideRegion(self):
+        connect_args = dict({'host': 's3.amazonaws.com'})
+        connection = connect_to_region('us-west-2', **connect_args)
+        self.assertEquals('s3.amazonaws.com', connection.host)
+        self.assertIsInstance(connection, S3Connection)
+
+
+    def testSuccessWithDefaultUSWest2(self):
+        connection = connect_to_region('us-west-2')
+        self.assertEquals('s3-us-west-2.amazonaws.com', connection.host)
+        self.assertIsInstance(connection, S3Connection)
+
+    def testSuccessWithDefaultUSEast1(self):
+        connection = connect_to_region('us-east-1')
+        self.assertEquals('s3.amazonaws.com', connection.host)
+        self.assertIsInstance(connection, S3Connection)
+
+    def testDefaultWithInvalidHost(self):
+        connect_args = dict({'host': ''})
+        connection = connect_to_region('us-west-2', **connect_args)
+        self.assertEquals('s3-us-west-2.amazonaws.com', connection.host)
+        self.assertIsInstance(connection, S3Connection)
+
+    def testDefaultWithInvalidHostNone(self):
+        connect_args = dict({'host': None})
+        connection = connect_to_region('us-east-1', **connect_args)
+        self.assertEquals('s3.amazonaws.com', connection.host)
+        self.assertIsInstance(connection, S3Connection)
+
+    def tearDown(self):
+        self = connection = connect_args = None
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_connection.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7b848bb2e91b8ea3902a160104ddb2f51434341
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_connection.py
@@ -0,0 +1,245 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation
the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Some unit tests for the S3Connection
+"""
+import unittest
+import time
+import os
+
+from boto.s3.connection import S3Connection
+from boto.s3.bucket import Bucket
+from boto.exception import S3PermissionsError, S3ResponseError
+from boto.compat import http_client, six, urlopen, urlsplit
+
+
+class S3ConnectionTest(unittest.TestCase):
+    s3 = True
+
+    def test_1_basic(self):
+        print('--- running S3Connection tests ---')
+        c = S3Connection()
+        # create a new, empty bucket
+        bucket_name = 'test-%d' % int(time.time())
+        bucket = c.create_bucket(bucket_name)
+        # now try a get_bucket call and see if it's really there
+        bucket = c.get_bucket(bucket_name)
+        # test logging
+        logging_bucket = c.create_bucket(bucket_name + '-log')
+        logging_bucket.set_as_logging_target()
+        bucket.enable_logging(target_bucket=logging_bucket, target_prefix=bucket.name)
+        bucket.disable_logging()
+        c.delete_bucket(logging_bucket)
+        k = bucket.new_key('foobar')
+        s1 = 'This is a test of file upload and download'
+        s2 = 'This is a second string to test file upload and download'
+        k.set_contents_from_string(s1)
+        fp = open('foobar', 'wb')
+        # now get the contents from s3 to a local file
+        k.get_contents_to_file(fp)
+        fp.close()
+        fp = open('foobar')
+        # check to make sure content read from s3 is identical to original
+        assert s1 == fp.read(), 'corrupted file'
+        fp.close()
+        # test generated URLs
+        url = k.generate_url(3600)
+        file = urlopen(url)
+        assert s1 == file.read().decode('utf-8'), 'invalid URL %s' % url
+        url = k.generate_url(3600, force_http=True)
+        file = urlopen(url)
+        assert s1 == file.read().decode('utf-8'), 'invalid URL %s' % url
+        url = k.generate_url(3600, force_http=True, headers={'x-amz-x-token': 'XYZ'})
+        file = urlopen(url)
+        assert s1 == file.read().decode('utf-8'), 'invalid URL %s' % url
+        rh = {'response-content-disposition': 'attachment; filename="foo.txt"'}
+        url = k.generate_url(60, response_headers=rh)
+        file = urlopen(url)
+        assert s1 == file.read().decode('utf-8'), 'invalid URL %s' % url
+        # test whether ampersands and to-be-escaped characters work in header filename
+        rh = {'response-content-disposition': 'attachment; filename="foo&z%20ar&ar&zar&bar.txt"'}
+        url = k.generate_url(60, response_headers=rh, force_http=True)
+        file = urlopen(url)
+        assert s1 == file.read().decode('utf-8'), 'invalid URL %s' % url
+        # overwrite foobar contents with a PUT
+        url = k.generate_url(3600, 'PUT', force_http=True, policy='private', reduced_redundancy=True)
+        up = urlsplit(url)
+        con = http_client.HTTPConnection(up.hostname, up.port)
+        con.request("PUT", up.path + '?'
+ up.query, body="hello there") + resp = con.getresponse() + assert 200 == resp.status + assert b"hello there" == k.get_contents_as_string() + bucket.delete_key(k) + # test a few variations on get_all_keys - first load some data + # for the first one, let's override the content type + phony_mimetype = 'application/x-boto-test' + headers = {'Content-Type': phony_mimetype} + k.name = 'foo/bar' + k.set_contents_from_string(s1, headers) + k.name = 'foo/bas' + size = k.set_contents_from_filename('foobar') + assert size == 42 + k.name = 'foo/bat' + k.set_contents_from_string(s1) + k.name = 'fie/bar' + k.set_contents_from_string(s1) + k.name = 'fie/bas' + k.set_contents_from_string(s1) + k.name = 'fie/bat' + k.set_contents_from_string(s1) + # try resetting the contents to another value + md5 = k.md5 + k.set_contents_from_string(s2) + assert k.md5 != md5 + os.unlink('foobar') + all = bucket.get_all_keys() + assert len(all) == 6 + rs = bucket.get_all_keys(prefix='foo') + assert len(rs) == 3 + rs = bucket.get_all_keys(prefix='', delimiter='/') + assert len(rs) == 2 + rs = bucket.get_all_keys(maxkeys=5) + assert len(rs) == 5 + # test the lookup method + k = bucket.lookup('foo/bar') + assert isinstance(k, bucket.key_class) + assert k.content_type == phony_mimetype + k = bucket.lookup('notthere') + assert k == None + # try some metadata stuff + k = bucket.new_key('has_metadata') + mdkey1 = 'meta1' + mdval1 = 'This is the first metadata value' + k.set_metadata(mdkey1, mdval1) + mdkey2 = 'meta2' + mdval2 = 'This is the second metadata value' + k.set_metadata(mdkey2, mdval2) + # try a unicode metadata value + mdval3 = u'föö' + mdkey3 = 'meta3' + k.set_metadata(mdkey3, mdval3) + k.set_contents_from_string(s1) + k = bucket.lookup('has_metadata') + assert k.get_metadata(mdkey1) == mdval1 + assert k.get_metadata(mdkey2) == mdval2 + assert k.get_metadata(mdkey3) == mdval3 + k = bucket.new_key('has_metadata') + k.get_contents_as_string() + assert k.get_metadata(mdkey1) == mdval1 + assert k.get_metadata(mdkey2) == mdval2 + assert k.get_metadata(mdkey3) == mdval3 + bucket.delete_key(k) + # test list and iterator + rs1 = bucket.list() + num_iter = 0 + for r in rs1: + num_iter = num_iter + 1 + rs = bucket.get_all_keys() + num_keys = len(rs) + assert num_iter == num_keys + # try a key with a funny character + k = bucket.new_key('testnewline\n') + k.set_contents_from_string('This is a test') + rs = bucket.get_all_keys() + assert len(rs) == num_keys + 1 + bucket.delete_key(k) + rs = bucket.get_all_keys() + assert len(rs) == num_keys + # try some acl stuff + bucket.set_acl('public-read') + policy = bucket.get_acl() + assert len(policy.acl.grants) == 2 + bucket.set_acl('private') + policy = bucket.get_acl() + assert len(policy.acl.grants) == 1 + k = bucket.lookup('foo/bar') + k.set_acl('public-read') + policy = k.get_acl() + assert len(policy.acl.grants) == 2 + k.set_acl('private') + policy = k.get_acl() + assert len(policy.acl.grants) == 1 + # try the convenience methods for grants + bucket.add_user_grant('FULL_CONTROL', + 'c1e724fbfa0979a4448393c59a8c055011f739b6d102fb37a65f26414653cd67') + try: + bucket.add_email_grant('foobar', 'foo@bar.com') + except S3PermissionsError: + pass + # now try to create an RRS key + k = bucket.new_key('reduced_redundancy') + k.set_contents_from_string('This key has reduced redundancy', + reduced_redundancy=True) + + # now try to inject a response header + data = k.get_contents_as_string(response_headers={'response-content-type' : 'foo/bar'}) + assert k.content_type == 'foo/bar' + + # 
now delete all keys in bucket + for k in bucket: + if k.name == 'reduced_redundancy': + assert k.storage_class == 'REDUCED_REDUNDANCY' + bucket.delete_key(k) + # now delete bucket + time.sleep(5) + c.delete_bucket(bucket) + print('--- tests completed ---') + + def test_basic_anon(self): + auth_con = S3Connection() + # create a new, empty bucket + bucket_name = 'test-%d' % int(time.time()) + auth_bucket = auth_con.create_bucket(bucket_name) + + # try read the bucket anonymously + anon_con = S3Connection(anon=True) + anon_bucket = Bucket(anon_con, bucket_name) + try: + next(iter(anon_bucket.list())) + self.fail("anon bucket list should fail") + except S3ResponseError: + pass + + # give bucket anon user access and anon read again + auth_bucket.set_acl('public-read') + time.sleep(5) + try: + next(iter(anon_bucket.list())) + self.fail("not expecting contents") + except S3ResponseError as e: + self.fail("We should have public-read access, but received " + "an error: %s" % e) + except StopIteration: + pass + + # cleanup + auth_con.delete_bucket(auth_bucket) + + def test_error_code_populated(self): + c = S3Connection() + try: + c.create_bucket('bad$bucket$name') + except S3ResponseError as e: + self.assertEqual(e.error_code, 'InvalidBucketName') + else: + self.fail("S3ResponseError not raised.") diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_cors.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_cors.py new file mode 100644 index 0000000000000000000000000000000000000000..84b12f0a0d443583409191736c57f772cfe62dd7 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_cors.py @@ -0,0 +1,78 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Some integration tests for S3 CORS +""" + +import unittest +import time + +from boto.s3.connection import S3Connection +from boto.exception import S3ResponseError +from boto.s3.cors import CORSConfiguration + + +class S3CORSTest (unittest.TestCase): + s3 = True + + def setUp(self): + self.conn = S3Connection() + self.bucket_name = 'cors-%d' % int(time.time()) + self.bucket = self.conn.create_bucket(self.bucket_name) + + def tearDown(self): + self.bucket.delete() + + def test_cors(self): + self.cfg = CORSConfiguration() + self.cfg.add_rule(['PUT', 'POST', 'DELETE'], + 'http://www.example.com', + allowed_header='*', max_age_seconds=3000, + expose_header='x-amz-server-side-encryption', + id='foobar_rule') + assert self.bucket.set_cors(self.cfg) + time.sleep(5) + cfg = self.bucket.get_cors() + for i, rule in enumerate(cfg): + self.assertEqual(rule.id, self.cfg[i].id) + self.assertEqual(rule.max_age_seconds, self.cfg[i].max_age_seconds) + methods = zip(rule.allowed_method, self.cfg[i].allowed_method) + for v1, v2 in methods: + self.assertEqual(v1, v2) + origins = zip(rule.allowed_origin, self.cfg[i].allowed_origin) + for v1, v2 in origins: + self.assertEqual(v1, v2) + headers = zip(rule.allowed_header, self.cfg[i].allowed_header) + for v1, v2 in headers: + self.assertEqual(v1, v2) + headers = zip(rule.expose_header, self.cfg[i].expose_header) + for v1, v2 in headers: + self.assertEqual(v1, v2) + self.bucket.delete_cors() + time.sleep(5) + try: + self.bucket.get_cors() + self.fail('CORS configuration should not be there') + except S3ResponseError: + pass diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_encryption.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_encryption.py new file mode 100644 index 0000000000000000000000000000000000000000..f48408b940f979049a854151791bbab554da5b85 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_encryption.py @@ -0,0 +1,114 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Some unit tests for the S3 Encryption +""" +import unittest +import time +from boto.s3.connection import S3Connection +from boto.exception import S3ResponseError + +json_policy = """{ + "Version":"2008-10-17", + "Id":"PutObjPolicy", + "Statement":[{ + "Sid":"DenyUnEncryptedObjectUploads", + "Effect":"Deny", + "Principal":{ + "AWS":"*" + }, + "Action":"s3:PutObject", + "Resource":"arn:aws:s3:::%s/*", + "Condition":{ + "StringNotEquals":{ + "s3:x-amz-server-side-encryption":"AES256" + } + } + } + ] +}""" + +class S3EncryptionTest (unittest.TestCase): + s3 = True + + def test_1_versions(self): + print('--- running S3Encryption tests ---') + c = S3Connection() + # create a new, empty bucket + bucket_name = 'encryption-%d' % int(time.time()) + bucket = c.create_bucket(bucket_name) + + # now try a get_bucket call and see if it's really there + bucket = c.get_bucket(bucket_name) + + # create an unencrypted key + k = bucket.new_key('foobar') + s1 = 'This is unencrypted data' + s2 = 'This is encrypted data' + k.set_contents_from_string(s1) + time.sleep(5) + + # now get the contents from s3 + o = k.get_contents_as_string().decode('utf-8') + + # check to make sure content read from s3 is identical to original + assert o == s1 + + # now overwrite that same key with encrypted data + k.set_contents_from_string(s2, encrypt_key=True) + time.sleep(5) + + # now retrieve the contents as a string and compare + o = k.get_contents_as_string().decode('utf-8') + assert o == s2 + + # now set bucket policy to require encrypted objects + bucket.set_policy(json_policy % bucket.name) + time.sleep(5) + + # now try to write unencrypted key + write_failed = False + try: + k.set_contents_from_string(s1) + except S3ResponseError: + write_failed = True + + assert write_failed + + # now try to write unencrypted key + write_failed = False + try: + k.set_contents_from_string(s1, encrypt_key=True) + except S3ResponseError: + write_failed = True + + assert not write_failed + + # Now do regular delete + k.delete() + time.sleep(5) + + # now delete bucket + bucket.delete() + print('--- tests completed ---') diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_https_cert_validation.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_https_cert_validation.py new file mode 100644 index 0000000000000000000000000000000000000000..9222a4a7075929f119afb980736627e68a153b8d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_https_cert_validation.py @@ -0,0 +1,141 @@ +# Copyright 2011 Google Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Tests to validate correct validation of SSL server certificates.
+
+Note that this test assumes two external dependencies are available:
+  - An HTTP proxy, which by default is assumed to be at host 'cache' and port
+    3128. This can be overridden with environment variables PROXY_HOST and
+    PROXY_PORT, respectively.
+  - An ssl-enabled web server that will return a valid certificate signed by one
+    of the bundled CAs, and which can be reached by an alternate hostname that
+    does not match the CN in that certificate. By default, this test uses host
+    'www' (without fully qualified domain). This can be overridden with
+    environment variable INVALID_HOSTNAME_HOST. If no suitable host is already
+    available, such a mapping can be established by temporarily adding an IP
+    address for, say, www.google.com or www.amazon.com to /etc/hosts.
+"""
+
+import os
+import ssl
+import unittest
+
+from nose.plugins.attrib import attr
+
+import boto
+from boto import exception, https_connection
+from boto.gs.connection import GSConnection
+from boto.s3.connection import S3Connection
+
+
+# File 'other_cacerts.txt' contains a valid CA certificate of a CA that is used
+# by neither S3 nor Google Cloud Storage. Validation against this CA cert should
+# result in a certificate error.
+DEFAULT_CA_CERTS_FILE = os.path.join(
+    os.path.dirname(os.path.abspath(__file__)), 'other_cacerts.txt')
+
+
+PROXY_HOST = os.environ.get('PROXY_HOST', 'cache')
+PROXY_PORT = os.environ.get('PROXY_PORT', '3128')
+
+# This test assumes that this host returns a certificate signed by one of the
+# trusted CAs, but with a Common Name that won't match host name 'www' (i.e.,
+# the server should return a certificate with a CN such as
+# 'www.<somedomain>.com').
+INVALID_HOSTNAME_HOST = os.environ.get('INVALID_HOSTNAME_HOST', 'www')
+
+
+@attr('notdefault', 'ssl')
+class CertValidationTest(unittest.TestCase):
+    def setUp(self):
+        # Clear config
+        for section in boto.config.sections():
+            boto.config.remove_section(section)
+
+        # Enable https_validate_certificates.
+        boto.config.add_section('Boto')
+        boto.config.setbool('Boto', 'https_validate_certificates', True)
+
+        # Set up bogus credentials so that the auth module is willing to go
+        # ahead and make a request; the request should fail with a service-level
+        # error if it does get to the service (S3 or GS).
+        boto.config.add_section('Credentials')
+        boto.config.set('Credentials', 'gs_access_key_id', 'xyz')
+        boto.config.set('Credentials', 'gs_secret_access_key', 'xyz')
+        boto.config.set('Credentials', 'aws_access_key_id', 'xyz')
+        boto.config.set('Credentials', 'aws_secret_access_key', 'xyz')
+
+    def enableProxy(self):
+        boto.config.set('Boto', 'proxy', PROXY_HOST)
+        boto.config.set('Boto', 'proxy_port', PROXY_PORT)
+
+    def assertConnectionThrows(self, connection_class, error):
+        conn = connection_class()
+        self.assertRaises(error, conn.get_all_buckets)
+
+    def do_test_valid_cert(self):
+        # When connecting to actual servers with bundled root certificates, no
+        # cert errors should be thrown; instead we will get "invalid
+        # credentials" errors since the config used does not contain any
+        # credentials.
+        self.assertConnectionThrows(S3Connection, exception.S3ResponseError)
+        self.assertConnectionThrows(GSConnection, exception.GSResponseError)
+
+    def test_valid_cert(self):
+        self.do_test_valid_cert()
+
+    def test_valid_cert_with_proxy(self):
+        self.enableProxy()
+        self.do_test_valid_cert()
+
+    def do_test_invalid_signature(self):
+        boto.config.set('Boto', 'ca_certificates_file', DEFAULT_CA_CERTS_FILE)
+        self.assertConnectionThrows(S3Connection, ssl.SSLError)
+        self.assertConnectionThrows(GSConnection, ssl.SSLError)
+
+    def test_invalid_signature(self):
+        self.do_test_invalid_signature()
+
+    def test_invalid_signature_with_proxy(self):
+        self.enableProxy()
+        self.do_test_invalid_signature()
+
+    def do_test_invalid_host(self):
+        boto.config.set('Credentials', 'gs_host', INVALID_HOSTNAME_HOST)
+        boto.config.set('Credentials', 's3_host', INVALID_HOSTNAME_HOST)
+        self.assertConnectionThrows(
+            S3Connection, https_connection.InvalidCertificateException)
+        self.assertConnectionThrows(
+            GSConnection, https_connection.InvalidCertificateException)
+
+    def test_invalid_host(self):
+        self.do_test_invalid_host()
+
+    def test_invalid_host_with_proxy(self):
+        self.enableProxy()
+        self.do_test_invalid_host()
+
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_key.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_key.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d426a267b8daa9ded2a22ef7e71d1f7cc5102c6
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_key.py
@@ -0,0 +1,534 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+ +""" +Some unit tests for S3 Key +""" + +from tests.unit import unittest +import time + +import boto.s3 +from boto.compat import six, StringIO, urllib +from boto.s3.connection import S3Connection +from boto.s3.key import Key +from boto.exception import S3ResponseError + + +class S3KeyTest(unittest.TestCase): + s3 = True + + def setUp(self): + self.conn = S3Connection() + self.bucket_name = 'keytest-%d' % int(time.time()) + self.bucket = self.conn.create_bucket(self.bucket_name) + + def tearDown(self): + for key in self.bucket: + key.delete() + self.bucket.delete() + + def test_set_contents_from_file_dataloss(self): + # Create an empty stringio and write to it. + content = "abcde" + sfp = StringIO() + sfp.write(content) + # Try set_contents_from_file() without rewinding sfp + k = self.bucket.new_key("k") + try: + k.set_contents_from_file(sfp) + self.fail("forgot to rewind so should fail.") + except AttributeError: + pass + # call with rewind and check if we wrote 5 bytes + k.set_contents_from_file(sfp, rewind=True) + self.assertEqual(k.size, 5) + # check actual contents by getting it. + kn = self.bucket.new_key("k") + ks = kn.get_contents_as_string().decode('utf-8') + self.assertEqual(ks, content) + + # finally, try with a 0 length string + sfp = StringIO() + k = self.bucket.new_key("k") + k.set_contents_from_file(sfp) + self.assertEqual(k.size, 0) + # check actual contents by getting it. + kn = self.bucket.new_key("k") + ks = kn.get_contents_as_string().decode('utf-8') + self.assertEqual(ks, "") + + def test_set_contents_as_file(self): + content="01234567890123456789" + sfp = StringIO(content) + + # fp is set at 0 for just opened (for read) files. + # set_contents should write full content to key. + k = self.bucket.new_key("k") + k.set_contents_from_file(sfp) + self.assertEqual(k.size, 20) + kn = self.bucket.new_key("k") + ks = kn.get_contents_as_string().decode('utf-8') + self.assertEqual(ks, content) + + # set fp to 5 and set contents. this should + # set "567890123456789" to the key + sfp.seek(5) + k = self.bucket.new_key("k") + k.set_contents_from_file(sfp) + self.assertEqual(k.size, 15) + kn = self.bucket.new_key("k") + ks = kn.get_contents_as_string().decode('utf-8') + self.assertEqual(ks, content[5:]) + + # set fp to 5 and only set 5 bytes. this should + # write the value "56789" to the key. + sfp.seek(5) + k = self.bucket.new_key("k") + k.set_contents_from_file(sfp, size=5) + self.assertEqual(k.size, 5) + self.assertEqual(sfp.tell(), 10) + kn = self.bucket.new_key("k") + ks = kn.get_contents_as_string().decode('utf-8') + self.assertEqual(ks, content[5:10]) + + def test_set_contents_with_md5(self): + content="01234567890123456789" + sfp = StringIO(content) + + # fp is set at 0 for just opened (for read) files. + # set_contents should write full content to key. + k = self.bucket.new_key("k") + good_md5 = k.compute_md5(sfp) + k.set_contents_from_file(sfp, md5=good_md5) + kn = self.bucket.new_key("k") + ks = kn.get_contents_as_string().decode('utf-8') + self.assertEqual(ks, content) + + # set fp to 5 and only set 5 bytes. this should + # write the value "56789" to the key. + sfp.seek(5) + k = self.bucket.new_key("k") + good_md5 = k.compute_md5(sfp, size=5) + k.set_contents_from_file(sfp, size=5, md5=good_md5) + self.assertEqual(sfp.tell(), 10) + kn = self.bucket.new_key("k") + ks = kn.get_contents_as_string().decode('utf-8') + self.assertEqual(ks, content[5:10]) + + # let's try a wrong md5 by just altering it. 
+ k = self.bucket.new_key("k") + sfp.seek(0) + hexdig, base64 = k.compute_md5(sfp) + bad_md5 = (hexdig, base64[3:]) + try: + k.set_contents_from_file(sfp, md5=bad_md5) + self.fail("should fail with bad md5") + except S3ResponseError: + pass + + def test_get_contents_with_md5(self): + content="01234567890123456789" + sfp = StringIO(content) + + k = self.bucket.new_key("k") + k.set_contents_from_file(sfp) + kn = self.bucket.new_key("k") + s = kn.get_contents_as_string().decode('utf-8') + self.assertEqual(kn.md5, k.md5) + self.assertEqual(s, content) + + def test_file_callback(self): + def callback(wrote, total): + self.my_cb_cnt += 1 + self.assertNotEqual(wrote, self.my_cb_last, "called twice with same value") + self.my_cb_last = wrote + + # Zero bytes written => 1 call + self.my_cb_cnt = 0 + self.my_cb_last = None + k = self.bucket.new_key("k") + k.BufferSize = 2 + sfp = StringIO("") + k.set_contents_from_file(sfp, cb=callback, num_cb=10) + self.assertEqual(self.my_cb_cnt, 1) + self.assertEqual(self.my_cb_last, 0) + sfp.close() + + # Read back zero bytes => 1 call + self.my_cb_cnt = 0 + self.my_cb_last = None + s = k.get_contents_as_string(cb=callback) + self.assertEqual(self.my_cb_cnt, 1) + self.assertEqual(self.my_cb_last, 0) + + content="01234567890123456789" + sfp = StringIO(content) + + # expect 2 calls due start/finish + self.my_cb_cnt = 0 + self.my_cb_last = None + k = self.bucket.new_key("k") + k.set_contents_from_file(sfp, cb=callback, num_cb=10) + self.assertEqual(self.my_cb_cnt, 2) + self.assertEqual(self.my_cb_last, 20) + + # Read back all bytes => 2 calls + self.my_cb_cnt = 0 + self.my_cb_last = None + s = k.get_contents_as_string(cb=callback).decode('utf-8') + self.assertEqual(self.my_cb_cnt, 2) + self.assertEqual(self.my_cb_last, 20) + self.assertEqual(s, content) + + # rewind sfp and try upload again. 
-1 should call + # for every read/write so that should make 11 when bs=2 + sfp.seek(0) + self.my_cb_cnt = 0 + self.my_cb_last = None + k = self.bucket.new_key("k") + k.BufferSize = 2 + k.set_contents_from_file(sfp, cb=callback, num_cb=-1) + self.assertEqual(self.my_cb_cnt, 11) + self.assertEqual(self.my_cb_last, 20) + + # Read back all bytes => 11 calls + self.my_cb_cnt = 0 + self.my_cb_last = None + s = k.get_contents_as_string(cb=callback, num_cb=-1).decode('utf-8') + self.assertEqual(self.my_cb_cnt, 11) + self.assertEqual(self.my_cb_last, 20) + self.assertEqual(s, content) + + # no more than 1 times => 2 times + # last time always 20 bytes + sfp.seek(0) + self.my_cb_cnt = 0 + self.my_cb_last = None + k = self.bucket.new_key("k") + k.BufferSize = 2 + k.set_contents_from_file(sfp, cb=callback, num_cb=1) + self.assertTrue(self.my_cb_cnt <= 2) + self.assertEqual(self.my_cb_last, 20) + + # no more than 1 times => 2 times + self.my_cb_cnt = 0 + self.my_cb_last = None + s = k.get_contents_as_string(cb=callback, num_cb=1).decode('utf-8') + self.assertTrue(self.my_cb_cnt <= 2) + self.assertEqual(self.my_cb_last, 20) + self.assertEqual(s, content) + + # no more than 2 times + # last time always 20 bytes + sfp.seek(0) + self.my_cb_cnt = 0 + self.my_cb_last = None + k = self.bucket.new_key("k") + k.BufferSize = 2 + k.set_contents_from_file(sfp, cb=callback, num_cb=2) + self.assertTrue(self.my_cb_cnt <= 2) + self.assertEqual(self.my_cb_last, 20) + + # no more than 2 times + self.my_cb_cnt = 0 + self.my_cb_last = None + s = k.get_contents_as_string(cb=callback, num_cb=2).decode('utf-8') + self.assertTrue(self.my_cb_cnt <= 2) + self.assertEqual(self.my_cb_last, 20) + self.assertEqual(s, content) + + # no more than 3 times + # last time always 20 bytes + sfp.seek(0) + self.my_cb_cnt = 0 + self.my_cb_last = None + k = self.bucket.new_key("k") + k.BufferSize = 2 + k.set_contents_from_file(sfp, cb=callback, num_cb=3) + self.assertTrue(self.my_cb_cnt <= 3) + self.assertEqual(self.my_cb_last, 20) + + # no more than 3 times + self.my_cb_cnt = 0 + self.my_cb_last = None + s = k.get_contents_as_string(cb=callback, num_cb=3).decode('utf-8') + self.assertTrue(self.my_cb_cnt <= 3) + self.assertEqual(self.my_cb_last, 20) + self.assertEqual(s, content) + + # no more than 4 times + # last time always 20 bytes + sfp.seek(0) + self.my_cb_cnt = 0 + self.my_cb_last = None + k = self.bucket.new_key("k") + k.BufferSize = 2 + k.set_contents_from_file(sfp, cb=callback, num_cb=4) + self.assertTrue(self.my_cb_cnt <= 4) + self.assertEqual(self.my_cb_last, 20) + + # no more than 4 times + self.my_cb_cnt = 0 + self.my_cb_last = None + s = k.get_contents_as_string(cb=callback, num_cb=4).decode('utf-8') + self.assertTrue(self.my_cb_cnt <= 4) + self.assertEqual(self.my_cb_last, 20) + self.assertEqual(s, content) + + # no more than 6 times + # last time always 20 bytes + sfp.seek(0) + self.my_cb_cnt = 0 + self.my_cb_last = None + k = self.bucket.new_key("k") + k.BufferSize = 2 + k.set_contents_from_file(sfp, cb=callback, num_cb=6) + self.assertTrue(self.my_cb_cnt <= 6) + self.assertEqual(self.my_cb_last, 20) + + # no more than 6 times + self.my_cb_cnt = 0 + self.my_cb_last = None + s = k.get_contents_as_string(cb=callback, num_cb=6).decode('utf-8') + self.assertTrue(self.my_cb_cnt <= 6) + self.assertEqual(self.my_cb_last, 20) + self.assertEqual(s, content) + + # no more than 10 times + # last time always 20 bytes + sfp.seek(0) + self.my_cb_cnt = 0 + self.my_cb_last = None + k = self.bucket.new_key("k") + k.BufferSize = 2 + 
k.set_contents_from_file(sfp, cb=callback, num_cb=10) + self.assertTrue(self.my_cb_cnt <= 10) + self.assertEqual(self.my_cb_last, 20) + + # no more than 10 times + self.my_cb_cnt = 0 + self.my_cb_last = None + s = k.get_contents_as_string(cb=callback, num_cb=10).decode('utf-8') + self.assertTrue(self.my_cb_cnt <= 10) + self.assertEqual(self.my_cb_last, 20) + self.assertEqual(s, content) + + # no more than 1000 times + # last time always 20 bytes + sfp.seek(0) + self.my_cb_cnt = 0 + self.my_cb_last = None + k = self.bucket.new_key("k") + k.BufferSize = 2 + k.set_contents_from_file(sfp, cb=callback, num_cb=1000) + self.assertTrue(self.my_cb_cnt <= 1000) + self.assertEqual(self.my_cb_last, 20) + + # no more than 1000 times + self.my_cb_cnt = 0 + self.my_cb_last = None + s = k.get_contents_as_string(cb=callback, num_cb=1000).decode('utf-8') + self.assertTrue(self.my_cb_cnt <= 1000) + self.assertEqual(self.my_cb_last, 20) + self.assertEqual(s, content) + + def test_website_redirects(self): + self.bucket.configure_website('index.html') + key = self.bucket.new_key('redirect-key') + self.assertTrue(key.set_redirect('http://www.amazon.com/')) + self.assertEqual(key.get_redirect(), 'http://www.amazon.com/') + + self.assertTrue(key.set_redirect('http://aws.amazon.com/')) + self.assertEqual(key.get_redirect(), 'http://aws.amazon.com/') + + def test_website_redirect_none_configured(self): + key = self.bucket.new_key('redirect-key') + key.set_contents_from_string('') + self.assertEqual(key.get_redirect(), None) + + def test_website_redirect_with_bad_value(self): + self.bucket.configure_website('index.html') + key = self.bucket.new_key('redirect-key') + with self.assertRaises(key.provider.storage_response_error): + # Must start with a / or http + key.set_redirect('ftp://ftp.example.org') + with self.assertRaises(key.provider.storage_response_error): + # Must start with a / or http + key.set_redirect('') + + def test_setting_date(self): + key = self.bucket.new_key('test_date') + # This should actually set x-amz-meta-date & not fail miserably. + key.set_metadata('date', '20130524T155935Z') + key.set_contents_from_string('Some text here.') + + check = self.bucket.get_key('test_date') + self.assertEqual(check.get_metadata('date'), u'20130524T155935Z') + self.assertTrue('x-amz-meta-date' in check._get_remote_metadata()) + + def test_header_casing(self): + key = self.bucket.new_key('test_header_case') + # Using anything but CamelCase on ``Content-Type`` or ``Content-MD5`` + # used to cause a signature error (when using ``s3`` for signing). 
+ key.set_metadata('Content-type', 'application/json') + key.set_metadata('Content-md5', 'XmUKnus7svY1frWsVskxXg==') + key.set_contents_from_string('{"abc": 123}') + + check = self.bucket.get_key('test_header_case') + self.assertEqual(check.content_type, 'application/json') + + def test_header_encoding(self): + key = self.bucket.new_key('test_header_encoding') + + key.set_metadata('Cache-control', u'public, max-age=500') + key.set_metadata('Test-Plus', u'A plus (+)') + key.set_metadata('Content-disposition', u'filename=Schöne Zeit.txt') + key.set_metadata('Content-Encoding', 'gzip') + key.set_metadata('Content-Language', 'de') + key.set_metadata('Content-Type', 'application/pdf') + self.assertEqual(key.content_type, 'application/pdf') + key.set_metadata('X-Robots-Tag', 'all') + key.set_metadata('Expires', u'Thu, 01 Dec 1994 16:00:00 GMT') + key.set_contents_from_string('foo') + + check = self.bucket.get_key('test_header_encoding') + remote_metadata = check._get_remote_metadata() + + # TODO: investigate whether encoding ' ' as '%20' makes sense + self.assertEqual(check.cache_control, 'public,%20max-age=500') + self.assertEqual(remote_metadata['cache-control'], 'public,%20max-age=500') + self.assertEqual(check.get_metadata('test-plus'), 'A plus (+)') + self.assertEqual(check.content_disposition, 'filename=Sch%C3%B6ne%20Zeit.txt') + self.assertEqual(remote_metadata['content-disposition'], 'filename=Sch%C3%B6ne%20Zeit.txt') + self.assertEqual(check.content_encoding, 'gzip') + self.assertEqual(remote_metadata['content-encoding'], 'gzip') + self.assertEqual(check.content_language, 'de') + self.assertEqual(remote_metadata['content-language'], 'de') + self.assertEqual(check.content_type, 'application/pdf') + self.assertEqual(remote_metadata['content-type'], 'application/pdf') + self.assertEqual(check.x_robots_tag, 'all') + self.assertEqual(remote_metadata['x-robots-tag'], 'all') + self.assertEqual(check.expires, 'Thu,%2001%20Dec%201994%2016:00:00%20GMT') + self.assertEqual(remote_metadata['expires'], 'Thu,%2001%20Dec%201994%2016:00:00%20GMT') + + expected = u'filename=Schöne Zeit.txt' + if six.PY2: + # Newer versions of python default to unicode strings, but python 2 + # requires encoding to UTF-8 to compare the two properly + expected = expected.encode('utf-8') + + self.assertEqual( + urllib.parse.unquote(check.content_disposition), + expected + ) + + def test_set_contents_with_sse_c(self): + content="01234567890123456789" + # the plain text of customer key is "01testKeyToSSEC!" 
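+        # (A sketch of how such values can be derived with the stdlib;
+        # AES256 SSE-C expects a 32-byte key, here the UTF-16LE bytes of
+        # that 16-character string, and the ...-key-MD5 header is the
+        # base64 of the raw key's MD5 digest:
+        #     import base64, hashlib
+        #     raw = u'01testKeyToSSEC!'.encode('utf-16-le')
+        #     b64_key = base64.b64encode(raw)
+        #     b64_md5 = base64.b64encode(hashlib.md5(raw).digest())
+        # )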
+ header = { + "x-amz-server-side-encryption-customer-algorithm" : + "AES256", + "x-amz-server-side-encryption-customer-key" : + "MAAxAHQAZQBzAHQASwBlAHkAVABvAFMAUwBFAEMAIQA=", + "x-amz-server-side-encryption-customer-key-MD5" : + "fUgCZDDh6bfEMuP2bN38mg==" + } + # upload and download content with AWS specified headers + k = self.bucket.new_key("testkey_for_sse_c") + k.set_contents_from_string(content, headers=header) + kn = self.bucket.new_key("testkey_for_sse_c") + ks = kn.get_contents_as_string(headers=header) + self.assertEqual(ks, content.encode('utf-8')) + + +class S3KeySigV4Test(unittest.TestCase): + def setUp(self): + self.conn = boto.s3.connect_to_region('eu-central-1') + self.bucket_name = 'boto-sigv4-key-%d' % int(time.time()) + self.bucket = self.conn.create_bucket(self.bucket_name, + location='eu-central-1') + + def tearDown(self): + for key in self.bucket: + key.delete() + self.bucket.delete() + + def test_put_get_with_non_string_headers_key(self): + k = Key(self.bucket) + k.key = 'foobar' + body = 'This is a test of S3' + # A content-length header will be added to this request since it + # has a body. + k.set_contents_from_string(body) + # Set a header that has an integer. This checks for a bug where + # the sigv4 signer assumes that all of the headers are strings. + headers = {'Content-Length': 0} + from_s3_key = self.bucket.get_key('foobar', headers=headers) + self.assertEqual(from_s3_key.get_contents_as_string().decode('utf-8'), + body) + + +class S3KeyVersionCopyTest(unittest.TestCase): + def setUp(self): + self.conn = S3Connection() + self.bucket_name = 'boto-key-version-copy-%d' % int(time.time()) + self.bucket = self.conn.create_bucket(self.bucket_name) + self.bucket.configure_versioning(True) + + def tearDown(self): + for key in self.bucket.list_versions(): + key.delete() + self.bucket.delete() + + def test_key_overwrite_and_copy(self): + first_content = "abcdefghijklm" + second_content = "nopqrstuvwxyz" + k = Key(self.bucket, 'testkey') + k.set_contents_from_string(first_content) + # Wait for S3's eventual consistency (may not be necessary) + while self.bucket.get_key('testkey') is None: + time.sleep(5) + # Get the first version_id + first_key = self.bucket.get_key('testkey') + first_version_id = first_key.version_id + # Overwrite the key + k = Key(self.bucket, 'testkey') + k.set_contents_from_string(second_content) + # Wait for eventual consistency + while True: + second_key = self.bucket.get_key('testkey') + if second_key is None or second_key.version_id == first_version_id: + time.sleep(5) + else: + break + # Copy first key (no longer the current version) to a new key + source_key = self.bucket.get_key('testkey', + version_id=first_version_id) + source_key.copy(self.bucket, 'copiedkey') + while self.bucket.get_key('copiedkey') is None: + time.sleep(5) + copied_key = self.bucket.get_key('copiedkey') + copied_key_contents = copied_key.get_contents_as_string() + self.assertEqual(first_content, copied_key_contents) + diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_mfa.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_mfa.py new file mode 100644 index 0000000000000000000000000000000000000000..1d6d62fc9e028e94ebf3a3e87a6fe7bfd90467dc --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_mfa.py @@ -0,0 +1,95 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All rights reserved. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Some unit tests for S3 MfaDelete with versioning +""" + +import unittest +import time +from nose.plugins.attrib import attr + +from boto.s3.connection import S3Connection +from boto.exception import S3ResponseError +from boto.s3.deletemarker import DeleteMarker + + +@attr('notdefault', 's3mfa') +class S3MFATest (unittest.TestCase): + + def setUp(self): + self.conn = S3Connection() + self.bucket_name = 'mfa-%d' % int(time.time()) + self.bucket = self.conn.create_bucket(self.bucket_name) + + def tearDown(self): + for k in self.bucket.list_versions(): + self.bucket.delete_key(k.name, version_id=k.version_id) + self.bucket.delete() + + def test_mfadel(self): + # Enable Versioning with MfaDelete + mfa_sn = raw_input('MFA S/N: ') + mfa_code = raw_input('MFA Code: ') + self.bucket.configure_versioning(True, mfa_delete=True, mfa_token=(mfa_sn, mfa_code)) + + # Check enabling mfa worked. + i = 0 + for i in range(1, 8): + time.sleep(2**i) + d = self.bucket.get_versioning_status() + if d['Versioning'] == 'Enabled' and d['MfaDelete'] == 'Enabled': + break + self.assertEqual('Enabled', d['Versioning']) + self.assertEqual('Enabled', d['MfaDelete']) + + # Add a key to the bucket + k = self.bucket.new_key('foobar') + s1 = 'This is v1' + k.set_contents_from_string(s1) + v1 = k.version_id + + # Now try to delete v1 without the MFA token + try: + self.bucket.delete_key('foobar', version_id=v1) + self.fail("Must fail if not using MFA token") + except S3ResponseError: + pass + + # Now try delete again with the MFA token + mfa_code = raw_input('MFA Code: ') + self.bucket.delete_key('foobar', version_id=v1, mfa_token=(mfa_sn, mfa_code)) + + # Next suspend versioning and disable MfaDelete on the bucket + mfa_code = raw_input('MFA Code: ') + self.bucket.configure_versioning(False, mfa_delete=False, mfa_token=(mfa_sn, mfa_code)) + + # Lastly, check disabling mfa worked. 
+ i = 0 + for i in range(1, 8): + time.sleep(2**i) + d = self.bucket.get_versioning_status() + if d['Versioning'] == 'Suspended' and d['MfaDelete'] != 'Enabled': + break + self.assertEqual('Suspended', d['Versioning']) + self.assertNotEqual('Enabled', d['MfaDelete']) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_multidelete.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_multidelete.py new file mode 100644 index 0000000000000000000000000000000000000000..b22581bbae3354df258fd927fd54c94c883001f5 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_multidelete.py @@ -0,0 +1,181 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Some unit tests for the S3 MultiDelete +""" + +import unittest +import time +from boto.s3.key import Key +from boto.s3.deletemarker import DeleteMarker +from boto.s3.prefix import Prefix +from boto.s3.connection import S3Connection +from boto.exception import S3ResponseError + +class S3MultiDeleteTest(unittest.TestCase): + s3 = True + + def setUp(self): + self.conn = S3Connection() + self.bucket_name = 'multidelete-%d' % int(time.time()) + self.bucket = self.conn.create_bucket(self.bucket_name) + + def tearDown(self): + for key in self.bucket: + key.delete() + self.bucket.delete() + + def test_delete_nothing(self): + result = self.bucket.delete_keys([]) + self.assertEqual(len(result.deleted), 0) + self.assertEqual(len(result.errors), 0) + + def test_delete_illegal(self): + result = self.bucket.delete_keys([{"dict":"notallowed"}]) + self.assertEqual(len(result.deleted), 0) + self.assertEqual(len(result.errors), 1) + + def test_delete_mix(self): + result = self.bucket.delete_keys(["king", + ("mice", None), + Key(name="regular"), + Key(), + Prefix(name="folder/"), + DeleteMarker(name="deleted"), + {"bad":"type"}]) + self.assertEqual(len(result.deleted), 4) + self.assertEqual(len(result.errors), 3) + + def test_delete_quietly(self): + result = self.bucket.delete_keys(["king"], quiet=True) + self.assertEqual(len(result.deleted), 0) + self.assertEqual(len(result.errors), 0) + + def test_delete_must_escape(self): + result = self.bucket.delete_keys([Key(name=">_<;")]) + self.assertEqual(len(result.deleted), 1) + self.assertEqual(len(result.errors), 0) + + def test_delete_unknown_version(self): + no_ver = Key(name="no") + no_ver.version_id = "version" + result = self.bucket.delete_keys([no_ver]) + self.assertEqual(len(result.deleted), 0) + self.assertEqual(len(result.errors), 1) + + def test_delete_kanji(self): + result = self.bucket.delete_keys([u"漢字", Key(name=u"日本語")]) + self.assertEqual(len(result.deleted), 2) + self.assertEqual(len(result.errors), 0) + + def test_delete_empty_by_list(self): + result = self.bucket.delete_keys(self.bucket.list()) + self.assertEqual(len(result.deleted), 0) + self.assertEqual(len(result.errors), 0) + + def test_delete_kanji_by_list(self): + for key_name in [u"漢字", u"日本語", u"テスト"]: + key = self.bucket.new_key(key_name) + key.set_contents_from_string('this is a test') + result = self.bucket.delete_keys(self.bucket.list()) + self.assertEqual(len(result.deleted), 3) + self.assertEqual(len(result.errors), 0) + + def test_delete_with_prefixes(self): + for key_name in ["a", "a/b", "b"]: + key = self.bucket.new_key(key_name) + key.set_contents_from_string('this is a test') + + # First delete all "files": "a" and "b" + result = self.bucket.delete_keys(self.bucket.list(delimiter="/")) + self.assertEqual(len(result.deleted), 2) + # Using delimiter will cause 1 common prefix to be listed + # which will be skipped as an error. + self.assertEqual(len(result.errors), 1) + self.assertEqual(result.errors[0].key, "a/") + + # Next delete any remaining objects: "a/b" + result = self.bucket.delete_keys(self.bucket.list()) + self.assertEqual(len(result.deleted), 1) + self.assertEqual(len(result.errors), 0) + self.assertEqual(result.deleted[0].key, "a/b") + + def test_delete_too_many_versions(self): + # configure versioning first + self.bucket.configure_versioning(True) + + # Add 1000 initial versions as DMs by deleting them :-) + # Adding 1000 objects is painful otherwise... 
+        key_names = ['key-%03d' % i for i in range(0, 1000)]
+        result = self.bucket.delete_keys(key_names)
+        self.assertEqual(len(result.deleted), 1000)
+        self.assertEqual(len(result.errors), 0)
+
+        # delete them again to create 1000 more delete markers
+        result = self.bucket.delete_keys(key_names)
+        self.assertEqual(len(result.deleted), 1000)
+        self.assertEqual(len(result.errors), 0)
+
+        # It sometimes takes AWS some time to settle
+        time.sleep(10)
+
+        # delete all versions to delete 2000 objects.
+        # this tests the 1000 limit.
+        result = self.bucket.delete_keys(self.bucket.list_versions())
+        self.assertEqual(len(result.deleted), 2000)
+        self.assertEqual(len(result.errors), 0)
+
+    def test_1(self):
+        nkeys = 100
+
+        # create a bunch of keynames
+        key_names = ['key-%03d' % i for i in range(0, nkeys)]
+
+        # create the corresponding keys
+        for key_name in key_names:
+            key = self.bucket.new_key(key_name)
+            key.set_contents_from_string('this is a test')
+
+        # now count keys in bucket
+        n = 0
+        for key in self.bucket:
+            n += 1
+
+        self.assertEqual(n, nkeys)
+
+        # now delete them all
+        result = self.bucket.delete_keys(key_names)
+
+        self.assertEqual(len(result.deleted), nkeys)
+        self.assertEqual(len(result.errors), 0)
+
+        time.sleep(5)
+
+        # now count keys in bucket
+        n = 0
+        for key in self.bucket:
+            n += 1
+
+        self.assertEqual(n, 0)
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_multipart.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_multipart.py
new file mode 100644
index 0000000000000000000000000000000000000000..78647963cf74dd71780a87d96fb2f7c2add5b54a
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_multipart.py
@@ -0,0 +1,229 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Some unit tests for the S3 MultiPartUpload
+"""
+
+# Note:
+# Multipart uploads require at least one part. If you upload
+# multiple parts then all parts except the last part have to be
+# bigger than 5M. Hence we just use 1 part so we can keep
+# things small and still test logic.
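Stripped of assertions, the happy path these multipart tests exercise is short: initiate, upload one part, complete. A hedged sketch using only calls that appear in this file (bucket name illustrative; credentials assumed to come from the usual boto config):

    import time
    from boto.compat import StringIO
    from boto.s3.connection import S3Connection

    conn = S3Connection()
    bucket = conn.create_bucket('mpu-demo-%d' % int(time.time()))
    mpu = bucket.initiate_multipart_upload('example-key')
    fp = StringIO('small file')  # a single part dodges the 5MB minimum
    mpu.upload_part_from_file(fp, part_num=1)
    fp.close()
    cmpu = mpu.complete_upload()  # exposes key_name and etag
    print(cmpu.key_name, cmpu.etag)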
+
+import os
+import unittest
+import time
+from boto.compat import StringIO
+
+import mock
+
+import boto
+from boto.s3.connection import S3Connection
+
+
+class S3MultiPartUploadTest(unittest.TestCase):
+    s3 = True
+
+    def setUp(self):
+        self.conn = S3Connection(is_secure=False)
+        self.bucket_name = 'multipart-%d' % int(time.time())
+        self.bucket = self.conn.create_bucket(self.bucket_name)
+
+    def tearDown(self):
+        for key in self.bucket:
+            key.delete()
+        self.bucket.delete()
+
+    def test_abort(self):
+        key_name = u"テスト"
+        mpu = self.bucket.initiate_multipart_upload(key_name)
+        mpu.cancel_upload()
+
+    def test_complete_ascii(self):
+        key_name = "test"
+        mpu = self.bucket.initiate_multipart_upload(key_name)
+        fp = StringIO("small file")
+        mpu.upload_part_from_file(fp, part_num=1)
+        fp.close()
+        cmpu = mpu.complete_upload()
+        self.assertEqual(cmpu.key_name, key_name)
+        self.assertNotEqual(cmpu.etag, None)
+
+    def test_complete_japanese(self):
+        key_name = u"テスト"
+        mpu = self.bucket.initiate_multipart_upload(key_name)
+        fp = StringIO("small file")
+        mpu.upload_part_from_file(fp, part_num=1)
+        fp.close()
+        cmpu = mpu.complete_upload()
+        self.assertEqual(cmpu.key_name, key_name)
+        self.assertNotEqual(cmpu.etag, None)
+
+    def test_list_japanese(self):
+        key_name = u"テスト"
+        mpu = self.bucket.initiate_multipart_upload(key_name)
+        rs = self.bucket.list_multipart_uploads()
+        # New bucket, so only one upload expected
+        lmpu = next(iter(rs))
+        self.assertEqual(lmpu.id, mpu.id)
+        self.assertEqual(lmpu.key_name, key_name)
+        # Abort using the one returned in the list
+        lmpu.cancel_upload()
+
+    def test_list_multipart_uploads(self):
+        key_name = u"テスト"
+        mpus = []
+        mpus.append(self.bucket.initiate_multipart_upload(key_name))
+        mpus.append(self.bucket.initiate_multipart_upload(key_name))
+        rs = self.bucket.list_multipart_uploads()
+        # uploads (for a key) are returned in time initiated asc order
+        for lmpu in rs:
+            ompu = mpus.pop(0)
+            self.assertEqual(lmpu.key_name, ompu.key_name)
+            self.assertEqual(lmpu.id, ompu.id)
+        self.assertEqual(0, len(mpus))
+
+    def test_get_all_multipart_uploads(self):
+        key1 = 'a'
+        key2 = 'b/c'
+        mpu1 = self.bucket.initiate_multipart_upload(key1)
+        mpu2 = self.bucket.initiate_multipart_upload(key2)
+        rs = self.bucket.get_all_multipart_uploads(prefix='b/', delimiter='/')
+        for lmpu in rs:
+            # only expect upload for key2 (mpu2) returned
+            self.assertEqual(lmpu.key_name, mpu2.key_name)
+            self.assertEqual(lmpu.id, mpu2.id)
+
+    def test_four_part_file(self):
+        key_name = "k"
+        contents = "01234567890123456789"
+        sfp = StringIO(contents)
+
+        # upload 20 bytes in 4 parts of 5 bytes each
+        mpu = self.bucket.initiate_multipart_upload(key_name)
+        mpu.upload_part_from_file(sfp, part_num=1, size=5)
+        mpu.upload_part_from_file(sfp, part_num=2, size=5)
+        mpu.upload_part_from_file(sfp, part_num=3, size=5)
+        mpu.upload_part_from_file(sfp, part_num=4, size=5)
+        sfp.close()
+
+        etags = {}
+        pn = 0
+        for part in mpu:
+            pn += 1
+            self.assertEqual(5, part.size)
+            etags[pn] = part.etag
+        self.assertEqual(pn, 4)
+        # etags for 01234
+        self.assertEqual(etags[1], etags[3])
+        # etags for 56789
+        self.assertEqual(etags[2], etags[4])
+        # etag 01234 != etag 56789
+        self.assertNotEqual(etags[1], etags[2])
+
+        # parts are too small to complete as each part must
+        # be a min of 5MB so we'll assume that is enough
+        # testing and abort the upload.
+        mpu.cancel_upload()
+
+    # mpu.upload_part_from_file() now returns the uploaded part
+    # which makes the etag available. Confirm the etag is
+    # available and equal to the etag returned by the parts list.
+    def test_etag_of_parts(self):
+        key_name = "etagtest"
+        mpu = self.bucket.initiate_multipart_upload(key_name)
+        fp = StringIO("small file")
+        # upload 2 parts and save each part
+        uparts = []
+        uparts.append(mpu.upload_part_from_file(fp, part_num=1, size=5))
+        uparts.append(mpu.upload_part_from_file(fp, part_num=2))
+        fp.close()
+        # compare uploaded parts etag to listed parts
+        pn = 0
+        for lpart in mpu:
+            self.assertEqual(uparts[pn].etag, lpart.etag)
+            pn += 1
+        # Can't complete 2 small parts so just clean up.
+        mpu.cancel_upload()
+
+
+class S3MultiPartUploadSigV4Test(unittest.TestCase):
+    s3 = True
+
+    def setUp(self):
+        self.env_patch = mock.patch('os.environ', {'S3_USE_SIGV4': True})
+        self.env_patch.start()
+        self.conn = boto.s3.connect_to_region('us-west-2')
+        self.bucket_name = 'multipart-%d' % int(time.time())
+        self.bucket = self.conn.create_bucket(self.bucket_name,
+                                              location='us-west-2')
+
+    def tearDown(self):
+        for key in self.bucket:
+            key.delete()
+        self.bucket.delete()
+        self.env_patch.stop()
+
+    def test_initiate_multipart(self):
+        key_name = "multipart"
+        multipart_upload = self.bucket.initiate_multipart_upload(key_name)
+        multipart_uploads = self.bucket.get_all_multipart_uploads()
+        for upload in multipart_uploads:
+            # Check that the multipart upload was created.
+            self.assertEqual(upload.key_name, multipart_upload.key_name)
+            self.assertEqual(upload.id, multipart_upload.id)
+        multipart_upload.cancel_upload()
+
+    def test_upload_part_by_size(self):
+        key_name = "k"
+        contents = "01234567890123456789"
+        sfp = StringIO(contents)
+
+        # upload 20 bytes in 4 parts of 5 bytes each
+        mpu = self.bucket.initiate_multipart_upload(key_name)
+        mpu.upload_part_from_file(sfp, part_num=1, size=5)
+        mpu.upload_part_from_file(sfp, part_num=2, size=5)
+        mpu.upload_part_from_file(sfp, part_num=3, size=5)
+        mpu.upload_part_from_file(sfp, part_num=4, size=5)
+        sfp.close()
+
+        etags = {}
+        pn = 0
+        for part in mpu:
+            pn += 1
+            self.assertEqual(5, part.size)
+            etags[pn] = part.etag
+        self.assertEqual(pn, 4)
+        # etags for 01234
+        self.assertEqual(etags[1], etags[3])
+        # etags for 56789
+        self.assertEqual(etags[2], etags[4])
+        # etag 01234 != etag 56789
+        self.assertNotEqual(etags[1], etags[2])
+
+        # parts are too small to complete as each part must
+        # be a min of 5MB so we'll assume that is enough
+        # testing and abort the upload.
+        mpu.cancel_upload()
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_pool.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_pool.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f8bdbc82f5c050696e6700b008a2c31e3ad76a7
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_pool.py
@@ -0,0 +1,245 @@
+# Copyright (c) 2011 Brian Beach
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Some multi-threading tests of boto in a greenlet environment.
+"""
+from __future__ import print_function
+
+import boto
+import time
+import uuid
+
+from threading import Thread
+
+def spawn(function, *args, **kwargs):
+    """
+    Spawns a new thread. API is the same as
+    gevent.greenlet.Greenlet.spawn.
+    """
+    t = Thread(target=function, args=args, kwargs=kwargs)
+    t.start()
+    return t
+
+def put_object(bucket, name):
+    bucket.new_key(name).set_contents_from_string(name)
+
+def get_object(bucket, name):
+    assert bucket.get_key(name).get_contents_as_string().decode('utf-8') == name
+
+def test_close_connections():
+    """
+    A test that exposes the problem where connections are returned to the
+    connection pool (and closed) before the caller reads the response.
+
+    I couldn't think of a way to test it without greenlets, so this test
+    doesn't run as part of the standard test suite. That way, no more
+    dependencies are added to the test suite.
+    """
+
+    print("Running test_close_connections")
+
+    # Connect to S3
+    s3 = boto.connect_s3()
+
+    # Clean previous tests.
+    for b in s3.get_all_buckets():
+        if b.name.startswith('test-'):
+            for key in b.get_all_keys():
+                key.delete()
+            b.delete()
+
+    # Make a test bucket
+    bucket = s3.create_bucket('test-%d' % int(time.time()))
+
+    # Create 30 threads that each create an object in S3. The number
+    # 30 is chosen because it is larger than the connection pool size
+    # (20).
+    names = [str(uuid.uuid4()) for _ in range(30)]
+    threads = [
+        spawn(put_object, bucket, name)
+        for name in names
+        ]
+    for t in threads:
+        t.join()
+
+    # Create 30 threads to read the contents of the new objects. This
+    # is where closing the connection early is a problem, because
+    # there is a response that needs to be read, and it can't be read
+    # if the connection has already been closed.
+    threads = [
+        spawn(get_object, bucket, name)
+        for name in names
+        ]
+    for t in threads:
+        t.join()
+
+# test_reuse_connections needs to read a file that is big enough that
+# one read() call on the socket won't read the whole thing.
+BIG_SIZE = 10000
+
+class WriteAndCount(object):
+
+    """
+    A file-like object that counts the number of characters written.
+    """
+
+    def __init__(self):
+        self.size = 0
+
+    def write(self, data):
+        self.size += len(data)
+        time.sleep(0)  # yield to other threads
+
+def read_big_object(s3, bucket, name, count):
+    for _ in range(count):
+        key = bucket.get_key(name)
+        out = WriteAndCount()
+        key.get_contents_to_file(out)
+        if out.size != BIG_SIZE:
+            print(out.size, BIG_SIZE)
+        assert out.size == BIG_SIZE
+        print(" pool size:", s3._pool.size())
+
+class LittleQuerier(object):
+
+    """
+    An object that manages a thread that keeps pulling down small
+    objects from S3 and checking the answers until told to stop.
+ """ + + def __init__(self, bucket, small_names): + self.running = True + self.bucket = bucket + self.small_names = small_names + self.thread = spawn(self.run) + + def stop(self): + self.running = False + self.thread.join() + + def run(self): + count = 0 + while self.running: + i = count % 4 + key = self.bucket.get_key(self.small_names[i]) + expected = str(i) + rh = { 'response-content-type' : 'small/' + str(i) } + actual = key.get_contents_as_string(response_headers = rh).decode('utf-8') + if expected != actual: + print("AHA:", repr(expected), repr(actual)) + assert expected == actual + count += 1 + +def test_reuse_connections(): + """ + This test is an attempt to expose problems because of the fact + that boto returns connections to the connection pool before + reading the response. The strategy is to start a couple big reads + from S3, where it will take time to read the response, and then + start other requests that will reuse the same connection from the + pool while the big response is still being read. + + The test passes because of an interesting combination of factors. + I was expecting that it would fail because two threads would be + reading the same connection at the same time. That doesn't happen + because httplib catches the problem before it happens and raises + an exception. + + Here's the sequence of events: + + - Thread 1: Send a request to read a big S3 object. + - Thread 1: Returns connection to pool. + - Thread 1: Start reading the body if the response. + + - Thread 2: Get the same connection from the pool. + - Thread 2: Send another request on the same connection. + - Thread 2: Try to read the response, but + HTTPConnection.get_response notices that the + previous response isn't done reading yet, and + raises a ResponseNotReady exception. + - Thread 2: _mexe catches the exception, does not return the + connection to the pool, gets a new connection, and + retries. + + - Thread 1: Finish reading the body of its response. + + - Server: Gets the second request on the connection, and + sends a response. This response is ignored because + the connection has been dropped on the client end. + + If you add a print statement in HTTPConnection.get_response at the + point where it raises ResponseNotReady, and then run this test, + you can see that it's happening. + """ + + print("Running test_reuse_connections") + + # Connect to S3 + s3 = boto.connect_s3() + + # Make a test bucket + bucket = s3.create_bucket('test-%d' % int(time.time())) + + # Create some small objects in S3. + small_names = [str(uuid.uuid4()) for _ in range(4)] + for (i, name) in enumerate(small_names): + bucket.new_key(name).set_contents_from_string(str(i)) + + # Wait, clean the connection pool, and make sure it's empty. + print(" waiting for all connections to become stale") + time.sleep(s3._pool.STALE_DURATION + 1) + s3._pool.clean() + assert s3._pool.size() == 0 + print(" pool is empty") + + # Create a big object in S3. + big_name = str(uuid.uuid4()) + contents = "-" * BIG_SIZE + bucket.new_key(big_name).set_contents_from_string(contents) + + # Start some threads to read it and check that they are reading + # the correct thing. Each thread will read the object 40 times. + threads = [ + spawn(read_big_object, s3, bucket, big_name, 20) + for _ in range(5) + ] + + # Do some other things that may (incorrectly) re-use the same + # connections while the big objects are being read. + queriers = [ + LittleQuerier(bucket, small_names) + for _ in range(5) + ] + + # Clean up. 
+ for t in threads: + t.join() + for q in queriers: + q.stop() + +def main(): + test_close_connections() + test_reuse_connections() + +if __name__ == '__main__': + main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_versioning.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_versioning.py new file mode 100644 index 0000000000000000000000000000000000000000..6758212ef63a88937639d53a85f5081310192518 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/s3/test_versioning.py @@ -0,0 +1,159 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Some unit tests for the S3 Versioning. 
+""" + +import unittest +import time +from boto.s3.connection import S3Connection +from boto.exception import S3ResponseError +from boto.s3.deletemarker import DeleteMarker +from boto.compat import six + +class S3VersionTest (unittest.TestCase): + + def setUp(self): + self.conn = S3Connection() + self.bucket_name = 'version-%d' % int(time.time()) + self.bucket = self.conn.create_bucket(self.bucket_name) + + def tearDown(self): + for k in self.bucket.list_versions(): + self.bucket.delete_key(k.name, version_id=k.version_id) + self.bucket.delete() + + def test_1_versions(self): + # check versioning off + d = self.bucket.get_versioning_status() + self.assertFalse('Versioning' in d) + + # enable versioning + self.bucket.configure_versioning(versioning=True) + d = self.bucket.get_versioning_status() + self.assertEqual('Enabled', d['Versioning']) + + # create a new key in the versioned bucket + k = self.bucket.new_key("foobar") + s1 = 'This is v1' + k.set_contents_from_string(s1) + + # remember the version id of this object + v1 = k.version_id + + # now get the contents from s3 + o1 = k.get_contents_as_string().decode('utf-8') + + # check to make sure content read from k is identical to original + self.assertEqual(s1, o1) + + # now overwrite that same key with new data + s2 = 'This is v2' + k.set_contents_from_string(s2) + v2 = k.version_id + + # now retrieve latest contents as a string and compare + k2 = self.bucket.new_key("foobar") + o2 = k2.get_contents_as_string().decode('utf-8') + self.assertEqual(s2, o2) + + # next retrieve explicit versions and compare + o1 = k.get_contents_as_string(version_id=v1).decode('utf-8') + o2 = k.get_contents_as_string(version_id=v2).decode('utf-8') + self.assertEqual(s1, o1) + self.assertEqual(s2, o2) + + # Now list all versions and compare to what we have + rs = self.bucket.get_all_versions() + self.assertEqual(v2, rs[0].version_id) + self.assertEqual(v1, rs[1].version_id) + + # Now do a regular list command and make sure only the new key shows up + rs = self.bucket.get_all_keys() + self.assertEqual(1, len(rs)) + + # Now do regular delete + self.bucket.delete_key('foobar') + + # Now list versions and make sure old versions are there + # plus the DeleteMarker which is latest. + rs = self.bucket.get_all_versions() + self.assertEqual(3, len(rs)) + self.assertTrue(isinstance(rs[0], DeleteMarker)) + + # Now delete v1 of the key + self.bucket.delete_key('foobar', version_id=v1) + + # Now list versions again and make sure v1 is not there + rs = self.bucket.get_all_versions() + versions = [k.version_id for k in rs] + self.assertTrue(v1 not in versions) + self.assertTrue(v2 in versions) + + # Now suspend Versioning on the bucket + self.bucket.configure_versioning(False) + # Allow time for the change to fully propagate. 
+ time.sleep(3) + d = self.bucket.get_versioning_status() + self.assertEqual('Suspended', d['Versioning']) + + def test_latest_version(self): + self.bucket.configure_versioning(versioning=True) + + # add v1 of an object + key_name = "key" + kv1 = self.bucket.new_key(key_name) + kv1.set_contents_from_string("v1") + + # read list which should contain latest v1 + listed_kv1 = next(iter(self.bucket.get_all_versions())) + self.assertEqual(listed_kv1.name, key_name) + self.assertEqual(listed_kv1.version_id, kv1.version_id) + self.assertEqual(listed_kv1.is_latest, True) + + # add v2 of the object + kv2 = self.bucket.new_key(key_name) + kv2.set_contents_from_string("v2") + + # read 2 versions, confirm v2 is latest + i = iter(self.bucket.get_all_versions()) + listed_kv2 = next(i) + listed_kv1 = next(i) + self.assertEqual(listed_kv2.version_id, kv2.version_id) + self.assertEqual(listed_kv1.version_id, kv1.version_id) + self.assertEqual(listed_kv2.is_latest, True) + self.assertEqual(listed_kv1.is_latest, False) + + # delete key, which creates a delete marker as latest + self.bucket.delete_key(key_name) + i = iter(self.bucket.get_all_versions()) + listed_kv3 = next(i) + listed_kv2 = next(i) + listed_kv1 = next(i) + self.assertNotEqual(listed_kv3.version_id, None) + self.assertEqual(listed_kv2.version_id, kv2.version_id) + self.assertEqual(listed_kv1.version_id, kv1.version_id) + self.assertEqual(listed_kv3.is_latest, True) + self.assertEqual(listed_kv2.is_latest, False) + self.assertEqual(listed_kv1.is_latest, False) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/sdb/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/sdb/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b3fc3a0c31bff21cc9c8e0a0189f02cee820e64e --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/sdb/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/sdb/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/sdb/test_cert_verification.py new file mode 100644 index 0000000000000000000000000000000000000000..ebb8949f2ad5fd41ea8f3f4ea3d107d6e94ee0ba --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/sdb/test_cert_verification.py @@ -0,0 +1,39 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on all service endpoints validate. +""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.sdb + + +class SDBCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + sdb = True + regions = boto.sdb.regions() + + def sample_service_call(self, conn): + conn.get_all_domains() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/sdb/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/sdb/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..03e83b3401afe65a127c6f3a642f10c739f284ac --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/sdb/test_connection.py @@ -0,0 +1,118 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
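The SDBConnection test that follows walks the full domain lifecycle. Condensed to its core round-trip, and under the same assumption that credentials come from the standard boto config (domain name illustrative), it amounts to:

    import time
    from boto.sdb.connection import SDBConnection

    c = SDBConnection()
    domain = c.create_domain('demo%d' % int(time.time()))
    domain.put_attributes('item1', {'name1': 'value1'})
    # consistent_read avoids SDB's eventually-consistent default
    item = domain.get_attributes('item1', consistent_read=True)
    assert item['name1'] == 'value1'
    c.delete_domain(domain)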
+ +""" +Some unit tests for the SDBConnection +""" + +import unittest +import time +from boto.sdb.connection import SDBConnection +from boto.exception import SDBResponseError + +class SDBConnectionTest (unittest.TestCase): + sdb = True + + def test_1_basic(self): + print('--- running SDBConnection tests ---') + c = SDBConnection() + rs = c.get_all_domains() + num_domains = len(rs) + + # try illegal name + try: + domain = c.create_domain('bad:domain:name') + except SDBResponseError: + pass + + # now create one that should work and should be unique (i.e. a new one) + domain_name = 'test%d' % int(time.time()) + domain = c.create_domain(domain_name) + rs = c.get_all_domains() + assert len(rs) == num_domains + 1 + + # now let's a couple of items and attributes + item_1 = 'item1' + same_value = 'same_value' + attrs_1 = {'name1': same_value, 'name2': 'diff_value_1'} + domain.put_attributes(item_1, attrs_1) + item_2 = 'item2' + attrs_2 = {'name1': same_value, 'name2': 'diff_value_2'} + domain.put_attributes(item_2, attrs_2) + + # try to get the attributes and see if they match + item = domain.get_attributes(item_1, consistent_read=True) + assert len(item.keys()) == len(attrs_1.keys()) + assert item['name1'] == attrs_1['name1'] + assert item['name2'] == attrs_1['name2'] + + # try a search or two + query = 'select * from %s where name1="%s"' % (domain_name, same_value) + rs = domain.select(query, consistent_read=True) + n = 0 + for item in rs: + n += 1 + assert n == 2 + query = 'select * from %s where name2="diff_value_2"' % domain_name + rs = domain.select(query, consistent_read=True) + n = 0 + for item in rs: + n += 1 + assert n == 1 + + # delete all attributes associated with item_1 + stat = domain.delete_attributes(item_1) + assert stat + + # now try a batch put operation on the domain + item3 = {'name3_1': 'value3_1', + 'name3_2': 'value3_2', + 'name3_3': ['value3_3_1', 'value3_3_2']} + + item4 = {'name4_1': 'value4_1', + 'name4_2': ['value4_2_1', 'value4_2_2'], + 'name4_3': 'value4_3'} + items = {'item3': item3, 'item4': item4} + domain.batch_put_attributes(items) + + item = domain.get_attributes('item3', consistent_read=True) + assert item['name3_2'] == 'value3_2' + + # now try a batch delete operation (variation #1) + items = {'item3': item3} + stat = domain.batch_delete_attributes(items) + + item = domain.get_attributes('item3', consistent_read=True) + assert not item + + # now try a batch delete operation (variation #2) + stat = domain.batch_delete_attributes({'item4': None}) + + item = domain.get_attributes('item4', consistent_read=True) + assert not item + + # now delete the domain + stat = c.delete_domain(domain) + assert stat + + print('--- tests completed ---') diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/ses/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/ses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/ses/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/ses/test_cert_verification.py new file mode 100644 index 0000000000000000000000000000000000000000..7add339d8fcace6ed27988547c3d6205e05d0d1a --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/ses/test_cert_verification.py @@ -0,0 +1,39 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. 
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on all service endpoints validate.
+"""
+import unittest
+
+from tests.integration import ServiceCertVerificationTest
+
+import boto.ses
+
+
+class SESCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
+    ses = True
+    regions = boto.ses.regions()
+
+    def sample_service_call(self, conn):
+        conn.list_verified_email_addresses()
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/ses/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/ses/test_connection.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e849e9fcc77110f1b1f36ec56775501f64d2740
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/ses/test_connection.py
@@ -0,0 +1,38 @@
+from tests.unit import unittest
+
+from boto.ses.connection import SESConnection
+from boto.ses import exceptions
+
+
+class SESConnectionTest(unittest.TestCase):
+    ses = True
+
+    def setUp(self):
+        self.ses = SESConnection()
+
+    def test_get_dkim_attributes(self):
+        response = self.ses.get_identity_dkim_attributes(['example.com'])
+        # Verify we get the structure we expect, we don't care about the
+        # values.
+        self.assertTrue('GetIdentityDkimAttributesResponse' in response)
+        self.assertTrue('GetIdentityDkimAttributesResult' in
+                        response['GetIdentityDkimAttributesResponse'])
+        self.assertTrue(
+            'DkimAttributes' in response['GetIdentityDkimAttributesResponse']
+            ['GetIdentityDkimAttributesResult'])
+
+    def test_set_identity_dkim_enabled(self):
+        # This api call should fail because we have not verified the domain,
+        # so we can test that it at least fails as we expect.
+        with self.assertRaises(exceptions.SESIdentityNotVerifiedError):
+            self.ses.set_identity_dkim_enabled('example.com', True)
+
+    def test_verify_domain_dkim(self):
+        # This api call should fail because we have not confirmed the domain,
+        # so we can test that it at least fails as we expect.
+        with self.assertRaises(exceptions.SESDomainNotConfirmedError):
+            self.ses.verify_domain_dkim('example.com')
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/sns/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/sns/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b3fc3a0c31bff21cc9c8e0a0189f02cee820e64e
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/sns/__init__.py
@@ -0,0 +1,20 @@
+# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/sns/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/sns/test_cert_verification.py
new file mode 100644
index 0000000000000000000000000000000000000000..53b6b9f2acfa3caa50d0e7517655e8ed0ded0805
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/sns/test_cert_verification.py
@@ -0,0 +1,39 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on SNS endpoints validate.
+""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.sns + + +class SNSCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + sns = True + regions = boto.sns.regions() + + def sample_service_call(self, conn): + conn.get_all_topics() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/sns/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/sns/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..6a359b1b7f803eea24bf1edeaacb26753553245b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/sns/test_connection.py @@ -0,0 +1,68 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +from tests.compat import mock, unittest +from boto.compat import http_client +from boto.sns import connect_to_region + + +class StubResponse(object): + status = 403 + reason = 'nopenopenope' + + def getheader(self, val): + return b'' + + def getheaders(self): + return b'' + + def read(self): + return b'' + + +class TestSNSConnection(unittest.TestCase): + + sns = True + + def setUp(self): + self.connection = connect_to_region('us-west-2') + + def test_list_platform_applications(self): + response = self.connection.list_platform_applications() + + def test_forced_host(self): + # This test asserts that the ``Host`` header is correctly set. + # On Python 2.5(.6), not having this in place would cause any SigV4 + # calls to fail, due to a signature mismatch (the port would be present + # when it shouldn't be). + https = http_client.HTTPConnection + mpo = mock.patch.object + + with mpo(https, 'request') as mock_request: + with mpo(https, 'getresponse', return_value=StubResponse()): + with self.assertRaises(self.connection.ResponseError): + self.connection.list_platform_applications() + + # Now, assert that the ``Host`` was there & correct. 
+ call = mock_request.call_args_list[0] + headers = call[0][3] + self.assertTrue('Host' in headers) + self.assertEqual(headers['Host'], 'sns.us-west-2.amazonaws.com') diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/sns/test_sns_sqs_subscription.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/sns/test_sns_sqs_subscription.py new file mode 100644 index 0000000000000000000000000000000000000000..a3656b9658f294d098d96ab36c5722781ff7488b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/sns/test_sns_sqs_subscription.py @@ -0,0 +1,101 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Unit tests for subscribing SQS queues to SNS topics. 
+""" + +import hashlib +import time + +from tests.unit import unittest + +from boto.compat import json +from boto.sqs.connection import SQSConnection +from boto.sns.connection import SNSConnection + +class SNSSubcribeSQSTest(unittest.TestCase): + + sqs = True + sns = True + + def setUp(self): + self.sqsc = SQSConnection() + self.snsc = SNSConnection() + + def get_policy_statements(self, queue): + attrs = queue.get_attributes('Policy') + policy = json.loads(attrs.get('Policy', "{}")) + return policy.get('Statement', {}) + + def test_correct_sid(self): + now = time.time() + topic_name = queue_name = "test_correct_sid%d" % (now) + + timeout = 60 + queue = self.sqsc.create_queue(queue_name, timeout) + self.addCleanup(self.sqsc.delete_queue, queue, True) + queue_arn = queue.arn + + topic = self.snsc.create_topic(topic_name) + topic_arn = topic['CreateTopicResponse']['CreateTopicResult']\ + ['TopicArn'] + self.addCleanup(self.snsc.delete_topic, topic_arn) + + expected_sid = hashlib.md5((topic_arn + queue_arn).encode('utf-8')).hexdigest() + resp = self.snsc.subscribe_sqs_queue(topic_arn, queue) + + found_expected_sid = False + statements = self.get_policy_statements(queue) + for statement in statements: + if statement['Sid'] == expected_sid: + found_expected_sid = True + break + self.assertTrue(found_expected_sid) + + def test_idempotent_subscribe(self): + now = time.time() + topic_name = queue_name = "test_idempotent_subscribe%d" % (now) + + timeout = 60 + queue = self.sqsc.create_queue(queue_name, timeout) + self.addCleanup(self.sqsc.delete_queue, queue, True) + initial_statements = self.get_policy_statements(queue) + queue_arn = queue.arn + + topic = self.snsc.create_topic(topic_name) + topic_arn = topic['CreateTopicResponse']['CreateTopicResult']\ + ['TopicArn'] + self.addCleanup(self.snsc.delete_topic, topic_arn) + + resp = self.snsc.subscribe_sqs_queue(topic_arn, queue) + time.sleep(3) + first_subscribe_statements = self.get_policy_statements(queue) + self.assertEqual(len(first_subscribe_statements), + len(initial_statements) + 1) + + resp2 = self.snsc.subscribe_sqs_queue(topic_arn, queue) + time.sleep(3) + second_subscribe_statements = self.get_policy_statements(queue) + self.assertEqual(len(second_subscribe_statements), + len(first_subscribe_statements)) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/sqs/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/sqs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b3fc3a0c31bff21cc9c8e0a0189f02cee820e64e --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/sqs/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/sqs/test_bigmessage.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/sqs/test_bigmessage.py new file mode 100644 index 0000000000000000000000000000000000000000..bb52dde145eead4850c2e42b4eabed35014d6dd7 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/sqs/test_bigmessage.py @@ -0,0 +1,78 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
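The BigMessage test below relies on one idea: the message body is parked in S3 at s3_url and only a reference travels through SQS. A minimal sketch, assuming the bucket named in s3_url already exists (names illustrative):

    import time
    import boto
    from boto.compat import StringIO
    from boto.sqs.bigmessage import BigMessage

    queue = boto.connect_sqs().create_queue('bigmsg-%d' % int(time.time()))
    queue.set_message_class(BigMessage)
    # Body goes to the named (pre-existing) bucket; SQS carries only
    # the S3 URL.
    msg = queue.new_message(StringIO('payload'), s3_url='s3://bigmsg-demo')
    queue.write(msg)
    # A later queue.read() fetches the body back from S3 transparently.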
+ +""" +Some unit tests for the SQSConnection +""" +import time +from threading import Timer +from tests.unit import unittest + +import boto +from boto.compat import StringIO +from boto.sqs.bigmessage import BigMessage +from boto.exception import SQSError + + +class TestBigMessage(unittest.TestCase): + + sqs = True + + def test_1_basic(self): + c = boto.connect_sqs() + + # create a queue so we can test BigMessage + queue_name = 'test%d' % int(time.time()) + timeout = 60 + queue = c.create_queue(queue_name, timeout) + self.addCleanup(c.delete_queue, queue, True) + queue.set_message_class(BigMessage) + + # create a bucket with the same name to store the message in + s3 = boto.connect_s3() + bucket = s3.create_bucket(queue_name) + self.addCleanup(s3.delete_bucket, queue_name) + time.sleep(30) + + # now add a message + msg_body = 'This is a test of the big message' + fp = StringIO(msg_body) + s3_url = 's3://%s' % queue_name + message = queue.new_message(fp, s3_url=s3_url) + + queue.write(message) + time.sleep(30) + + s3_object_name = message.s3_url.split('/')[-1] + + # Make sure msg body is in bucket + self.assertTrue(bucket.lookup(s3_object_name)) + + m = queue.read() + self.assertEqual(m.get_body().decode('utf-8'), msg_body) + + m.delete() + time.sleep(30) + + # Make sure msg is deleted from bucket + self.assertIsNone(bucket.lookup(s3_object_name)) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/sqs/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/sqs/test_cert_verification.py new file mode 100644 index 0000000000000000000000000000000000000000..f815e4c34bb9861f05f0079ef396013a928fd74b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/sqs/test_cert_verification.py @@ -0,0 +1,39 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on SQS endpoints validate. 
+""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.sqs + + +class SQSCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + sqs = True + regions = boto.sqs.regions() + + def sample_service_call(self, conn): + conn.get_all_queues() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/sqs/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/sqs/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..5ab80924d39eec92a197c427f07a09f5ea818ae2 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/sqs/test_connection.py @@ -0,0 +1,304 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Some unit tests for the SQSConnection +""" +import time +from threading import Timer +from tests.unit import unittest + +from boto.sqs.connection import SQSConnection +from boto.sqs.message import Message +from boto.sqs.message import MHMessage +from boto.exception import SQSError + + +class SQSConnectionTest(unittest.TestCase): + + sqs = True + + def test_1_basic(self): + print('--- running SQSConnection tests ---') + c = SQSConnection() + rs = c.get_all_queues() + num_queues = 0 + for q in rs: + num_queues += 1 + + # try illegal name + try: + queue = c.create_queue('bad*queue*name') + self.fail('queue name should have been bad') + except SQSError: + pass + + # now create one that should work and should be unique (i.e. 
a new one) + queue_name = 'test%d' % int(time.time()) + timeout = 60 + queue_1 = c.create_queue(queue_name, timeout) + self.addCleanup(c.delete_queue, queue_1, True) + time.sleep(60) + rs = c.get_all_queues() + i = 0 + for q in rs: + i += 1 + assert i == num_queues + 1 + assert queue_1.count_slow() == 0 + + # check the visibility timeout + t = queue_1.get_timeout() + assert t == timeout, '%d != %d' % (t, timeout) + + # now try to get queue attributes + a = q.get_attributes() + assert 'ApproximateNumberOfMessages' in a + assert 'VisibilityTimeout' in a + a = q.get_attributes('ApproximateNumberOfMessages') + assert 'ApproximateNumberOfMessages' in a + assert 'VisibilityTimeout' not in a + a = q.get_attributes('VisibilityTimeout') + assert 'ApproximateNumberOfMessages' not in a + assert 'VisibilityTimeout' in a + + # now change the visibility timeout + timeout = 45 + queue_1.set_timeout(timeout) + time.sleep(60) + t = queue_1.get_timeout() + assert t == timeout, '%d != %d' % (t, timeout) + + # now add a message + message_body = 'This is a test\n' + message = queue_1.new_message(message_body) + queue_1.write(message) + time.sleep(60) + assert queue_1.count_slow() == 1 + time.sleep(90) + + # now read the message from the queue with a 10 second timeout + message = queue_1.read(visibility_timeout=10) + assert message + assert message.get_body() == message_body + + # now immediately try another read, shouldn't find anything + message = queue_1.read() + assert message == None + + # now wait 30 seconds and try again + time.sleep(30) + message = queue_1.read() + assert message + + # now delete the message + queue_1.delete_message(message) + time.sleep(30) + assert queue_1.count_slow() == 0 + + # try a batch write + num_msgs = 10 + msgs = [(i, 'This is message %d' % i, 0) for i in range(num_msgs)] + queue_1.write_batch(msgs) + + # try to delete all of the messages using batch delete + deleted = 0 + while deleted < num_msgs: + time.sleep(5) + msgs = queue_1.get_messages(num_msgs) + if msgs: + br = queue_1.delete_message_batch(msgs) + deleted += len(br.results) + + # create another queue so we can test force deletion + # we will also test MHMessage with this queue + queue_name = 'test%d' % int(time.time()) + timeout = 60 + queue_2 = c.create_queue(queue_name, timeout) + self.addCleanup(c.delete_queue, queue_2, True) + queue_2.set_message_class(MHMessage) + time.sleep(30) + + # now add a couple of messages + message = queue_2.new_message() + message['foo'] = 'bar' + queue_2.write(message) + message_body = {'fie': 'baz', 'foo': 'bar'} + message = queue_2.new_message(body=message_body) + queue_2.write(message) + time.sleep(30) + + m = queue_2.read() + assert m['foo'] == 'bar' + + print('--- tests completed ---') + + def test_sqs_timeout(self): + c = SQSConnection() + queue_name = 'test_sqs_timeout_%s' % int(time.time()) + queue = c.create_queue(queue_name) + self.addCleanup(c.delete_queue, queue, True) + start = time.time() + poll_seconds = 2 + response = queue.read(visibility_timeout=None, + wait_time_seconds=poll_seconds) + total_time = time.time() - start + self.assertTrue(total_time > poll_seconds, + "SQS queue did not block for at least %s seconds: %s" % + (poll_seconds, total_time)) + self.assertIsNone(response) + + # Now that there's an element in the queue, we should not block for 2 + # seconds. 
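+        # (Long polling above was requested per-call via wait_time_seconds.
+        # A queue-wide default could also be set, e.g. something like
+        #     queue.set_attribute('ReceiveMessageWaitTimeSeconds', '10')
+        # -- illustrative only, not used by this test -- which is why the
+        # attribute check further down still expects the default of '0'.)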
+ c.send_message(queue, 'test message') + start = time.time() + poll_seconds = 2 + message = c.receive_message( + queue, number_messages=1, + visibility_timeout=None, attributes=None, + wait_time_seconds=poll_seconds)[0] + total_time = time.time() - start + self.assertTrue(total_time < poll_seconds, + "SQS queue blocked longer than %s seconds: %s" % + (poll_seconds, total_time)) + self.assertEqual(message.get_body(), 'test message') + + attrs = c.get_queue_attributes(queue, 'ReceiveMessageWaitTimeSeconds') + self.assertEqual(attrs['ReceiveMessageWaitTimeSeconds'], '0') + + def test_sqs_longpoll(self): + c = SQSConnection() + queue_name = 'test_sqs_longpoll_%s' % int(time.time()) + queue = c.create_queue(queue_name) + self.addCleanup(c.delete_queue, queue, True) + messages = [] + + # The basic idea is to spawn a timer thread that will put something + # on the queue in 5 seconds and verify that our long polling client + # sees the message after waiting for approximately that long. + def send_message(): + messages.append( + queue.write(queue.new_message('this is a test message'))) + + t = Timer(5.0, send_message) + t.start() + self.addCleanup(t.join) + + start = time.time() + response = queue.read(wait_time_seconds=10) + end = time.time() + + t.join() + self.assertEqual(response.id, messages[0].id) + self.assertEqual(response.get_body(), messages[0].get_body()) + # The timer thread should send the message in 5 seconds, so + # we're giving +- 1 second for the total time the queue + # was blocked on the read call. + self.assertTrue(4.0 <= (end - start) <= 6.0) + + def test_queue_deletion_affects_full_queues(self): + conn = SQSConnection() + initial_count = len(conn.get_all_queues()) + + empty = conn.create_queue('empty%d' % int(time.time())) + full = conn.create_queue('full%d' % int(time.time())) + time.sleep(60) + # Make sure they're both around. + self.assertEqual(len(conn.get_all_queues()), initial_count + 2) + + # Put a message in the full queue. + m1 = Message() + m1.set_body('This is a test message.') + full.write(m1) + self.assertEqual(full.count(), 1) + + self.assertTrue(conn.delete_queue(empty)) + # Here's the regression for the docs. SQS will delete a queue with + # messages in it, no ``force_deletion`` needed. + self.assertTrue(conn.delete_queue(full)) + # Wait long enough for SQS to finally remove the queues. + time.sleep(90) + self.assertEqual(len(conn.get_all_queues()), initial_count) + + def test_get_messages_attributes(self): + conn = SQSConnection() + current_timestamp = int(time.time()) + test = self.create_temp_queue(conn) + time.sleep(65) + + # Put a message in the queue. + self.put_queue_message(test) + self.assertEqual(test.count(), 1) + + # Check all attributes. + msgs = test.get_messages( + num_messages=1, + attributes='All' + ) + for msg in msgs: + self.assertEqual(msg.attributes['ApproximateReceiveCount'], '1') + first_rec = msg.attributes['ApproximateFirstReceiveTimestamp'] + first_rec = int(first_rec) / 1000 + self.assertTrue(first_rec >= current_timestamp) + + # Put another message in the queue. + self.put_queue_message(test) + self.assertEqual(test.count(), 1) + + # Check a specific attribute. 
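+        # Requesting a single attribute by name (rather than 'All') means
+        # only that attribute is populated on each message; the KeyError
+        # assertion below relies on the timestamp being absent.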
+ msgs = test.get_messages( + num_messages=1, + attributes='ApproximateReceiveCount' + ) + for msg in msgs: + self.assertEqual(msg.attributes['ApproximateReceiveCount'], '1') + with self.assertRaises(KeyError): + msg.attributes['ApproximateFirstReceiveTimestamp'] + + def test_queue_purge(self): + conn = SQSConnection() + test = self.create_temp_queue(conn) + time.sleep(65) + + # Put some messages in the queue. + for x in range(0, 4): + self.put_queue_message(test) + self.assertEqual(test.count(), 4) + + # Now purge the queue + conn.purge_queue(test) + + # Now assert queue count is 0 + self.assertEqual(test.count(), 0) + + def create_temp_queue(self, conn): + current_timestamp = int(time.time()) + queue_name = 'test%d' % int(time.time()) + test = conn.create_queue(queue_name) + self.addCleanup(conn.delete_queue, test) + + return test + + def put_queue_message(self, queue): + m1 = Message() + m1.set_body('This is a test message.') + queue.write(m1) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/storage_uri/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/storage_uri/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/storage_uri/test_storage_uri.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/storage_uri/test_storage_uri.py new file mode 100644 index 0000000000000000000000000000000000000000..55dac1ad76a87660edacf2facdbb79c03482ebfa --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/storage_uri/test_storage_uri.py @@ -0,0 +1,63 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Some unit tests for StorageUri +""" + +from tests.unit import unittest +import time +import boto +from boto.s3.connection import S3Connection, Location + + +class StorageUriTest(unittest.TestCase): + s3 = True + + def nuke_bucket(self, bucket): + for key in bucket: + key.delete() + + bucket.delete() + + def test_storage_uri_regionless(self): + # First, create a bucket in a different region. + conn = S3Connection( + host='s3-us-west-2.amazonaws.com' + ) + bucket_name = 'keytest-%d' % int(time.time()) + bucket = conn.create_bucket(bucket_name, location=Location.USWest2) + self.addCleanup(self.nuke_bucket, bucket) + + # Now use ``storage_uri`` to try to make a new key. + # This would throw a 301 exception. 
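+        # (That is, before the regression fix a region-less s3:// URI
+        # pointing at a bucket outside the default region failed with a
+        # 301 PermanentRedirect when creating the key.)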
+ suri = boto.storage_uri('s3://%s/test' % bucket_name) + the_key = suri.new_key() + the_key.key = 'Test301' + the_key.set_contents_from_string( + 'This should store in a different region.' + ) + + # Check it a different way. + alt_conn = boto.connect_s3(host='s3-us-west-2.amazonaws.com') + alt_bucket = alt_conn.get_bucket(bucket_name) + alt_key = alt_bucket.get_key('Test301') diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/sts/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/sts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..354aa06fe0d42ad6f6ba0e0d11446c066c7f0da4 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/sts/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/sts/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/sts/test_cert_verification.py new file mode 100644 index 0000000000000000000000000000000000000000..31ae2a6c982c1ca2ee262ccf47f6bce88519ccfe --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/sts/test_cert_verification.py @@ -0,0 +1,39 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on all service endpoints validate. 
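+(For this module that means every region in boto.sts.regions(), each
+probed with a get_session_token call.)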
+""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.sts + + +class STSCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + sts = True + regions = boto.sts.regions() + + def sample_service_call(self, conn): + conn.get_session_token() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/sts/test_session_token.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/sts/test_session_token.py new file mode 100644 index 0000000000000000000000000000000000000000..2c911d3a96afebda36a424b761d8c5a948bcd8c6 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/sts/test_session_token.py @@ -0,0 +1,91 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Tests for Session Tokens +""" + +import unittest +import os +from boto.exception import BotoServerError +from boto.sts.connection import STSConnection +from boto.sts.credentials import Credentials +from boto.s3.connection import S3Connection + + +class SessionTokenTest(unittest.TestCase): + sts = True + + def test_session_token(self): + print('--- running Session Token tests ---') + c = STSConnection() + + # Create a session token + token = c.get_session_token() + + # Save session token to a file + token.save('token.json') + + # Now load up a copy of that token + token_copy = Credentials.load('token.json') + assert token_copy.access_key == token.access_key + assert token_copy.secret_key == token.secret_key + assert token_copy.session_token == token.session_token + assert token_copy.expiration == token.expiration + assert token_copy.request_id == token.request_id + + os.unlink('token.json') + + assert not token.is_expired() + + # Try using the session token with S3 + s3 = S3Connection(aws_access_key_id=token.access_key, + aws_secret_access_key=token.secret_key, + security_token=token.session_token) + buckets = s3.get_all_buckets() + + print('--- tests completed ---') + + def test_assume_role_with_web_identity(self): + c = STSConnection(anon=True) + arn = 'arn:aws:iam::000240903217:role/FederatedWebIdentityRole' + wit = 'b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9' + + try: + creds = c.assume_role_with_web_identity( + role_arn=arn, + role_session_name='guestuser', + web_identity_token=wit, + provider_id='www.amazon.com', + ) + except BotoServerError as err: + self.assertEqual(err.status, 403) + self.assertTrue('Not authorized' in err.body) + + def test_decode_authorization_message(self): + c = STSConnection() + + try: + creds = c.decode_authorization_message('b94d27b9934') + except BotoServerError as err: + self.assertEqual(err.status, 400) + self.assertIn('InvalidAuthorizationMessageException', err.body) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/support/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/support/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/support/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/support/test_cert_verification.py new file mode 100644 index 0000000000000000000000000000000000000000..1127c0d095398e9fc1c121ecab9a9d2123369763 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/support/test_cert_verification.py @@ -0,0 +1,34 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.support + + +class SupportCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + support = True + regions = boto.support.regions() + + def sample_service_call(self, conn): + conn.describe_services() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/support/test_layer1.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/support/test_layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..6b2b65d2f3cbc801fd075183652c5d01f2494fc0 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/support/test_layer1.py @@ -0,0 +1,76 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
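+# Note: this module talks to the live AWS Support API; the management
+# test below opens a real (clearly marked TEST, low-severity) support
+# case and resolves it again at the end.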
+# +import unittest +import time + +from boto.support.layer1 import SupportConnection +from boto.support import exceptions + + +class TestSupportLayer1Management(unittest.TestCase): + support = True + + def setUp(self): + self.api = SupportConnection() + self.wait_time = 5 + + def test_as_much_as_possible_before_teardown(self): + cases = self.api.describe_cases() + preexisting_count = len(cases.get('cases', [])) + + services = self.api.describe_services() + self.assertTrue('services' in services) + service_codes = [serv['code'] for serv in services['services']] + self.assertTrue('amazon-cloudsearch' in service_codes) + + severity = self.api.describe_severity_levels() + self.assertTrue('severityLevels' in severity) + severity_codes = [sev['code'] for sev in severity['severityLevels']] + self.assertTrue('low' in severity_codes) + + case_1 = self.api.create_case( + subject='TEST: I am a test case.', + service_code='amazon-cloudsearch', + category_code='other', + communication_body="This is a test problem", + severity_code='low', + language='en' + ) + time.sleep(self.wait_time) + case_id = case_1['caseId'] + + new_cases = self.api.describe_cases() + self.assertTrue(len(new_cases['cases']) > preexisting_count) + + result = self.api.add_communication_to_case( + communication_body="This is a test solution.", + case_id=case_id + ) + self.assertTrue(result.get('result', False)) + time.sleep(self.wait_time) + + final_cases = self.api.describe_cases(case_id_list=[case_id]) + comms = final_cases['cases'][0]['recentCommunications']\ + ['communications'] + self.assertEqual(len(comms), 2) + + close_result = self.api.resolve_case(case_id=case_id) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/swf/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/swf/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/swf/test_cert_verification.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/swf/test_cert_verification.py new file mode 100644 index 0000000000000000000000000000000000000000..108f218bb4773399153b2a4c8f08820df01c48f3 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/swf/test_cert_verification.py @@ -0,0 +1,39 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Check that all of the certs on all service endpoints validate. +""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.swf + + +class SWFCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + swf = True + regions = boto.swf.regions() + + def sample_service_call(self, conn): + conn.list_domains('REGISTERED') diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/swf/test_layer1.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/swf/test_layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..02ad0519074340d669f8f52523075c4954d80246 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/swf/test_layer1.py @@ -0,0 +1,246 @@ +""" +Tests for Layer1 of Simple Workflow + +""" +import os +import unittest +import time + +from boto.swf.layer1 import Layer1 +from boto.swf import exceptions as swf_exceptions + + + +# A standard AWS account is permitted a maximum of 100 of SWF domains, +# registered or deprecated. Deleting deprecated domains on demand does +# not appear possible. Therefore, these tests reuse a default or +# user-named testing domain. This is named by the user via the environment +# variable BOTO_SWF_UNITTEST_DOMAIN, if available. Otherwise the default +# testing domain is literally "boto-swf-unittest-domain". Do not use +# the testing domain for other purposes. +BOTO_SWF_UNITTEST_DOMAIN = os.environ.get("BOTO_SWF_UNITTEST_DOMAIN", + "boto-swf-unittest-domain") + +# A standard domain can have a maxiumum of 10,000 workflow types and +# activity types, registered or deprecated. Therefore, eventually any +# tests which register new workflow types or activity types would begin +# to fail with LimitExceeded. Instead of generating new workflow types +# and activity types, these tests reuse the existing types. + +# The consequence of the limits and inability to delete deprecated +# domains, workflow types, and activity types is that the tests in +# this module will not test for the three register actions: +# * register_domain +# * register_workflow_type +# * register_activity_type +# Instead, the setUp of the TestCase create a domain, workflow type, +# and activity type, expecting that they may already exist, and the +# tests themselves test other things. + +# If you really want to re-test the register_* functions in their +# ability to create things (rather than just reporting that they +# already exist), you'll need to use a new BOTO_SWF_UNITTEST_DOMAIN. +# But, beware that once you hit 100 domains, you are cannot create any +# more, delete existing ones, or rename existing ones. + +# Some API calls establish resources, but these resources are not instantly +# available to the next API call. For testing purposes, it is necessary to +# have a short pause to avoid having tests fail for invalid reasons. +PAUSE_SECONDS = 4 + + + +class SimpleWorkflowLayer1TestBase(unittest.TestCase): + """ + There are at least two test cases which share this setUp/tearDown + and the class-based parameter definitions: + * SimpleWorkflowLayer1Test + * tests.swf.test_layer1_workflow_execution.SwfL1WorkflowExecutionTest + """ + swf = True + # Some params used throughout the tests... + # Domain registration params... + _domain = BOTO_SWF_UNITTEST_DOMAIN + _workflow_execution_retention_period_in_days = 'NONE' + _domain_description = 'test workflow domain' + # Type registration params used for workflow type and activity type... + _task_list = 'tasklist1' + # Workflow type registration params... 
+ _workflow_type_name = 'wft1' + _workflow_type_version = '1' + _workflow_type_description = 'wft1 description' + _default_child_policy = 'REQUEST_CANCEL' + _default_execution_start_to_close_timeout = '600' + _default_task_start_to_close_timeout = '60' + # Activity type registration params... + _activity_type_name = 'at1' + _activity_type_version = '1' + _activity_type_description = 'at1 description' + _default_task_heartbeat_timeout = '30' + _default_task_schedule_to_close_timeout = '90' + _default_task_schedule_to_start_timeout = '10' + _default_task_start_to_close_timeout = '30' + + + def setUp(self): + # Create a Layer1 connection for testing. + # Tester needs boto config or keys in environment variables. + self.conn = Layer1() + + # Register a domain. Expect None (success) or + # SWFDomainAlreadyExistsError. + try: + r = self.conn.register_domain(self._domain, + self._workflow_execution_retention_period_in_days, + description=self._domain_description) + assert r is None + time.sleep(PAUSE_SECONDS) + except swf_exceptions.SWFDomainAlreadyExistsError: + pass + + # Register a workflow type. Expect None (success) or + # SWFTypeAlreadyExistsError. + try: + r = self.conn.register_workflow_type(self._domain, + self._workflow_type_name, self._workflow_type_version, + task_list=self._task_list, + default_child_policy=self._default_child_policy, + default_execution_start_to_close_timeout= + self._default_execution_start_to_close_timeout, + default_task_start_to_close_timeout= + self._default_task_start_to_close_timeout, + description=self._workflow_type_description) + assert r is None + time.sleep(PAUSE_SECONDS) + except swf_exceptions.SWFTypeAlreadyExistsError: + pass + + # Register an activity type. Expect None (success) or + # SWFTypeAlreadyExistsError. + try: + r = self.conn.register_activity_type(self._domain, + self._activity_type_name, self._activity_type_version, + task_list=self._task_list, + default_task_heartbeat_timeout= + self._default_task_heartbeat_timeout, + default_task_schedule_to_close_timeout= + self._default_task_schedule_to_close_timeout, + default_task_schedule_to_start_timeout= + self._default_task_schedule_to_start_timeout, + default_task_start_to_close_timeout= + self._default_task_start_to_close_timeout, + description=self._activity_type_description) + assert r is None + time.sleep(PAUSE_SECONDS) + except swf_exceptions.SWFTypeAlreadyExistsError: + pass + + def tearDown(self): + # Delete what we can... + pass + + + + +class SimpleWorkflowLayer1Test(SimpleWorkflowLayer1TestBase): + + def test_list_domains(self): + # Find the domain. + r = self.conn.list_domains('REGISTERED') + found = None + for info in r['domainInfos']: + if info['name'] == self._domain: + found = info + break + self.assertNotEqual(found, None, 'list_domains; test domain not found') + # Validate some properties. + self.assertEqual(found['description'], self._domain_description, + 'list_domains; description does not match') + self.assertEqual(found['status'], 'REGISTERED', + 'list_domains; status does not match') + + def test_list_workflow_types(self): + # Find the workflow type. + r = self.conn.list_workflow_types(self._domain, 'REGISTERED') + found = None + for info in r['typeInfos']: + if ( info['workflowType']['name'] == self._workflow_type_name and + info['workflowType']['version'] == self._workflow_type_version ): + found = info + break + self.assertNotEqual(found, None, 'list_workflow_types; test type not found') + # Validate some properties. 
+        self.assertEqual(found['description'], self._workflow_type_description,
+            'list_workflow_types; description does not match')
+        self.assertEqual(found['status'], 'REGISTERED',
+            'list_workflow_types; status does not match')
+
+    def test_list_activity_types(self):
+        # Find the activity type.
+        r = self.conn.list_activity_types(self._domain, 'REGISTERED')
+        found = None
+        for info in r['typeInfos']:
+            if info['activityType']['name'] == self._activity_type_name:
+                found = info
+                break
+        self.assertNotEqual(found, None, 'list_activity_types; test type not found')
+        # Validate some properties.
+        self.assertEqual(found['description'], self._activity_type_description,
+            'list_activity_types; description does not match')
+        self.assertEqual(found['status'], 'REGISTERED',
+            'list_activity_types; status does not match')
+
+
+    def test_list_closed_workflow_executions(self):
+        # Test various legal ways to call function.
+        latest_date = time.time()
+        oldest_date = time.time() - 3600
+        # With startTimeFilter...
+        self.conn.list_closed_workflow_executions(self._domain,
+            start_latest_date=latest_date, start_oldest_date=oldest_date)
+        # With closeTimeFilter...
+        self.conn.list_closed_workflow_executions(self._domain,
+            close_latest_date=latest_date, close_oldest_date=oldest_date)
+        # With closeStatusFilter...
+        self.conn.list_closed_workflow_executions(self._domain,
+            close_latest_date=latest_date, close_oldest_date=oldest_date,
+            close_status='COMPLETED')
+        # With tagFilter...
+        self.conn.list_closed_workflow_executions(self._domain,
+            close_latest_date=latest_date, close_oldest_date=oldest_date,
+            tag='ig')
+        # With executionFilter...
+        self.conn.list_closed_workflow_executions(self._domain,
+            close_latest_date=latest_date, close_oldest_date=oldest_date,
+            workflow_id='ig')
+        # With typeFilter...
+        self.conn.list_closed_workflow_executions(self._domain,
+            close_latest_date=latest_date, close_oldest_date=oldest_date,
+            workflow_name='ig', workflow_version='ig')
+        # With reverseOrder...
+        self.conn.list_closed_workflow_executions(self._domain,
+            close_latest_date=latest_date, close_oldest_date=oldest_date,
+            reverse_order=True)
+
+
+    def test_list_open_workflow_executions(self):
+        # Test various legal ways to call function.
+        latest_date = time.time()
+        oldest_date = time.time() - 3600
+        # With required params only...
+        self.conn.list_open_workflow_executions(self._domain,
+            latest_date, oldest_date)
+        # With tagFilter...
+        self.conn.list_open_workflow_executions(self._domain,
+            latest_date, oldest_date, tag='ig')
+        # With executionFilter...
+        self.conn.list_open_workflow_executions(self._domain,
+            latest_date, oldest_date, workflow_id='ig')
+        # With typeFilter...
+        self.conn.list_open_workflow_executions(self._domain,
+            latest_date, oldest_date,
+            workflow_name='ig', workflow_version='ig')
+        # With reverseOrder...
+        self.conn.list_open_workflow_executions(self._domain,
+            latest_date, oldest_date, reverse_order=True)
+
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/integration/swf/test_layer1_workflow_execution.py b/desktop/core/ext-py/boto-2.38.0/tests/integration/swf/test_layer1_workflow_execution.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b876a54eaa8616dfb1c59b165723940f0ab6ddb
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/integration/swf/test_layer1_workflow_execution.py
@@ -0,0 +1,173 @@
+"""
+Tests for Layer1 of Simple Workflow
+
+"""
+import time
+import uuid
+import json
+import traceback
+
+from boto.swf.layer1_decisions import Layer1Decisions
+
+from tests.integration.swf.test_layer1 import SimpleWorkflowLayer1TestBase
+
+
+
+class SwfL1WorkflowExecutionTest(SimpleWorkflowLayer1TestBase):
+    """
+    test a simple workflow execution
+    """
+    swf = True
+
+    def run_decider(self):
+        """
+        run one iteration of a simple decision engine
+        """
+        # Poll for a decision task.
+        tries = 0
+        while True:
+            dtask = self.conn.poll_for_decision_task(self._domain,
+                self._task_list, reverse_order=True)
+            if dtask.get('taskToken') is not None:
+                # This means a real decision task has arrived.
+                break
+            time.sleep(2)
+            tries += 1
+            if tries > 10:
+                # Give up if it's taking too long. Probably
+                # means something is broken somewhere else.
+                assert False, 'no decision task occurred'
+
+        # Get the most recent interesting event.
+        ignorable = (
+            'DecisionTaskScheduled',
+            'DecisionTaskStarted',
+            'DecisionTaskTimedOut',
+        )
+        event = None
+        for tevent in dtask['events']:
+            if tevent['eventType'] not in ignorable:
+                event = tevent
+                break
+
+        # Construct the decision response.
+        decisions = Layer1Decisions()
+        if event['eventType'] == 'WorkflowExecutionStarted':
+            activity_id = str(uuid.uuid1())
+            decisions.schedule_activity_task(activity_id,
+                self._activity_type_name, self._activity_type_version,
+                task_list=self._task_list,
+                input=event['workflowExecutionStartedEventAttributes']['input'])
+        elif event['eventType'] == 'ActivityTaskCompleted':
+            decisions.complete_workflow_execution(
+                result=event['activityTaskCompletedEventAttributes']['result'])
+        elif event['eventType'] == 'ActivityTaskFailed':
+            decisions.fail_workflow_execution(
+                reason=event['activityTaskFailedEventAttributes']['reason'],
+                details=event['activityTaskFailedEventAttributes']['details'])
+        else:
+            decisions.fail_workflow_execution(
+                reason='unhandled decision task type; %r' % (event['eventType'],))
+
+        # Send the decision response.
+        r = self.conn.respond_decision_task_completed(dtask['taskToken'],
+            decisions=decisions._data,
+            execution_context=None)
+        assert r is None
+
+
+    def run_worker(self):
+        """
+        run one iteration of a simple worker engine
+        """
+        # Poll for an activity task.
+        tries = 0
+        while True:
+            atask = self.conn.poll_for_activity_task(self._domain,
+                self._task_list, identity='test worker')
+            if atask.get('activityId') is not None:
+                # This means a real activity task has arrived.
+                break
+            time.sleep(2)
+            tries += 1
+            if tries > 10:
+                # Give up if it's taking too long. Probably
+                # means something is broken somewhere else.
+                assert False, 'no activity task occurred'
+        # Do the work or catch a "work exception."
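+        # The "work" is to sum the JSON-encoded list received as input; a
+        # bad input such as [600, "s"] raises, and the failure is reported
+        # back via respond_activity_task_failed rather than crashing the
+        # worker.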
+        reason = None
+        try:
+            result = json.dumps(sum(json.loads(atask['input'])))
+        except Exception:
+            reason = 'an exception was raised'
+            details = traceback.format_exc()
+        if reason is None:
+            r = self.conn.respond_activity_task_completed(
+                atask['taskToken'], result)
+        else:
+            r = self.conn.respond_activity_task_failed(
+                atask['taskToken'], reason=reason, details=details)
+        assert r is None
+
+
+    def test_workflow_execution(self):
+        # Start a workflow execution whose activity task will succeed.
+        workflow_id = 'wfid-%.2f' % (time.time(),)
+        r = self.conn.start_workflow_execution(self._domain,
+            workflow_id,
+            self._workflow_type_name,
+            self._workflow_type_version,
+            execution_start_to_close_timeout='20',
+            input='[600, 15]')
+        # Need the run_id to look up the execution history later.
+        run_id = r['runId']
+
+        # Move the workflow execution forward by having the
+        # decider schedule an activity task.
+        self.run_decider()
+
+        # Run the worker to handle the scheduled activity task.
+        self.run_worker()
+
+        # Complete the workflow execution by having the
+        # decider close it down.
+        self.run_decider()
+
+        # Check that the result was stored in the execution history.
+        r = self.conn.get_workflow_execution_history(self._domain,
+            run_id, workflow_id,
+            reverse_order=True)['events'][0]
+        result = r['workflowExecutionCompletedEventAttributes']['result']
+        assert json.loads(result) == 615
+
+
+    def test_failed_workflow_execution(self):
+        # Start a workflow execution whose activity task will fail.
+        workflow_id = 'wfid-%.2f' % (time.time(),)
+        r = self.conn.start_workflow_execution(self._domain,
+            workflow_id,
+            self._workflow_type_name,
+            self._workflow_type_version,
+            execution_start_to_close_timeout='20',
+            input='[600, "s"]')
+        # Need the run_id to look up the execution history later.
+        run_id = r['runId']
+
+        # Move the workflow execution forward by having the
+        # decider schedule an activity task.
+        self.run_decider()
+
+        # Run the worker to handle the scheduled activity task.
+        self.run_worker()
+
+        # Complete the workflow execution by having the
+        # decider close it down.
+        self.run_decider()
+
+        # Check that the failure was stored in the execution history.
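+        # (With reverse_order=True the newest event comes back first, so
+        # events[0] is the WorkflowExecutionFailed event whose attributes
+        # carry the reason set by the failed worker run above.)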
+ r = self.conn.get_workflow_execution_history(self._domain, + run_id, workflow_id, + reverse_order=True)['events'][0] + reason = r['workflowExecutionFailedEventAttributes']['reason'] + assert reason == 'an exception was raised' + diff --git a/desktop/core/ext-py/boto-2.38.0/tests/mturk/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/mturk/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/mturk/_init_environment.py b/desktop/core/ext-py/boto-2.38.0/tests/mturk/_init_environment.py new file mode 100644 index 0000000000000000000000000000000000000000..3ca5cf6d81ea6774ed8d9c95c62bc5367de78d23 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/mturk/_init_environment.py @@ -0,0 +1,28 @@ +import os +import functools + +live_connection = False +mturk_host = 'mechanicalturk.sandbox.amazonaws.com' +external_url = 'http://www.example.com/' + + +SetHostMTurkConnection = None + +def config_environment(): + global SetHostMTurkConnection + try: + local = os.path.join(os.path.dirname(__file__), 'local.py') + execfile(local) + except: + pass + + if live_connection: + #TODO: you must set the auth credentials to something valid + from boto.mturk.connection import MTurkConnection + else: + # Here the credentials must be set, but it doesn't matter what + # they're set to. + os.environ.setdefault('AWS_ACCESS_KEY_ID', 'foo') + os.environ.setdefault('AWS_SECRET_ACCESS_KEY', 'bar') + from mocks import MTurkConnection + SetHostMTurkConnection = functools.partial(MTurkConnection, host=mturk_host) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/mturk/all_tests.py b/desktop/core/ext-py/boto-2.38.0/tests/mturk/all_tests.py new file mode 100644 index 0000000000000000000000000000000000000000..ba2e12281f76881fa07a43df8b7bf417839fbbfd --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/mturk/all_tests.py @@ -0,0 +1,24 @@ + +import unittest +import doctest +from glob import glob + +from create_hit_test import * +from create_hit_with_qualifications import * +from create_hit_external import * +from create_hit_with_qualifications import * +from hit_persistence import * + +doctest_suite = doctest.DocFileSuite( + *glob('*.doctest'), + **{'optionflags': doctest.REPORT_ONLY_FIRST_FAILURE} + ) + +class Program(unittest.TestProgram): + def runTests(self, *args, **kwargs): + self.test = unittest.TestSuite([self.test, doctest_suite]) + super(Program, self).runTests(*args, **kwargs) + +if __name__ == '__main__': + Program() + diff --git a/desktop/core/ext-py/boto-2.38.0/tests/mturk/cleanup_tests.py b/desktop/core/ext-py/boto-2.38.0/tests/mturk/cleanup_tests.py new file mode 100644 index 0000000000000000000000000000000000000000..bda5167514eebbd554168ea700bd7da9e00a192f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/mturk/cleanup_tests.py @@ -0,0 +1,47 @@ +import itertools + +from _init_environment import SetHostMTurkConnection +from _init_environment import config_environment + +def description_filter(substring): + return lambda hit: substring in hit.Title + +def disable_hit(hit): + return conn.disable_hit(hit.HITId) + +def dispose_hit(hit): + # assignments must be first approved or rejected + for assignment in conn.get_assignments(hit.HITId): + if assignment.AssignmentStatus == 'Submitted': + conn.approve_assignment(assignment.AssignmentId) + return conn.dispose_hit(hit.HITId) + +def cleanup(): + """Remove any boto test related HIT's""" + config_environment() + + global conn + + conn 
= SetHostMTurkConnection() + + + is_boto = description_filter('Boto') + print 'getting hits...' + all_hits = list(conn.get_all_hits()) + is_reviewable = lambda hit: hit.HITStatus == 'Reviewable' + is_not_reviewable = lambda hit: not is_reviewable(hit) + hits_to_process = filter(is_boto, all_hits) + hits_to_disable = filter(is_not_reviewable, hits_to_process) + hits_to_dispose = filter(is_reviewable, hits_to_process) + print 'disabling/disposing %d/%d hits' % (len(hits_to_disable), len(hits_to_dispose)) + map(disable_hit, hits_to_disable) + map(dispose_hit, hits_to_dispose) + + total_hits = len(all_hits) + hits_processed = len(hits_to_process) + skipped = total_hits - hits_processed + fmt = 'Processed: %(total_hits)d HITs, disabled/disposed: %(hits_processed)d, skipped: %(skipped)d' + print fmt % vars() + +if __name__ == '__main__': + cleanup() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/mturk/common.py b/desktop/core/ext-py/boto-2.38.0/tests/mturk/common.py new file mode 100644 index 0000000000000000000000000000000000000000..151714ae9a5533fd273f0a364f3fc47075f5e39f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/mturk/common.py @@ -0,0 +1,45 @@ +import unittest +import uuid +import datetime + +from boto.mturk.question import ( + Question, QuestionContent, AnswerSpecification, FreeTextAnswer, +) +from _init_environment import SetHostMTurkConnection, config_environment + +class MTurkCommon(unittest.TestCase): + def setUp(self): + config_environment() + self.conn = SetHostMTurkConnection() + + @staticmethod + def get_question(): + # create content for a question + qn_content = QuestionContent() + qn_content.append_field('Title', 'Boto no hit type question content') + qn_content.append_field('Text', 'What is a boto no hit type?') + + # create the question specification + qn = Question(identifier=str(uuid.uuid4()), + content=qn_content, + answer_spec=AnswerSpecification(FreeTextAnswer())) + return qn + + @staticmethod + def get_hit_params(): + return dict( + lifetime=datetime.timedelta(minutes=65), + max_assignments=2, + title='Boto create_hit title', + description='Boto create_hit description', + keywords=['boto', 'test'], + reward=0.23, + duration=datetime.timedelta(minutes=6), + approval_delay=60*60, + annotation='An annotation from boto create_hit test', + response_groups=['Minimal', + 'HITDetail', + 'HITQuestion', + 'HITAssignmentSummary',], + ) + diff --git a/desktop/core/ext-py/boto-2.38.0/tests/mturk/create_hit_external.py b/desktop/core/ext-py/boto-2.38.0/tests/mturk/create_hit_external.py new file mode 100644 index 0000000000000000000000000000000000000000..f2264c80f4eac912d11547d991ab521bdd3456ee --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/mturk/create_hit_external.py @@ -0,0 +1,21 @@ +import unittest +import uuid +import datetime +from boto.mturk.question import ExternalQuestion + +from _init_environment import SetHostMTurkConnection, external_url, \ + config_environment + +class Test(unittest.TestCase): + def setUp(self): + config_environment() + + def test_create_hit_external(self): + q = ExternalQuestion(external_url=external_url, frame_height=800) + conn = SetHostMTurkConnection() + keywords=['boto', 'test', 'doctest'] + create_hit_rs = conn.create_hit(question=q, lifetime=60*65, max_assignments=2, title="Boto External Question Test", keywords=keywords, reward = 0.05, duration=60*6, approval_delay=60*60, annotation='An annotation from boto external question test', response_groups=['Minimal', 'HITDetail', 'HITQuestion', 'HITAssignmentSummary',]) + 
assert(create_hit_rs.status == True) + +if __name__ == "__main__": + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/mturk/create_hit_test.py b/desktop/core/ext-py/boto-2.38.0/tests/mturk/create_hit_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ea134b4c3c7659f48822b214467b6fd74911ec34 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/mturk/create_hit_test.py @@ -0,0 +1,21 @@ +import unittest +import os +from boto.mturk.question import QuestionForm + +from common import MTurkCommon + +class TestHITCreation(MTurkCommon): + def testCallCreateHitWithOneQuestion(self): + create_hit_rs = self.conn.create_hit( + question=self.get_question(), + **self.get_hit_params() + ) + + def testCallCreateHitWithQuestionForm(self): + create_hit_rs = self.conn.create_hit( + questions=QuestionForm([self.get_question()]), + **self.get_hit_params() + ) + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/mturk/create_hit_with_qualifications.py b/desktop/core/ext-py/boto-2.38.0/tests/mturk/create_hit_with_qualifications.py new file mode 100644 index 0000000000000000000000000000000000000000..04559c15e8c33e6c926408dff06d55af58cd5fda --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/mturk/create_hit_with_qualifications.py @@ -0,0 +1,16 @@ +from boto.mturk.connection import MTurkConnection +from boto.mturk.question import ExternalQuestion +from boto.mturk.qualification import Qualifications, PercentAssignmentsApprovedRequirement + +def test(): + q = ExternalQuestion(external_url="http://websort.net/s/F3481C", frame_height=800) + conn = MTurkConnection(host='mechanicalturk.sandbox.amazonaws.com') + keywords=['boto', 'test', 'doctest'] + qualifications = Qualifications() + qualifications.add(PercentAssignmentsApprovedRequirement(comparator="GreaterThan", integer_value="95")) + create_hit_rs = conn.create_hit(question=q, lifetime=60*65, max_assignments=2, title="Boto External Question Test", keywords=keywords, reward = 0.05, duration=60*6, approval_delay=60*60, annotation='An annotation from boto external question test', qualifications=qualifications) + assert(create_hit_rs.status == True) + print create_hit_rs.HITTypeId + +if __name__ == "__main__": + test() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/mturk/hit_persistence.py b/desktop/core/ext-py/boto-2.38.0/tests/mturk/hit_persistence.py new file mode 100644 index 0000000000000000000000000000000000000000..04ebd0c25b01b8745f2be3bb1c6141ea073418c0 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/mturk/hit_persistence.py @@ -0,0 +1,27 @@ +import unittest +import pickle + +from common import MTurkCommon + +class TestHITPersistence(MTurkCommon): + def create_hit_result(self): + return self.conn.create_hit( + question=self.get_question(), **self.get_hit_params() + ) + + def test_pickle_hit_result(self): + result = self.create_hit_result() + new_result = pickle.loads(pickle.dumps(result)) + + def test_pickle_deserialized_version(self): + """ + It seems the technique used to store and reload the object must + result in an equivalent object, or subsequent pickles may fail. + This tests a double-pickle to elicit that error. 
+ """ + result = self.create_hit_result() + new_result = pickle.loads(pickle.dumps(result)) + pickle.dumps(new_result) + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/mturk/mocks.py b/desktop/core/ext-py/boto-2.38.0/tests/mturk/mocks.py new file mode 100644 index 0000000000000000000000000000000000000000..0b2c52c43ce0d0eff81bf0979977d024b439bfdb --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/mturk/mocks.py @@ -0,0 +1,11 @@ +from boto.mturk.connection import MTurkConnection as RealMTurkConnection + +class MTurkConnection(RealMTurkConnection): + """ + Mock MTurkConnection that doesn't connect, but instead just prepares + the request and captures information about its usage. + """ + + def _process_request(self, *args, **kwargs): + saved_args = self.__dict__.setdefault('_mock_saved_args', dict()) + saved_args['_process_request'] = (args, kwargs) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/mturk/run-doctest.py b/desktop/core/ext-py/boto-2.38.0/tests/mturk/run-doctest.py new file mode 100644 index 0000000000000000000000000000000000000000..802b773996925364328f735a85de2bfb8c42b82f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/mturk/run-doctest.py @@ -0,0 +1,13 @@ +import argparse +import doctest + +parser = argparse.ArgumentParser( + description="Run a test by name" + ) +parser.add_argument('test_name') +args = parser.parse_args() + +doctest.testfile( + args.test_name, + optionflags=doctest.REPORT_ONLY_FIRST_FAILURE + ) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/mturk/selenium_support.py b/desktop/core/ext-py/boto-2.38.0/tests/mturk/selenium_support.py new file mode 100644 index 0000000000000000000000000000000000000000..f1552cb22741950639949ba75f9d74ab05782ff3 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/mturk/selenium_support.py @@ -0,0 +1,61 @@ +from __future__ import absolute_import +from boto.mturk.test.support import unittest + +sel_args = ('localhost', 4444, '*chrome', 'https://workersandbox.mturk.com') + +class SeleniumFailed(object): + def __init__(self, message): + self.message = message + def __nonzero__(self): + return False + +def has_selenium(): + try: + from selenium import selenium + globals().update(selenium=selenium) + sel = selenium(*sel_args) + # a little trick to see if the server is responding + try: + sel.do_command('shutdown', '') + except Exception, e: + if not 'Server Exception' in str(e): + raise + result = True + except ImportError: + result = SeleniumFailed('selenium RC not installed') + except Exception: + msg = 'Error occurred initializing selenium: %s' % e + result = SeleniumFailed(msg) + + # overwrite has_selenium, so the same result is returned every time + globals().update(has_selenium=lambda: result) + return result + +identity = lambda x: x + +def skip_unless_has_selenium(): + res = has_selenium() + if not res: + return unittest.skip(res.message) + return identity + +def complete_hit(hit_type_id, response='Some Response'): + verificationErrors = [] + sel = selenium(*sel_args) + sel.start() + sel.open("/mturk/welcome") + sel.click("lnkWorkerSignin") + sel.wait_for_page_to_load("30000") + sel.type("email", "boto.tester@example.com") + sel.type("password", "BotoTest") + sel.click("Continue") + sel.wait_for_page_to_load("30000") + sel.open("/mturk/preview?groupId={hit_type_id}".format(**vars())) + sel.click("/accept") + sel.wait_for_page_to_load("30000") + sel.type("Answer_1_FreeText", response) + sel.click("//div[5]/table/tbody/tr[2]/td[1]/input") + 
sel.wait_for_page_to_load("30000") + sel.click("link=Sign Out") + sel.wait_for_page_to_load("30000") + sel.stop() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/mturk/support.py b/desktop/core/ext-py/boto-2.38.0/tests/mturk/support.py new file mode 100644 index 0000000000000000000000000000000000000000..26308255e09b20b23bf6496eeb5cc40c3656d041 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/mturk/support.py @@ -0,0 +1,7 @@ +import sys + +# use unittest2 under Python 2.6 and earlier. +if sys.version_info >= (2, 7): + import unittest +else: + import unittest2 as unittest diff --git a/desktop/core/ext-py/boto-2.38.0/tests/mturk/test_disable_hit.py b/desktop/core/ext-py/boto-2.38.0/tests/mturk/test_disable_hit.py new file mode 100644 index 0000000000000000000000000000000000000000..2d9bd9bfc00c30086683c94f601a93df3b6bf79b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/mturk/test_disable_hit.py @@ -0,0 +1,11 @@ +from tests.mturk.support import unittest + +from common import MTurkCommon +from boto.mturk.connection import MTurkRequestError + +class TestDisableHITs(MTurkCommon): + def test_disable_invalid_hit(self): + self.assertRaises(MTurkRequestError, self.conn.disable_hit, 'foo') + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/test.py b/desktop/core/ext-py/boto-2.38.0/tests/test.py new file mode 100755 index 0000000000000000000000000000000000000000..692ed4dd54186e0b72a322fad893e7155a39a1ef --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/test.py @@ -0,0 +1,118 @@ +#!/usr/bin/env python +# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +from __future__ import print_function + +import argparse +import os +import sys + +from nose.core import run + + +# This is a whitelist of unit tests that support Python 3. +# When porting a new module to Python 3, please update this +# list so that its tests will run by default. See the +# `default` target below for more information. +# We use this instead of test attributes/tags because in +# order to filter on tags nose must load each test - many +# will fail to import with Python 3. 
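+# (For reference, a service-filtered run described by main() below looks
+# like, hypothetically:
+#     python tests/test.py -t sqs
+# which is translated into the nose attribute filter -a '!notdefault,sqs'.)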
+PY3_WHITELIST = (
+    'tests/unit/auth',
+    'tests/unit/beanstalk',
+    'tests/unit/cloudformation',
+    'tests/unit/cloudfront',
+    'tests/unit/cloudsearch',
+    'tests/unit/cloudsearch2',
+    'tests/unit/cloudtrail',
+    'tests/unit/directconnect',
+    'tests/unit/dynamodb',
+    'tests/unit/dynamodb2',
+    'tests/unit/ecs',
+    'tests/unit/elasticache',
+    'tests/unit/emr',
+    'tests/unit/glacier',
+    'tests/unit/iam',
+    'tests/unit/ec2',
+    'tests/unit/logs',
+    'tests/unit/manage',
+    'tests/unit/mws',
+    'tests/unit/provider',
+    'tests/unit/rds2',
+    'tests/unit/route53',
+    'tests/unit/s3',
+    'tests/unit/sns',
+    'tests/unit/ses',
+    'tests/unit/sqs',
+    'tests/unit/sts',
+    'tests/unit/swf',
+    'tests/unit/utils',
+    'tests/unit/vpc',
+    'tests/unit/test_connection.py',
+    'tests/unit/test_exception.py',
+    'tests/unit/test_regioninfo.py',
+)
+
+
+def main(whitelist=[]):
+    description = ("Runs boto unit and/or integration tests. "
+                   "Arguments will be passed on to nosetests. "
+                   "See nosetests --help for more information.")
+    parser = argparse.ArgumentParser(description=description)
+    parser.add_argument('-t', '--service-tests', action="append", default=[],
+                        help="Run tests for a given service. This will "
+                             "run any test tagged with the specified value, "
+                             "e.g. -t s3 -t ec2")
+    known_args, remaining_args = parser.parse_known_args()
+    attribute_args = []
+    for service_attribute in known_args.service_tests:
+        attribute_args.extend(['-a', '!notdefault,' + service_attribute])
+    if not attribute_args:
+        # If the user did not specify any filtering criteria, we at least
+        # will filter out any test tagged 'notdefault'.
+        attribute_args = ['-a', '!notdefault']
+
+    # Set default tests used by e.g. tox. For Py2 this means all unit
+    # tests, while for Py3 it's just whitelisted ones.
+    if 'default' in remaining_args:
+        # Run from the base project directory
+        os.chdir(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+        for i, arg in enumerate(remaining_args):
+            if arg == 'default':
+                if sys.version_info[0] == 3:
+                    del remaining_args[i]
+                    remaining_args += PY3_WHITELIST
+                else:
+                    remaining_args[i] = 'tests/unit'
+
+    all_args = [__file__] + attribute_args + remaining_args
+    print("nose command:", ' '.join(all_args))
+    if run(argv=all_args):
+        # run will return True if all the tests pass. We want
+        # this to equal a 0 rc
+        return 0
+    else:
+        return 1
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3332965701a380a6991a57d70f4174f5bfe2dc4d
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/__init__.py
@@ -0,0 +1,110 @@
+from boto.compat import http_client
+from tests.compat import mock, unittest
+
+
+class AWSMockServiceTestCase(unittest.TestCase):
+    """Base class for mocking aws services."""
+    # This param is used by the unittest module to display a full
+    # diff when assert*Equal methods produce an error message.
+    maxDiff = None
+    connection_class = None
+
+    def setUp(self):
+        self.https_connection = mock.Mock(spec=http_client.HTTPSConnection)
+        self.https_connection.debuglevel = 0
+        self.https_connection_factory = (
+            mock.Mock(return_value=self.https_connection), ())
+        self.service_connection = self.create_service_connection(
+            https_connection_factory=self.https_connection_factory,
+            aws_access_key_id='aws_access_key_id',
+            aws_secret_access_key='aws_secret_access_key')
+        self.initialize_service_connection()
+
+    def initialize_service_connection(self):
+        self.actual_request = None
+        self.original_mexe = self.service_connection._mexe
+        self.service_connection._mexe = self._mexe_spy
+        self.proxy = None
+        self.use_proxy = False
+
+    def create_service_connection(self, **kwargs):
+        if self.connection_class is None:
+            raise ValueError("The connection_class class attribute must be "
+                             "set to a non-None value.")
+        return self.connection_class(**kwargs)
+
+    def _mexe_spy(self, request, *args, **kwargs):
+        self.actual_request = request
+        return self.original_mexe(request, *args, **kwargs)
+
+    def create_response(self, status_code, reason='', header=[], body=None):
+        if body is None:
+            body = self.default_body()
+        response = mock.Mock(spec=http_client.HTTPResponse)
+        response.status = status_code
+        response.read.return_value = body
+        response.reason = reason
+
+        response.getheaders.return_value = header
+        response.msg = dict(header)
+
+        def overwrite_header(arg, default=None):
+            header_dict = dict(header)
+            if arg in header_dict:
+                return header_dict[arg]
+            else:
+                return default
+        response.getheader.side_effect = overwrite_header
+
+        return response
+
+    def assert_request_parameters(self, params, ignore_params_values=None):
+        """Verify the actual parameters sent to the service API."""
+        request_params = self.actual_request.params.copy()
+        if ignore_params_values is not None:
+            for param in ignore_params_values:
+                try:
+                    del request_params[param]
+                except KeyError:
+                    pass
+        self.assertDictEqual(request_params, params)
+
+    def set_http_response(self, status_code, reason='', header=[], body=None):
+        http_response = self.create_response(status_code, reason, header, body)
+        self.https_connection.getresponse.return_value = http_response
+
+    def default_body(self):
+        return ''
+
+
+class MockServiceWithConfigTestCase(AWSMockServiceTestCase):
+    def setUp(self):
+        super(MockServiceWithConfigTestCase, self).setUp()
+        self.environ = {}
+        self.config = {}
+        self.config_patch = mock.patch('boto.provider.config.get',
+                                       self.get_config)
+        self.has_config_patch = mock.patch('boto.provider.config.has_option',
+                                           self.has_config)
+        self.environ_patch = mock.patch('os.environ', self.environ)
+        self.config_patch.start()
+        self.has_config_patch.start()
+        self.environ_patch.start()
+
+    def tearDown(self):
+        self.config_patch.stop()
+        self.has_config_patch.stop()
+        self.environ_patch.stop()
+
+    def has_config(self, section_name, key):
+        try:
+            self.config[section_name][key]
+            return True
+        except KeyError:
+            return False
+
+    def get_config(self, section_name, key, default=None):
+        try:
+            return self.config[section_name][key]
+        except KeyError:
+            return default
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/auth/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/auth/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/auth/test_sigv4.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/auth/test_sigv4.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f7876b6a99abb205b7bf3169fd6fe71e4457199
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/auth/test_sigv4.py
@@ -0,0 +1,594 @@
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import copy
+import pickle
+import os
+from tests.compat import unittest, mock
+from tests.unit import MockServiceWithConfigTestCase
+
+from boto.auth import HmacAuthV4Handler
+from boto.auth import S3HmacAuthV4Handler
+from boto.auth import detect_potential_s3sigv4
+from boto.auth import detect_potential_sigv4
+from boto.connection import HTTPRequest
+from boto.provider import Provider
+from boto.regioninfo import RegionInfo
+
+
+class TestSigV4Handler(unittest.TestCase):
+    def setUp(self):
+        self.provider = mock.Mock()
+        self.provider.access_key = 'access_key'
+        self.provider.secret_key = 'secret_key'
+        self.request = HTTPRequest(
+            'POST', 'https', 'glacier.us-east-1.amazonaws.com', 443,
+            '/-/vaults/foo/archives', None, {},
+            {'x-amz-glacier-version': '2012-06-01'}, '')
+
+    def test_not_adding_empty_qs(self):
+        self.provider.security_token = None
+        auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com', mock.Mock(), self.provider)
+        req = copy.copy(self.request)
+        auth.add_auth(req)
+        self.assertEqual(req.path, '/-/vaults/foo/archives')
+
+    def test_inner_whitespace_is_collapsed(self):
+        auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com',
+                                 mock.Mock(), self.provider)
+        self.request.headers['x-amz-archive-description'] = 'two  spaces'
+        self.request.headers['x-amz-quoted-string'] = '  "a   b   c"  '
+        headers = auth.headers_to_sign(self.request)
+        self.assertEqual(headers, {'Host': 'glacier.us-east-1.amazonaws.com',
+                                   'x-amz-archive-description': 'two  spaces',
+                                   'x-amz-glacier-version': '2012-06-01',
+                                   'x-amz-quoted-string': '  "a   b   c"  '})
+        # Note the single space between the "two spaces".
+        self.assertEqual(auth.canonical_headers(headers),
+                         'host:glacier.us-east-1.amazonaws.com\n'
+                         'x-amz-archive-description:two spaces\n'
+                         'x-amz-glacier-version:2012-06-01\n'
+                         'x-amz-quoted-string:"a   b   c"')
+
+    def test_canonical_query_string(self):
+        auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com',
+                                 mock.Mock(), self.provider)
+        request = HTTPRequest(
+            'GET', 'https', 'glacier.us-east-1.amazonaws.com', 443,
+            '/-/vaults/foo/archives', None, {},
+            {'x-amz-glacier-version': '2012-06-01'}, '')
+        request.params['Foo.1'] = 'aaa'
+        request.params['Foo.10'] = 'zzz'
+        query_string = auth.canonical_query_string(request)
+        self.assertEqual(query_string, 'Foo.1=aaa&Foo.10=zzz')
+
+    def test_query_string(self):
+        auth = HmacAuthV4Handler('sns.us-east-1.amazonaws.com',
+                                 mock.Mock(), self.provider)
+        params = {
+            'Message': u'We \u2665 utf-8'.encode('utf-8'),
+        }
+        request = HTTPRequest(
+            'POST', 'https', 'sns.us-east-1.amazonaws.com', 443,
+            '/', None, params, {}, '')
+        query_string = auth.query_string(request)
+        self.assertEqual(query_string, 'Message=We%20%E2%99%A5%20utf-8')
+
+    def test_canonical_uri(self):
+        auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com',
+                                 mock.Mock(), self.provider)
+        request = HTTPRequest(
+            'GET', 'https', 'glacier.us-east-1.amazonaws.com', 443,
+            'x/./././x .html', None, {},
+            {'x-amz-glacier-version': '2012-06-01'}, '')
+        canonical_uri = auth.canonical_uri(request)
+        # This should be both normalized & urlencoded.
+        self.assertEqual(canonical_uri, 'x/x%20.html')
+
+        auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com',
+                                 mock.Mock(), self.provider)
+        request = HTTPRequest(
+            'GET', 'https', 'glacier.us-east-1.amazonaws.com', 443,
+            'x/./././x/html/', None, {},
+            {'x-amz-glacier-version': '2012-06-01'}, '')
+        canonical_uri = auth.canonical_uri(request)
+        # Trailing slashes should be preserved.
+        self.assertEqual(canonical_uri, 'x/x/html/')
+
+        request = HTTPRequest(
+            'GET', 'https', 'glacier.us-east-1.amazonaws.com', 443,
+            '/', None, {},
+            {'x-amz-glacier-version': '2012-06-01'}, '')
+        canonical_uri = auth.canonical_uri(request)
+        # There should not be two-slashes.
+ self.assertEqual(canonical_uri, '/') + + # Make sure Windows-style slashes are converted properly + request = HTTPRequest( + 'GET', 'https', 'glacier.us-east-1.amazonaws.com', 443, + '\\x\\x.html', None, {}, + {'x-amz-glacier-version': '2012-06-01'}, '') + canonical_uri = auth.canonical_uri(request) + self.assertEqual(canonical_uri, '/x/x.html') + + def test_credential_scope(self): + # test the AWS standard regions IAM endpoint + auth = HmacAuthV4Handler('iam.amazonaws.com', + mock.Mock(), self.provider) + request = HTTPRequest( + 'POST', 'https', 'iam.amazonaws.com', 443, + '/', '/', + {'Action': 'ListAccountAliases', 'Version': '2010-05-08'}, + { + 'Content-Length': '44', + 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', + 'X-Amz-Date': '20130808T013210Z' + }, + 'Action=ListAccountAliases&Version=2010-05-08') + credential_scope = auth.credential_scope(request) + region_name = credential_scope.split('/')[1] + self.assertEqual(region_name, 'us-east-1') + + # test the AWS GovCloud region IAM endpoint + auth = HmacAuthV4Handler('iam.us-gov.amazonaws.com', + mock.Mock(), self.provider) + request = HTTPRequest( + 'POST', 'https', 'iam.us-gov.amazonaws.com', 443, + '/', '/', + {'Action': 'ListAccountAliases', 'Version': '2010-05-08'}, + { + 'Content-Length': '44', + 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', + 'X-Amz-Date': '20130808T013210Z' + }, + 'Action=ListAccountAliases&Version=2010-05-08') + credential_scope = auth.credential_scope(request) + region_name = credential_scope.split('/')[1] + self.assertEqual(region_name, 'us-gov-west-1') + + # iam.us-west-1.amazonaws.com does not exist however this + # covers the remaining region_name control structure for a + # different region name + auth = HmacAuthV4Handler('iam.us-west-1.amazonaws.com', + mock.Mock(), self.provider) + request = HTTPRequest( + 'POST', 'https', 'iam.us-west-1.amazonaws.com', 443, + '/', '/', + {'Action': 'ListAccountAliases', 'Version': '2010-05-08'}, + { + 'Content-Length': '44', + 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', + 'X-Amz-Date': '20130808T013210Z' + }, + 'Action=ListAccountAliases&Version=2010-05-08') + credential_scope = auth.credential_scope(request) + region_name = credential_scope.split('/')[1] + self.assertEqual(region_name, 'us-west-1') + + # Test connections to custom locations, e.g. localhost:8080 + auth = HmacAuthV4Handler('localhost', mock.Mock(), self.provider, + service_name='iam') + + request = HTTPRequest( + 'POST', 'http', 'localhost', 8080, + '/', '/', + {'Action': 'ListAccountAliases', 'Version': '2010-05-08'}, + { + 'Content-Length': '44', + 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', + 'X-Amz-Date': '20130808T013210Z' + }, + 'Action=ListAccountAliases&Version=2010-05-08') + credential_scope = auth.credential_scope(request) + timestamp, region, service, v = credential_scope.split('/') + self.assertEqual(region, 'localhost') + self.assertEqual(service, 'iam') + + def test_headers_to_sign(self): + auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com', + mock.Mock(), self.provider) + request = HTTPRequest( + 'GET', 'http', 'glacier.us-east-1.amazonaws.com', 80, + 'x/./././x .html', None, {}, + {'x-amz-glacier-version': '2012-06-01'}, '') + headers = auth.headers_to_sign(request) + # Port 80 & not secure excludes the port. 
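+        # (In other words, the Host header should only carry an explicit
+        # port when it is a non-default one; compare the 8080 case below.)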
+ self.assertEqual(headers['Host'], 'glacier.us-east-1.amazonaws.com') + + request = HTTPRequest( + 'GET', 'https', 'glacier.us-east-1.amazonaws.com', 443, + 'x/./././x .html', None, {}, + {'x-amz-glacier-version': '2012-06-01'}, '') + headers = auth.headers_to_sign(request) + # SSL port excludes the port. + self.assertEqual(headers['Host'], 'glacier.us-east-1.amazonaws.com') + + request = HTTPRequest( + 'GET', 'https', 'glacier.us-east-1.amazonaws.com', 8080, + 'x/./././x .html', None, {}, + {'x-amz-glacier-version': '2012-06-01'}, '') + headers = auth.headers_to_sign(request) + # URL should include port. + self.assertEqual(headers['Host'], 'glacier.us-east-1.amazonaws.com:8080') + + def test_region_and_service_can_be_overriden(self): + auth = HmacAuthV4Handler('queue.amazonaws.com', + mock.Mock(), self.provider) + self.request.headers['X-Amz-Date'] = '20121121000000' + + auth.region_name = 'us-west-2' + auth.service_name = 'sqs' + scope = auth.credential_scope(self.request) + self.assertEqual(scope, '20121121/us-west-2/sqs/aws4_request') + + def test_pickle_works(self): + provider = Provider('aws', access_key='access_key', + secret_key='secret_key') + auth = HmacAuthV4Handler('queue.amazonaws.com', None, provider) + + # Pickle it! + pickled = pickle.dumps(auth) + + # Now restore it + auth2 = pickle.loads(pickled) + self.assertEqual(auth.host, auth2.host) + + def test_bytes_header(self): + auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com', + mock.Mock(), self.provider) + request = HTTPRequest( + 'GET', 'http', 'glacier.us-east-1.amazonaws.com', 80, + 'x/./././x .html', None, {}, + {'x-amz-glacier-version': '2012-06-01', 'x-amz-hash': b'f00'}, '') + canonical = auth.canonical_request(request) + + self.assertIn('f00', canonical) + + +class TestS3HmacAuthV4Handler(unittest.TestCase): + def setUp(self): + self.provider = mock.Mock() + self.provider.access_key = 'access_key' + self.provider.secret_key = 'secret_key' + self.provider.security_token = 'sekret_tokens' + self.request = HTTPRequest( + 'GET', 'https', 's3-us-west-2.amazonaws.com', 443, + '/awesome-bucket/?max-keys=0', None, {}, + {}, '' + ) + self.awesome_bucket_request = HTTPRequest( + method='GET', + protocol='https', + host='awesome-bucket.s3-us-west-2.amazonaws.com', + port=443, + path='/', + auth_path=None, + params={ + 'max-keys': 0, + }, + headers={ + 'User-Agent': 'Boto', + 'X-AMZ-Content-sha256': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855', + 'X-AMZ-Date': '20130605T193245Z', + }, + body='' + ) + self.auth = S3HmacAuthV4Handler( + host='awesome-bucket.s3-us-west-2.amazonaws.com', + config=mock.Mock(), + provider=self.provider, + region_name='s3-us-west-2' + ) + + def test_clean_region_name(self): + # Untouched. + cleaned = self.auth.clean_region_name('us-west-2') + self.assertEqual(cleaned, 'us-west-2') + + # Stripped of the ``s3-`` prefix. + cleaned = self.auth.clean_region_name('s3-us-west-2') + self.assertEqual(cleaned, 'us-west-2') + + # Untouched (classic). + cleaned = self.auth.clean_region_name('s3.amazonaws.com') + self.assertEqual(cleaned, 's3.amazonaws.com') + + # Untouched. + cleaned = self.auth.clean_region_name('something-s3-us-west-2') + self.assertEqual(cleaned, 'something-s3-us-west-2') + + def test_region_stripping(self): + auth = S3HmacAuthV4Handler( + host='s3-us-west-2.amazonaws.com', + config=mock.Mock(), + provider=self.provider + ) + self.assertEqual(auth.region_name, None) + + # What we wish we got. 
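+        # (An explicit region_name argument should take precedence over
+        # whatever can be parsed out of the endpoint host name.)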
+ auth = S3HmacAuthV4Handler( + host='s3-us-west-2.amazonaws.com', + config=mock.Mock(), + provider=self.provider, + region_name='us-west-2' + ) + self.assertEqual(auth.region_name, 'us-west-2') + + # What we actually get (i.e. ``s3-us-west-2``). + self.assertEqual(self.auth.region_name, 'us-west-2') + + def test_determine_region_name(self): + name = self.auth.determine_region_name('s3-us-west-2.amazonaws.com') + self.assertEqual(name, 'us-west-2') + + def test_canonical_uri(self): + request = HTTPRequest( + 'GET', 'https', 's3-us-west-2.amazonaws.com', 443, + 'x/./././x .html', None, {}, + {}, '' + ) + canonical_uri = self.auth.canonical_uri(request) + # S3 doesn't canonicalize the way other SigV4 services do. + # This just urlencoded, no normalization of the path. + self.assertEqual(canonical_uri, 'x/./././x%20.html') + + def test_determine_service_name(self): + # What we wish we got. + name = self.auth.determine_service_name( + 's3.us-west-2.amazonaws.com' + ) + self.assertEqual(name, 's3') + + # What we actually get. + name = self.auth.determine_service_name( + 's3-us-west-2.amazonaws.com' + ) + self.assertEqual(name, 's3') + + # What we wish we got with virtual hosting. + name = self.auth.determine_service_name( + 'bucket.s3.us-west-2.amazonaws.com' + ) + self.assertEqual(name, 's3') + + # What we actually get with virtual hosting. + name = self.auth.determine_service_name( + 'bucket.s3-us-west-2.amazonaws.com' + ) + self.assertEqual(name, 's3') + + def test_add_auth(self): + # The side-effects sideshow. + self.assertFalse('x-amz-content-sha256' in self.request.headers) + self.auth.add_auth(self.request) + self.assertTrue('x-amz-content-sha256' in self.request.headers) + the_sha = self.request.headers['x-amz-content-sha256'] + self.assertEqual( + the_sha, + 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + ) + + def test_host_header(self): + host = self.auth.host_header( + self.awesome_bucket_request.host, + self.awesome_bucket_request + ) + self.assertEqual(host, 'awesome-bucket.s3-us-west-2.amazonaws.com') + + def test_canonical_query_string(self): + qs = self.auth.canonical_query_string(self.awesome_bucket_request) + self.assertEqual(qs, 'max-keys=0') + + def test_correct_handling_of_plus_sign(self): + request = HTTPRequest( + 'GET', 'https', 's3-us-west-2.amazonaws.com', 443, + 'hello+world.txt', None, {}, + {}, '' + ) + canonical_uri = self.auth.canonical_uri(request) + # Ensure that things are properly quoted. + self.assertEqual(canonical_uri, 'hello%2Bworld.txt') + + request = HTTPRequest( + 'GET', 'https', 's3-us-west-2.amazonaws.com', 443, + 'hello%2Bworld.txt', None, {}, + {}, '' + ) + canonical_uri = self.auth.canonical_uri(request) + # Verify double escaping hasn't occurred. + self.assertEqual(canonical_uri, 'hello%2Bworld.txt') + + def test_mangle_path_and_params(self): + request = HTTPRequest( + method='GET', + protocol='https', + host='awesome-bucket.s3-us-west-2.amazonaws.com', + port=443, + # LOOK AT THIS PATH. JUST LOOK AT IT. + path='/?delete&max-keys=0', + auth_path=None, + params={ + 'key': 'why hello there', + # This gets overwritten, to make sure back-compat is maintained. 
+ 'max-keys': 1, + }, + headers={ + 'User-Agent': 'Boto', + 'X-AMZ-Content-sha256': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855', + 'X-AMZ-Date': '20130605T193245Z', + }, + body='' + ) + + mod_req = self.auth.mangle_path_and_params(request) + self.assertEqual(mod_req.path, '/?delete&max-keys=0') + self.assertEqual(mod_req.auth_path, '/') + self.assertEqual(mod_req.params, { + 'max-keys': '0', + 'key': 'why hello there', + 'delete': '' + }) + + def test_canonical_request(self): + expected = """GET +/ +max-keys=0 +host:awesome-bucket.s3-us-west-2.amazonaws.com +user-agent:Boto +x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 +x-amz-date:20130605T193245Z + +host;user-agent;x-amz-content-sha256;x-amz-date +e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855""" + + authed_req = self.auth.canonical_request(self.awesome_bucket_request) + self.assertEqual(authed_req, expected) + + # Now the way ``boto.s3`` actually sends data. + request = copy.copy(self.awesome_bucket_request) + request.path = request.auth_path = '/?max-keys=0' + request.params = {} + expected = """GET +/ +max-keys=0 +host:awesome-bucket.s3-us-west-2.amazonaws.com +user-agent:Boto +x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 +x-amz-date:20130605T193245Z + +host;user-agent;x-amz-content-sha256;x-amz-date +e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855""" + + # Pre-mangle it. In practice, this happens as part of ``add_auth``, + # but that's a side-effect that's hard to test. + request = self.auth.mangle_path_and_params(request) + authed_req = self.auth.canonical_request(request) + self.assertEqual(authed_req, expected) + + def test_non_string_headers(self): + self.awesome_bucket_request.headers['Content-Length'] = 8 + canonical_headers = self.auth.canonical_headers( + self.awesome_bucket_request.headers) + self.assertEqual( + canonical_headers, + 'content-length:8\n' + 'user-agent:Boto\n' + 'x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae' + '41e4649b934ca495991b7852b855\n' + 'x-amz-date:20130605T193245Z' + ) + + +class FakeS3Connection(object): + def __init__(self, *args, **kwargs): + self.host = kwargs.pop('host', None) + + @detect_potential_s3sigv4 + def _required_auth_capability(self): + return ['nope'] + + def _mexe(self, *args, **kwargs): + pass + + +class FakeEC2Connection(object): + def __init__(self, *args, **kwargs): + self.region = kwargs.pop('region', None) + + @detect_potential_sigv4 + def _required_auth_capability(self): + return ['nope'] + + def _mexe(self, *args, **kwargs): + pass + + +class TestS3SigV4OptIn(MockServiceWithConfigTestCase): + connection_class = FakeS3Connection + + def test_sigv4_opt_out(self): + # Default is opt-out. + fake = FakeS3Connection(host='s3.amazonaws.com') + self.assertEqual(fake._required_auth_capability(), ['nope']) + + def test_sigv4_non_optional(self): + # Requires SigV4. + for region in ['.cn-north', '.eu-central', '-eu-central']: + fake = FakeS3Connection(host='s3' + region + '-1.amazonaws.com') + self.assertEqual( + fake._required_auth_capability(), ['hmac-v4-s3']) + + def test_sigv4_opt_in_config(self): + # Opt-in via the config. + self.config = { + 's3': { + 'use-sigv4': True, + }, + } + fake = FakeS3Connection() + self.assertEqual(fake._required_auth_capability(), ['hmac-v4-s3']) + + def test_sigv4_opt_in_env(self): + # Opt-in via the ENV. 
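+        # (os.environ is replaced with a plain dict by
+        # MockServiceWithConfigTestCase.setUp, so this assignment cannot
+        # leak into the real environment.)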
+ self.environ['S3_USE_SIGV4'] = True + fake = FakeS3Connection(host='s3.amazonaws.com') + self.assertEqual(fake._required_auth_capability(), ['hmac-v4-s3']) + + +class TestSigV4OptIn(MockServiceWithConfigTestCase): + connection_class = FakeEC2Connection + + def setUp(self): + super(TestSigV4OptIn, self).setUp() + self.standard_region = RegionInfo( + name='us-west-2', + endpoint='ec2.us-west-2.amazonaws.com' + ) + self.sigv4_region = RegionInfo( + name='cn-north-1', + endpoint='ec2.cn-north-1.amazonaws.com.cn' + ) + + def test_sigv4_opt_out(self): + # Default is opt-out. + fake = FakeEC2Connection(region=self.standard_region) + self.assertEqual(fake._required_auth_capability(), ['nope']) + + def test_sigv4_non_optional(self): + # Requires SigV4. + fake = FakeEC2Connection(region=self.sigv4_region) + self.assertEqual(fake._required_auth_capability(), ['hmac-v4']) + + def test_sigv4_opt_in_config(self): + # Opt-in via the config. + self.config = { + 'ec2': { + 'use-sigv4': True, + }, + } + fake = FakeEC2Connection(region=self.standard_region) + self.assertEqual(fake._required_auth_capability(), ['hmac-v4']) + + def test_sigv4_opt_in_env(self): + # Opt-in via the ENV. + self.environ['EC2_USE_SIGV4'] = True + fake = FakeEC2Connection(region=self.standard_region) + self.assertEqual(fake._required_auth_capability(), ['hmac-v4']) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/auth/test_stsanon.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/auth/test_stsanon.py new file mode 100644 index 0000000000000000000000000000000000000000..9079a7d834ad50728a8a2fe26227e50fe6714630 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/auth/test_stsanon.py @@ -0,0 +1,78 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# +import copy +from mock import Mock +from tests.unit import unittest + +from boto.auth import STSAnonHandler +from boto.connection import HTTPRequest + + +class TestSTSAnonHandler(unittest.TestCase): + def setUp(self): + self.provider = Mock() + self.provider.access_key = 'access_key' + self.provider.secret_key = 'secret_key' + self.request = HTTPRequest( + method='GET', + protocol='https', + host='sts.amazonaws.com', + port=443, + path='/', + auth_path=None, + params={ + 'Action': 'AssumeRoleWithWebIdentity', + 'Version': '2011-06-15', + 'RoleSessionName': 'web-identity-federation', + 'ProviderId': '2012-06-01', + 'WebIdentityToken': 'Atza|IQEBLjAsAhRkcxQ', + }, + headers={}, + body='' + ) + + def test_escape_value(self): + auth = STSAnonHandler('sts.amazonaws.com', + Mock(), self.provider) + # This is changed from a previous version because this string is + # being passed to the query string and query strings must + # be url encoded. + value = auth._escape_value('Atza|IQEBLjAsAhRkcxQ') + self.assertEqual(value, 'Atza%7CIQEBLjAsAhRkcxQ') + + def test_build_query_string(self): + auth = STSAnonHandler('sts.amazonaws.com', + Mock(), self.provider) + query_string = auth._build_query_string(self.request.params) + self.assertEqual(query_string, 'Action=AssumeRoleWithWebIdentity' + \ + '&ProviderId=2012-06-01&RoleSessionName=web-identity-federation' + \ + '&Version=2011-06-15&WebIdentityToken=Atza%7CIQEBLjAsAhRkcxQ') + + def test_add_auth(self): + auth = STSAnonHandler('sts.amazonaws.com', + Mock(), self.provider) + req = copy.copy(self.request) + auth.add_auth(req) + self.assertEqual(req.body, + 'Action=AssumeRoleWithWebIdentity' + \ + '&ProviderId=2012-06-01&RoleSessionName=web-identity-federation' + \ + '&Version=2011-06-15&WebIdentityToken=Atza%7CIQEBLjAsAhRkcxQ') diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/awslambda/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/awslambda/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a3575e2ed1f4fb166a786d2a051f6d43d682ece2 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/awslambda/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/awslambda/test_awslambda.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/awslambda/test_awslambda.py new file mode 100644 index 0000000000000000000000000000000000000000..3e36aee5fc03462c3c48fa7ded71eaf77f1d7959 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/awslambda/test_awslambda.py @@ -0,0 +1,117 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import tempfile +import shutil +import os +import socket + +from boto.compat import json +from boto.awslambda.layer1 import AWSLambdaConnection +from tests.unit import AWSMockServiceTestCase +from tests.compat import mock + + +class TestAWSLambda(AWSMockServiceTestCase): + connection_class = AWSLambdaConnection + + def default_body(self): + return b'{}' + + def test_upload_function_binary(self): + self.set_http_response(status_code=201) + function_data = b'This is my file' + self.service_connection.upload_function( + function_name='my-function', + function_zip=function_data, + role='myrole', + handler='myhandler', + mode='event', + runtime='nodejs' + ) + self.assertEqual(self.actual_request.body, function_data) + self.assertEqual( + self.actual_request.headers['Content-Length'], + str(len(function_data)) + ) + self.assertEqual( + self.actual_request.path, + '/2014-11-13/functions/my-function?Handler=myhandler&Mode' + '=event&Role=myrole&Runtime=nodejs' + ) + + def test_upload_function_file(self): + self.set_http_response(status_code=201) + rootdir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, rootdir) + + filename = 'test_file' + function_data = b'This is my file' + full_path = os.path.join(rootdir, filename) + + with open(full_path, 'wb') as f: + f.write(function_data) + + with open(full_path, 'rb') as f: + self.service_connection.upload_function( + function_name='my-function', + function_zip=f, + role='myrole', + handler='myhandler', + mode='event', + runtime='nodejs' + ) + self.assertEqual(self.actual_request.body.read(), + function_data) + self.assertEqual( + self.actual_request.headers['Content-Length'], + str(len(function_data)) + ) + self.assertEqual( + self.actual_request.path, + '/2014-11-13/functions/my-function?Handler=myhandler&Mode' + '=event&Role=myrole&Runtime=nodejs' + ) + + def test_upload_function_unseekable_file_no_tell(self): + sock = socket.socket() + with self.assertRaises(TypeError): + self.service_connection.upload_function( + function_name='my-function', + 
function_zip=sock, + role='myrole', + handler='myhandler', + mode='event', + runtime='nodejs' + ) + + def test_upload_function_unseekable_file_cannot_tell(self): + mock_file = mock.Mock() + mock_file.tell.side_effect = IOError + with self.assertRaises(TypeError): + self.service_connection.upload_function( + function_name='my-function', + function_zip=mock_file, + role='myrole', + handler='myhandler', + mode='event', + runtime='nodejs' + ) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/beanstalk/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/beanstalk/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/beanstalk/test_exception.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/beanstalk/test_exception.py new file mode 100644 index 0000000000000000000000000000000000000000..23d3656e5bf6c98807a97fa6d66a478d4ede226d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/beanstalk/test_exception.py @@ -0,0 +1,49 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
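+#
+# These tests exercise boto.beanstalk.exception.simple(), which builds an
+# exception class named after the error code on the response (dropping a
+# trailing 'Exception' from names like 'TooManyApplicationsException') and
+# carries the response body through as the exception message.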
+
+from boto.beanstalk.exception import simple
+from tests.compat import unittest
+
+
+class FakeError(object):
+    def __init__(self, code, status, reason, body):
+        self.code = code
+        self.status = status
+        self.reason = reason
+        self.body = body
+
+
+class TestExceptions(unittest.TestCase):
+    def test_exception_class_names(self):
+        # Create exception from class name
+        error = FakeError('TooManyApplications', 400, 'foo', 'bar')
+        exception = simple(error)
+        self.assertEqual(exception.__class__.__name__, 'TooManyApplications')
+
+        # Create exception from class name + 'Exception' as seen from the
+        # live service today
+        error = FakeError('TooManyApplicationsException', 400, 'foo', 'bar')
+        exception = simple(error)
+        self.assertEqual(exception.__class__.__name__, 'TooManyApplications')
+
+        # Make sure message body is present
+        self.assertEqual(exception.message, 'bar')
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/beanstalk/test_layer1.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/beanstalk/test_layer1.py
new file mode 100644
index 0000000000000000000000000000000000000000..c59cfafad7f6a7c6f190cda43d41fdecccc66a20
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/beanstalk/test_layer1.py
@@ -0,0 +1,149 @@
+#!/usr/bin/env python
+
+import json
+
+from tests.unit import AWSMockServiceTestCase
+
+from boto.beanstalk.layer1 import Layer1
+
+# These tests are just checking the basic structure of
+# the Elastic Beanstalk code, by picking a few calls
+# and verifying we get the expected results with mocked
+# responses. The integration tests actually verify the
+# API calls interact with the service correctly.
+class TestListAvailableSolutionStacks(AWSMockServiceTestCase):
+    connection_class = Layer1
+
+    def default_body(self):
+        return json.dumps(
+            {u'ListAvailableSolutionStacksResponse':
+                {u'ListAvailableSolutionStacksResult':
+                    {u'SolutionStackDetails': [
+                        {u'PermittedFileTypes': [u'war', u'zip'],
+                         u'SolutionStackName': u'32bit Amazon Linux running Tomcat 7'},
+                        {u'PermittedFileTypes': [u'zip'],
+                         u'SolutionStackName': u'32bit Amazon Linux running PHP 5.3'}],
+                     u'SolutionStacks': [u'32bit Amazon Linux running Tomcat 7',
+                                         u'32bit Amazon Linux running PHP 5.3']},
+                 u'ResponseMetadata': {u'RequestId': u'request_id'}}}).encode('utf-8')
+
+    def test_list_available_solution_stacks(self):
+        self.set_http_response(status_code=200)
+        api_response = self.service_connection.list_available_solution_stacks()
+        stack_details = api_response['ListAvailableSolutionStacksResponse']\
+                                    ['ListAvailableSolutionStacksResult']\
+                                    ['SolutionStackDetails']
+        solution_stacks = api_response['ListAvailableSolutionStacksResponse']\
+                                      ['ListAvailableSolutionStacksResult']\
+                                      ['SolutionStacks']
+        self.assertEqual(solution_stacks,
+                         [u'32bit Amazon Linux running Tomcat 7',
+                          u'32bit Amazon Linux running PHP 5.3'])
+        # These are the parameters that are actually sent to the Elastic
+        # Beanstalk service.
+ self.assert_request_parameters({ + 'Action': 'ListAvailableSolutionStacks', + 'ContentType': 'JSON', + 'Version': '2010-12-01', + }) + + +class TestCreateApplicationVersion(AWSMockServiceTestCase): + connection_class = Layer1 + + def default_body(self): + return json.dumps({ + 'CreateApplicationVersionResponse': + {u'CreateApplicationVersionResult': + {u'ApplicationVersion': + {u'ApplicationName': u'application1', + u'DateCreated': 1343067094.342, + u'DateUpdated': 1343067094.342, + u'Description': None, + u'SourceBundle': {u'S3Bucket': u'elasticbeanstalk-us-east-1', + u'S3Key': u'resources/elasticbeanstalk-sampleapp.war'}, + u'VersionLabel': u'version1'}}}}).encode('utf-8') + + def test_create_application_version(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.create_application_version( + 'application1', 'version1', s3_bucket='mybucket', s3_key='mykey', + auto_create_application=True) + app_version = api_response['CreateApplicationVersionResponse']\ + ['CreateApplicationVersionResult']\ + ['ApplicationVersion'] + self.assert_request_parameters({ + 'Action': 'CreateApplicationVersion', + 'ContentType': 'JSON', + 'Version': '2010-12-01', + 'ApplicationName': 'application1', + 'AutoCreateApplication': 'true', + 'SourceBundle.S3Bucket': 'mybucket', + 'SourceBundle.S3Key': 'mykey', + 'VersionLabel': 'version1', + }) + self.assertEqual(app_version['ApplicationName'], 'application1') + self.assertEqual(app_version['VersionLabel'], 'version1') + + +class TestCreateEnvironment(AWSMockServiceTestCase): + connection_class = Layer1 + + def default_body(self): + return json.dumps({}).encode('utf-8') + + def test_create_environment(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.create_environment( + 'application1', 'environment1', 'version1', + '32bit Amazon Linux running Tomcat 7', + option_settings=[ + ('aws:autoscaling:launchconfiguration', 'Ec2KeyName', + 'mykeypair'), + ('aws:elasticbeanstalk:application:environment', 'ENVVAR', + 'VALUE1')]) + self.assert_request_parameters({ + 'Action': 'CreateEnvironment', + 'ApplicationName': 'application1', + 'EnvironmentName': 'environment1', + 'TemplateName': '32bit Amazon Linux running Tomcat 7', + 'ContentType': 'JSON', + 'Version': '2010-12-01', + 'VersionLabel': 'version1', + 'OptionSettings.member.1.Namespace': 'aws:autoscaling:launchconfiguration', + 'OptionSettings.member.1.OptionName': 'Ec2KeyName', + 'OptionSettings.member.1.Value': 'mykeypair', + 'OptionSettings.member.2.Namespace': 'aws:elasticbeanstalk:application:environment', + 'OptionSettings.member.2.OptionName': 'ENVVAR', + 'OptionSettings.member.2.Value': 'VALUE1', + }) + + def test_create_environment_with_tier(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.create_environment( + 'application1', 'environment1', 'version1', + '32bit Amazon Linux running Tomcat 7', + option_settings=[ + ('aws:autoscaling:launchconfiguration', 'Ec2KeyName', + 'mykeypair'), + ('aws:elasticbeanstalk:application:environment', 'ENVVAR', + 'VALUE1')], + tier_name='Worker', tier_type='SQS/HTTP', tier_version='1.0') + self.assert_request_parameters({ + 'Action': 'CreateEnvironment', + 'ApplicationName': 'application1', + 'EnvironmentName': 'environment1', + 'TemplateName': '32bit Amazon Linux running Tomcat 7', + 'ContentType': 'JSON', + 'Version': '2010-12-01', + 'VersionLabel': 'version1', + 'OptionSettings.member.1.Namespace': 'aws:autoscaling:launchconfiguration', + 
'OptionSettings.member.1.OptionName': 'Ec2KeyName',
+            'OptionSettings.member.1.Value': 'mykeypair',
+            'OptionSettings.member.2.Namespace': 'aws:elasticbeanstalk:application:environment',
+            'OptionSettings.member.2.OptionName': 'ENVVAR',
+            'OptionSettings.member.2.Value': 'VALUE1',
+            'Tier.Name': 'Worker',
+            'Tier.Type': 'SQS/HTTP',
+            'Tier.Version': '1.0',
+        })
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudformation/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudformation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudformation/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudformation/test_connection.py
new file mode 100644
index 0000000000000000000000000000000000000000..613e3d2acbf6aebe3d0bea281e11de470a63dd37
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudformation/test_connection.py
@@ -0,0 +1,724 @@
+#!/usr/bin/env python
+import unittest
+from datetime import datetime
+from mock import Mock
+
+from tests.unit import AWSMockServiceTestCase
+from boto.cloudformation.connection import CloudFormationConnection
+from boto.exception import BotoServerError
+from boto.compat import json
+
+SAMPLE_TEMPLATE = r"""
+{
+  "AWSTemplateFormatVersion" : "2010-09-09",
+  "Description" : "Sample template",
+  "Parameters" : {
+    "KeyName" : {
+      "Description" : "key pair",
+      "Type" : "String"
+    }
+  },
+  "Resources" : {
+    "Ec2Instance" : {
+      "Type" : "AWS::EC2::Instance",
+      "Properties" : {
+        "KeyName" : { "Ref" : "KeyName" },
+        "ImageId" : "ami-7f418316",
+        "UserData" : { "Fn::Base64" : "80" }
+      }
+    }
+  },
+  "Outputs" : {
+    "InstanceId" : {
+      "Description" : "InstanceId of the newly created EC2 instance",
+      "Value" : { "Ref" : "Ec2Instance" }
+    }
+  }
+}
+"""
+
+class CloudFormationConnectionBase(AWSMockServiceTestCase):
+    connection_class = CloudFormationConnection
+
+    def setUp(self):
+        super(CloudFormationConnectionBase, self).setUp()
+        self.stack_id = u'arn:aws:cloudformation:us-east-1:18:stack/Name/id'
+
+
+class TestCloudFormationCreateStack(CloudFormationConnectionBase):
+    def default_body(self):
+        return json.dumps(
+            {u'CreateStackResponse':
+                {u'CreateStackResult': {u'StackId': self.stack_id},
+                 u'ResponseMetadata': {u'RequestId': u'1'}}}).encode('utf-8')
+
+    def test_create_stack_has_correct_request_params(self):
+        self.set_http_response(status_code=200)
+        api_response = self.service_connection.create_stack(
+            'stack_name', template_url='http://url',
+            template_body=SAMPLE_TEMPLATE,
+            parameters=[('KeyName', 'myKeyName')],
+            tags={'TagKey': 'TagValue'},
+            notification_arns=['arn:notify1', 'arn:notify2'],
+            disable_rollback=True,
+            timeout_in_minutes=20, capabilities=['CAPABILITY_IAM']
+        )
+        self.assertEqual(api_response, self.stack_id)
+        # These are the parameters that are actually sent to the CloudFormation
+        # service.
+ self.assert_request_parameters({ + 'Action': 'CreateStack', + 'Capabilities.member.1': 'CAPABILITY_IAM', + 'ContentType': 'JSON', + 'DisableRollback': 'true', + 'NotificationARNs.member.1': 'arn:notify1', + 'NotificationARNs.member.2': 'arn:notify2', + 'Parameters.member.1.ParameterKey': 'KeyName', + 'Parameters.member.1.ParameterValue': 'myKeyName', + 'Tags.member.1.Key': 'TagKey', + 'Tags.member.1.Value': 'TagValue', + 'StackName': 'stack_name', + 'Version': '2010-05-15', + 'TimeoutInMinutes': 20, + 'TemplateBody': SAMPLE_TEMPLATE, + 'TemplateURL': 'http://url', + }) + + # The test_create_stack_has_correct_request_params verified all of the + # params needed when making a create_stack service call. The rest of the + # tests for create_stack only verify specific parts of the params sent + # to CloudFormation. + + def test_create_stack_with_minimum_args(self): + # This will fail in practice, but the API docs only require stack_name. + self.set_http_response(status_code=200) + api_response = self.service_connection.create_stack('stack_name') + self.assertEqual(api_response, self.stack_id) + self.assert_request_parameters({ + 'Action': 'CreateStack', + 'ContentType': 'JSON', + 'DisableRollback': 'false', + 'StackName': 'stack_name', + 'Version': '2010-05-15', + }) + + def test_create_stack_fails(self): + self.set_http_response(status_code=400, reason='Bad Request', + body=b'{"Error": {"Code": 1, "Message": "Invalid arg."}}') + with self.assertRaisesRegexp(self.service_connection.ResponseError, + 'Invalid arg.'): + api_response = self.service_connection.create_stack( + 'stack_name', template_body=SAMPLE_TEMPLATE, + parameters=[('KeyName', 'myKeyName')]) + + def test_create_stack_fail_error(self): + self.set_http_response(status_code=400, reason='Bad Request', + body=b'{"RequestId": "abc", "Error": {"Code": 1, "Message": "Invalid arg."}}') + try: + api_response = self.service_connection.create_stack( + 'stack_name', template_body=SAMPLE_TEMPLATE, + parameters=[('KeyName', 'myKeyName')]) + except BotoServerError as e: + self.assertEqual('abc', e.request_id) + self.assertEqual(1, e.error_code) + self.assertEqual('Invalid arg.', e.message) + +class TestCloudFormationUpdateStack(CloudFormationConnectionBase): + def default_body(self): + return json.dumps( + {u'UpdateStackResponse': + {u'UpdateStackResult': {u'StackId': self.stack_id}, + u'ResponseMetadata': {u'RequestId': u'1'}}}).encode('utf-8') + + def test_update_stack_all_args(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.update_stack( + 'stack_name', template_url='http://url', + template_body=SAMPLE_TEMPLATE, + parameters=[('KeyName', 'myKeyName'), ('KeyName2', "", True), + ('KeyName3', "", False), ('KeyName4', None, True), + ('KeyName5', "Ignore Me", True)], + tags={'TagKey': 'TagValue'}, + notification_arns=['arn:notify1', 'arn:notify2'], + disable_rollback=True, + timeout_in_minutes=20, + use_previous_template=True + ) + self.assert_request_parameters({ + 'Action': 'UpdateStack', + 'ContentType': 'JSON', + 'DisableRollback': 'true', + 'NotificationARNs.member.1': 'arn:notify1', + 'NotificationARNs.member.2': 'arn:notify2', + 'Parameters.member.1.ParameterKey': 'KeyName', + 'Parameters.member.1.ParameterValue': 'myKeyName', + 'Parameters.member.2.ParameterKey': 'KeyName2', + 'Parameters.member.2.UsePreviousValue': 'true', + 'Parameters.member.3.ParameterKey': 'KeyName3', + 'Parameters.member.3.ParameterValue': '', + 'Parameters.member.4.UsePreviousValue': 'true', + 
'Parameters.member.4.ParameterKey': 'KeyName4', + 'Parameters.member.5.UsePreviousValue': 'true', + 'Parameters.member.5.ParameterKey': 'KeyName5', + 'Tags.member.1.Key': 'TagKey', + 'Tags.member.1.Value': 'TagValue', + 'StackName': 'stack_name', + 'Version': '2010-05-15', + 'TimeoutInMinutes': 20, + 'TemplateBody': SAMPLE_TEMPLATE, + 'TemplateURL': 'http://url', + 'UsePreviousTemplate': 'true', + }) + + def test_update_stack_with_minimum_args(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.update_stack('stack_name') + self.assertEqual(api_response, self.stack_id) + self.assert_request_parameters({ + 'Action': 'UpdateStack', + 'ContentType': 'JSON', + 'DisableRollback': 'false', + 'StackName': 'stack_name', + 'Version': '2010-05-15', + }) + + def test_update_stack_fails(self): + self.set_http_response(status_code=400, reason='Bad Request', + body=b'Invalid arg.') + with self.assertRaises(self.service_connection.ResponseError): + api_response = self.service_connection.update_stack( + 'stack_name', template_body=SAMPLE_TEMPLATE, + parameters=[('KeyName', 'myKeyName')]) + + +class TestCloudFormationDeleteStack(CloudFormationConnectionBase): + def default_body(self): + return json.dumps( + {u'DeleteStackResponse': + {u'ResponseMetadata': {u'RequestId': u'1'}}}).encode('utf-8') + + def test_delete_stack(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.delete_stack('stack_name') + self.assertEqual(api_response, json.loads(self.default_body().decode('utf-8'))) + self.assert_request_parameters({ + 'Action': 'DeleteStack', + 'ContentType': 'JSON', + 'StackName': 'stack_name', + 'Version': '2010-05-15', + }) + + def test_delete_stack_fails(self): + self.set_http_response(status_code=400) + with self.assertRaises(self.service_connection.ResponseError): + api_response = self.service_connection.delete_stack('stack_name') + + +class TestCloudFormationDescribeStackResource(CloudFormationConnectionBase): + def default_body(self): + return json.dumps('fake server response').encode('utf-8') + + def test_describe_stack_resource(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.describe_stack_resource( + 'stack_name', 'resource_id') + self.assertEqual(api_response, 'fake server response') + self.assert_request_parameters({ + 'Action': 'DescribeStackResource', + 'ContentType': 'JSON', + 'LogicalResourceId': 'resource_id', + 'StackName': 'stack_name', + 'Version': '2010-05-15', + }) + + def test_describe_stack_resource_fails(self): + self.set_http_response(status_code=400) + with self.assertRaises(self.service_connection.ResponseError): + api_response = self.service_connection.describe_stack_resource( + 'stack_name', 'resource_id') + + +class TestCloudFormationGetTemplate(CloudFormationConnectionBase): + def default_body(self): + return json.dumps('fake server response').encode('utf-8') + + def test_get_template(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.get_template('stack_name') + self.assertEqual(api_response, 'fake server response') + self.assert_request_parameters({ + 'Action': 'GetTemplate', + 'ContentType': 'JSON', + 'StackName': 'stack_name', + 'Version': '2010-05-15', + }) + + + def test_get_template_fails(self): + self.set_http_response(status_code=400) + with self.assertRaises(self.service_connection.ResponseError): + api_response = self.service_connection.get_template('stack_name') + + +class 
TestCloudFormationGetStackevents(CloudFormationConnectionBase): + def default_body(self): + return b""" + + + + Event-1-Id + arn:aws:cfn:us-east-1:1:stack + MyStack + MyStack + MyStack_One + AWS::CloudFormation::Stack + 2010-07-27T22:26:28Z + CREATE_IN_PROGRESS + User initiated + + + Event-2-Id + arn:aws:cfn:us-east-1:1:stack + MyStack + MySG1 + MyStack_SG1 + AWS::SecurityGroup + 2010-07-27T22:28:28Z + CREATE_COMPLETE + + + + """ + + def test_describe_stack_events(self): + self.set_http_response(status_code=200) + first, second = self.service_connection.describe_stack_events('stack_name', next_token='next_token') + self.assertEqual(first.event_id, 'Event-1-Id') + self.assertEqual(first.logical_resource_id, 'MyStack') + self.assertEqual(first.physical_resource_id, 'MyStack_One') + self.assertEqual(first.resource_properties, None) + self.assertEqual(first.resource_status, 'CREATE_IN_PROGRESS') + self.assertEqual(first.resource_status_reason, 'User initiated') + self.assertEqual(first.resource_type, 'AWS::CloudFormation::Stack') + self.assertEqual(first.stack_id, 'arn:aws:cfn:us-east-1:1:stack') + self.assertEqual(first.stack_name, 'MyStack') + self.assertIsNotNone(first.timestamp) + + self.assertEqual(second.event_id, 'Event-2-Id') + self.assertEqual(second.logical_resource_id, 'MySG1') + self.assertEqual(second.physical_resource_id, 'MyStack_SG1') + self.assertEqual(second.resource_properties, None) + self.assertEqual(second.resource_status, 'CREATE_COMPLETE') + self.assertEqual(second.resource_status_reason, None) + self.assertEqual(second.resource_type, 'AWS::SecurityGroup') + self.assertEqual(second.stack_id, 'arn:aws:cfn:us-east-1:1:stack') + self.assertEqual(second.stack_name, 'MyStack') + self.assertIsNotNone(second.timestamp) + + self.assert_request_parameters({ + 'Action': 'DescribeStackEvents', + 'NextToken': 'next_token', + 'StackName': 'stack_name', + 'Version': '2010-05-15', + }) + + +class TestCloudFormationDescribeStackResources(CloudFormationConnectionBase): + def default_body(self): + return b""" + + + + arn:aws:cfn:us-east-1:1:stack + MyStack + MyDBInstance + MyStack_DB1 + AWS::DBInstance + 2010-07-27T22:27:28Z + CREATE_COMPLETE + + + arn:aws:cfn:us-east-1:1:stack + MyStack + MyAutoScalingGroup + MyStack_ASG1 + AWS::AutoScalingGroup + 2010-07-27T22:28:28Z + CREATE_IN_PROGRESS + + + + """ + + def test_describe_stack_resources(self): + self.set_http_response(status_code=200) + first, second = self.service_connection.describe_stack_resources( + 'stack_name', 'logical_resource_id', 'physical_resource_id') + self.assertEqual(first.description, None) + self.assertEqual(first.logical_resource_id, 'MyDBInstance') + self.assertEqual(first.physical_resource_id, 'MyStack_DB1') + self.assertEqual(first.resource_status, 'CREATE_COMPLETE') + self.assertEqual(first.resource_status_reason, None) + self.assertEqual(first.resource_type, 'AWS::DBInstance') + self.assertEqual(first.stack_id, 'arn:aws:cfn:us-east-1:1:stack') + self.assertEqual(first.stack_name, 'MyStack') + self.assertIsNotNone(first.timestamp) + + self.assertEqual(second.description, None) + self.assertEqual(second.logical_resource_id, 'MyAutoScalingGroup') + self.assertEqual(second.physical_resource_id, 'MyStack_ASG1') + self.assertEqual(second.resource_status, 'CREATE_IN_PROGRESS') + self.assertEqual(second.resource_status_reason, None) + self.assertEqual(second.resource_type, 'AWS::AutoScalingGroup') + self.assertEqual(second.stack_id, 'arn:aws:cfn:us-east-1:1:stack') + self.assertEqual(second.stack_name, 'MyStack') + 
self.assertIsNotNone(second.timestamp) + + self.assert_request_parameters({ + 'Action': 'DescribeStackResources', + 'LogicalResourceId': 'logical_resource_id', + 'PhysicalResourceId': 'physical_resource_id', + 'StackName': 'stack_name', + 'Version': '2010-05-15', + }) + + +class TestCloudFormationDescribeStacks(CloudFormationConnectionBase): + def default_body(self): + return b""" + + + + + arn:aws:cfn:us-east-1:1:stack + CREATE_COMPLETE + MyStack + + My Description + 2012-05-16T22:55:31Z + + CAPABILITY_IAM + + + arn:aws:sns:region-name:account-name:topic-name + + false + + + MyValue + MyKey + + + + + http://url/ + Server URL + ServerURL + + + + + MyTagKey + MyTagValue + + + + + + + 12345 + + + """ + + def test_describe_stacks(self): + self.set_http_response(status_code=200) + + stacks = self.service_connection.describe_stacks('MyStack') + self.assertEqual(len(stacks), 1) + + stack = stacks[0] + self.assertEqual(stack.creation_time, + datetime(2012, 5, 16, 22, 55, 31)) + self.assertEqual(stack.description, 'My Description') + self.assertEqual(stack.disable_rollback, False) + self.assertEqual(stack.stack_id, 'arn:aws:cfn:us-east-1:1:stack') + self.assertEqual(stack.stack_status, 'CREATE_COMPLETE') + self.assertEqual(stack.stack_name, 'MyStack') + self.assertEqual(stack.stack_name_reason, None) + self.assertEqual(stack.timeout_in_minutes, None) + + self.assertEqual(len(stack.outputs), 1) + self.assertEqual(stack.outputs[0].description, 'Server URL') + self.assertEqual(stack.outputs[0].key, 'ServerURL') + self.assertEqual(stack.outputs[0].value, 'http://url/') + + self.assertEqual(len(stack.parameters), 1) + self.assertEqual(stack.parameters[0].key, 'MyKey') + self.assertEqual(stack.parameters[0].value, 'MyValue') + + self.assertEqual(len(stack.capabilities), 1) + self.assertEqual(stack.capabilities[0].value, 'CAPABILITY_IAM') + + self.assertEqual(len(stack.notification_arns), 1) + self.assertEqual(stack.notification_arns[0].value, 'arn:aws:sns:region-name:account-name:topic-name') + + self.assertEqual(len(stack.tags), 1) + self.assertEqual(stack.tags['MyTagKey'], 'MyTagValue') + + self.assert_request_parameters({ + 'Action': 'DescribeStacks', + 'StackName': 'MyStack', + 'Version': '2010-05-15', + }) + + +class TestCloudFormationListStackResources(CloudFormationConnectionBase): + def default_body(self): + return b""" + + + + + CREATE_COMPLETE + SampleDB + 2011-06-21T20:25:57Z + My-db-ycx + AWS::RDS::DBInstance + + + CREATE_COMPLETE + CPUAlarmHigh + 2011-06-21T20:29:23Z + MyStack-CPUH-PF + AWS::CloudWatch::Alarm + + + + + 2d06e36c-ac1d-11e0-a958-f9382b6eb86b + + + """ + + def test_list_stack_resources(self): + self.set_http_response(status_code=200) + resources = self.service_connection.list_stack_resources('MyStack', + next_token='next_token') + self.assertEqual(len(resources), 2) + self.assertEqual(resources[0].last_updated_time, + datetime(2011, 6, 21, 20, 25, 57)) + self.assertEqual(resources[0].logical_resource_id, 'SampleDB') + self.assertEqual(resources[0].physical_resource_id, 'My-db-ycx') + self.assertEqual(resources[0].resource_status, 'CREATE_COMPLETE') + self.assertEqual(resources[0].resource_status_reason, None) + self.assertEqual(resources[0].resource_type, 'AWS::RDS::DBInstance') + + self.assertEqual(resources[1].last_updated_time, + datetime(2011, 6, 21, 20, 29, 23)) + self.assertEqual(resources[1].logical_resource_id, 'CPUAlarmHigh') + self.assertEqual(resources[1].physical_resource_id, 'MyStack-CPUH-PF') + self.assertEqual(resources[1].resource_status, 'CREATE_COMPLETE') + 
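+        # A minimal positional sketch implied by the XML body above: the
+        # summaries are parsed in document order, so logical IDs can be
+        # read back as a list.
+        self.assertEqual([r.logical_resource_id for r in resources],
+                         ['SampleDB', 'CPUAlarmHigh'])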
self.assertEqual(resources[1].resource_status_reason, None) + self.assertEqual(resources[1].resource_type, 'AWS::CloudWatch::Alarm') + + self.assert_request_parameters({ + 'Action': 'ListStackResources', + 'NextToken': 'next_token', + 'StackName': 'MyStack', + 'Version': '2010-05-15', + }) + + +class TestCloudFormationListStacks(CloudFormationConnectionBase): + def default_body(self): + return b""" + + + + + arn:aws:cfn:us-east-1:1:stack/Test1/aa + CREATE_IN_PROGRESS + vpc1 + 2011-05-23T15:47:44Z + My Description. + + + + + """ + + def test_list_stacks(self): + self.set_http_response(status_code=200) + stacks = self.service_connection.list_stacks(['CREATE_IN_PROGRESS'], + next_token='next_token') + self.assertEqual(len(stacks), 1) + self.assertEqual(stacks[0].stack_id, + 'arn:aws:cfn:us-east-1:1:stack/Test1/aa') + self.assertEqual(stacks[0].stack_status, 'CREATE_IN_PROGRESS') + self.assertEqual(stacks[0].stack_name, 'vpc1') + self.assertEqual(stacks[0].creation_time, + datetime(2011, 5, 23, 15, 47, 44)) + self.assertEqual(stacks[0].deletion_time, None) + self.assertEqual(stacks[0].template_description, 'My Description.') + + self.assert_request_parameters({ + 'Action': 'ListStacks', + 'NextToken': 'next_token', + 'StackStatusFilter.member.1': 'CREATE_IN_PROGRESS', + 'Version': '2010-05-15', + }) + + +class TestCloudFormationValidateTemplate(CloudFormationConnectionBase): + def default_body(self): + return b""" + + + My Description. + + + false + InstanceType + Type of instance to launch + m1.small + + + false + KeyName + EC2 KeyPair + + + Reason + + CAPABILITY_IAM + + + + 0be7b6e8-e4a0-11e0-a5bd-9f8d5a7dbc91 + + + """ + + def test_validate_template(self): + self.set_http_response(status_code=200) + template = self.service_connection.validate_template(template_body=SAMPLE_TEMPLATE, + template_url='http://url') + self.assertEqual(template.description, 'My Description.') + self.assertEqual(len(template.template_parameters), 2) + param1, param2 = template.template_parameters + self.assertEqual(param1.default_value, 'm1.small') + self.assertEqual(param1.description, 'Type of instance to launch') + self.assertEqual(param1.no_echo, True) + self.assertEqual(param1.parameter_key, 'InstanceType') + + self.assertEqual(param2.default_value, None) + self.assertEqual(param2.description, 'EC2 KeyPair') + self.assertEqual(param2.no_echo, True) + self.assertEqual(param2.parameter_key, 'KeyName') + + self.assertEqual(template.capabilities_reason, 'Reason') + + self.assertEqual(len(template.capabilities), 1) + self.assertEqual(template.capabilities[0].value, 'CAPABILITY_IAM') + + self.assert_request_parameters({ + 'Action': 'ValidateTemplate', + 'TemplateBody': SAMPLE_TEMPLATE, + 'TemplateURL': 'http://url', + 'Version': '2010-05-15', + }) + + +class TestCloudFormationCancelUpdateStack(CloudFormationConnectionBase): + def default_body(self): + return b"""""" + + def test_cancel_update_stack(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.cancel_update_stack('stack_name') + self.assertEqual(api_response, True) + self.assert_request_parameters({ + 'Action': 'CancelUpdateStack', + 'StackName': 'stack_name', + 'Version': '2010-05-15', + }) + + +class TestCloudFormationEstimateTemplateCost(CloudFormationConnectionBase): + def default_body(self): + return b""" + { + "EstimateTemplateCostResponse": { + "EstimateTemplateCostResult": { + "Url": "http://calculator.s3.amazonaws.com/calc5.html?key=cf-2e351785-e821-450c-9d58-625e1e1ebfb6" + } + } + } + """ + + def 
test_estimate_template_cost(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.estimate_template_cost( + template_body='{}') + self.assertEqual(api_response, + 'http://calculator.s3.amazonaws.com/calc5.html?key=cf-2e351785-e821-450c-9d58-625e1e1ebfb6') + self.assert_request_parameters({ + 'Action': 'EstimateTemplateCost', + 'ContentType': 'JSON', + 'TemplateBody': '{}', + 'Version': '2010-05-15', + }) + + +class TestCloudFormationGetStackPolicy(CloudFormationConnectionBase): + def default_body(self): + return b""" + { + "GetStackPolicyResponse": { + "GetStackPolicyResult": { + "StackPolicyBody": "{...}" + } + } + } + """ + + def test_get_stack_policy(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.get_stack_policy('stack-id') + self.assertEqual(api_response, '{...}') + self.assert_request_parameters({ + 'Action': 'GetStackPolicy', + 'ContentType': 'JSON', + 'StackName': 'stack-id', + 'Version': '2010-05-15', + }) + + +class TestCloudFormationSetStackPolicy(CloudFormationConnectionBase): + def default_body(self): + return b""" + { + "SetStackPolicyResponse": { + "SetStackPolicyResult": { + "Some": "content" + } + } + } + """ + + def test_set_stack_policy(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.set_stack_policy('stack-id', + stack_policy_body='{}') + self.assertDictEqual(api_response, {'SetStackPolicyResult': {'Some': 'content'}}) + self.assert_request_parameters({ + 'Action': 'SetStackPolicy', + 'ContentType': 'JSON', + 'StackName': 'stack-id', + 'StackPolicyBody': '{}', + 'Version': '2010-05-15', + }) + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudformation/test_stack.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudformation/test_stack.py new file mode 100644 index 0000000000000000000000000000000000000000..82edc06af934ed23c5c2abc0210b575b4dab2884 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudformation/test_stack.py @@ -0,0 +1,256 @@ +#!/usr/bin/env python +import datetime +import xml.sax +import unittest +import boto.handler +import boto.resultset +import boto.cloudformation + +SAMPLE_XML = b""" + + + + + + + value0 + key0 + + + key1 + value1 + + + arn:aws:cloudformation:ap-southeast-1:100:stack/Name/id + CREATE_COMPLETE + Name + + + + arn:aws:sns:ap-southeast-1:100:name + + 2013-01-10T05:04:56Z + false + + + value0 + output0 + key0 + + + value1 + output1 + key1 + + + + + + + 1 + + +""" + +DESCRIBE_STACK_RESOURCE_XML = b""" + + + + arn:aws:cloudformation:us-east-1:123456789:stack/MyStack/aaf549a0-a413-11df-adb3-5081b3858e83 + MyStack + MyDBInstance + MyStack_DB1 + AWS::DBInstance + 2010-07-27T22:27:28Z + CREATE_COMPLETE + + + arn:aws:cloudformation:us-east-1:123456789:stack/MyStack/aaf549a0-a413-11df-adb3-5081b3858e83 + MyStack + MyAutoScalingGroup + MyStack_ASG1 + AWS::AutoScalingGroup + 2010-07-27T22:28:28.123456Z + CREATE_IN_PROGRESS + + + +""" + +LIST_STACKS_XML = b""" + + + + + + arn:aws:cloudformation:us-east-1:1234567:stack/TestCreate1/aaaaa + + CREATE_IN_PROGRESS + vpc1 + 2011-05-23T15:47:44Z + + Creates one EC2 instance and a load balancer. + + + + + arn:aws:cloudformation:us-east-1:1234567:stack/TestDelete2/bbbbb + + DELETE_COMPLETE + 2011-03-10T16:20:51.575757Z + WP1 + 2011-03-05T19:57:58.161616Z + + A simple basic Cloudformation Template. 
+ + + + + +""" + +LIST_STACK_RESOURCES_XML = b""" + + + + + CREATE_COMPLETE + DBSecurityGroup + 2011-06-21T20:15:58Z + gmarcteststack-dbsecuritygroup-1s5m0ez5lkk6w + AWS::RDS::DBSecurityGroup + + + CREATE_COMPLETE + SampleDB + 2011-06-21T20:25:57.875643Z + MyStack-sampledb-ycwhk1v830lx + AWS::RDS::DBInstance + + + + + 2d06e36c-ac1d-11e0-a958-f9382b6eb86b + + +""" + +class TestStackParse(unittest.TestCase): + def test_parse_tags(self): + rs = boto.resultset.ResultSet([ + ('member', boto.cloudformation.stack.Stack) + ]) + h = boto.handler.XmlHandler(rs, None) + xml.sax.parseString(SAMPLE_XML, h) + tags = rs[0].tags + self.assertEqual(tags, {u'key0': u'value0', u'key1': u'value1'}) + + def test_event_creation_time_with_millis(self): + millis_xml = SAMPLE_XML.replace( + b"2013-01-10T05:04:56Z", + b"2013-01-10T05:04:56.102342Z" + ) + + rs = boto.resultset.ResultSet([ + ('member', boto.cloudformation.stack.Stack) + ]) + h = boto.handler.XmlHandler(rs, None) + xml.sax.parseString(millis_xml, h) + creation_time = rs[0].creation_time + self.assertEqual( + creation_time, + datetime.datetime(2013, 1, 10, 5, 4, 56, 102342) + ) + + def test_resource_time_with_millis(self): + rs = boto.resultset.ResultSet([ + ('member', boto.cloudformation.stack.StackResource) + ]) + h = boto.handler.XmlHandler(rs, None) + xml.sax.parseString(DESCRIBE_STACK_RESOURCE_XML, h) + timestamp_1 = rs[0].timestamp + self.assertEqual( + timestamp_1, + datetime.datetime(2010, 7, 27, 22, 27, 28) + ) + timestamp_2 = rs[1].timestamp + self.assertEqual( + timestamp_2, + datetime.datetime(2010, 7, 27, 22, 28, 28, 123456) + ) + + def test_list_stacks_time_with_millis(self): + rs = boto.resultset.ResultSet([ + ('member', boto.cloudformation.stack.StackSummary) + ]) + h = boto.handler.XmlHandler(rs, None) + xml.sax.parseString(LIST_STACKS_XML, h) + timestamp_1 = rs[0].creation_time + self.assertEqual( + timestamp_1, + datetime.datetime(2011, 5, 23, 15, 47, 44) + ) + timestamp_2 = rs[1].creation_time + self.assertEqual( + timestamp_2, + datetime.datetime(2011, 3, 5, 19, 57, 58, 161616) + ) + timestamp_3 = rs[1].deletion_time + self.assertEqual( + timestamp_3, + datetime.datetime(2011, 3, 10, 16, 20, 51, 575757) + ) + + def test_list_stacks_time_with_millis_again(self): + rs = boto.resultset.ResultSet([ + ('member', boto.cloudformation.stack.StackResourceSummary) + ]) + h = boto.handler.XmlHandler(rs, None) + xml.sax.parseString(LIST_STACK_RESOURCES_XML, h) + timestamp_1 = rs[0].last_updated_time + self.assertEqual( + timestamp_1, + datetime.datetime(2011, 6, 21, 20, 15, 58) + ) + timestamp_2 = rs[1].last_updated_time + self.assertEqual( + timestamp_2, + datetime.datetime(2011, 6, 21, 20, 25, 57, 875643) + ) + + def test_disable_rollback_false(self): + # SAMPLE_XML defines DisableRollback=="false" + rs = boto.resultset.ResultSet([('member', boto.cloudformation.stack.Stack)]) + h = boto.handler.XmlHandler(rs, None) + xml.sax.parseString(SAMPLE_XML, h) + disable_rollback = rs[0].disable_rollback + self.assertFalse(disable_rollback) + + def test_disable_rollback_false_upper(self): + # Should also handle "False" + rs = boto.resultset.ResultSet([('member', boto.cloudformation.stack.Stack)]) + h = boto.handler.XmlHandler(rs, None) + sample_xml_upper = SAMPLE_XML.replace(b'false', b'False') + xml.sax.parseString(sample_xml_upper, h) + disable_rollback = rs[0].disable_rollback + self.assertFalse(disable_rollback) + + def test_disable_rollback_true(self): + rs = boto.resultset.ResultSet([('member', boto.cloudformation.stack.Stack)]) + h = 
boto.handler.XmlHandler(rs, None) + sample_xml_upper = SAMPLE_XML.replace(b'false', b'true') + xml.sax.parseString(sample_xml_upper, h) + disable_rollback = rs[0].disable_rollback + self.assertTrue(disable_rollback) + + def test_disable_rollback_true_upper(self): + rs = boto.resultset.ResultSet([('member', boto.cloudformation.stack.Stack)]) + h = boto.handler.XmlHandler(rs, None) + sample_xml_upper = SAMPLE_XML.replace(b'false', b'True') + xml.sax.parseString(sample_xml_upper, h) + disable_rollback = rs[0].disable_rollback + self.assertTrue(disable_rollback) + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudfront/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudfront/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudfront/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudfront/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..342c36559340f2c57e84919add5e8a701dc91c45 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudfront/test_connection.py @@ -0,0 +1,204 @@ +from tests.unit import unittest +from tests.unit import AWSMockServiceTestCase + +from boto.cloudfront import CloudFrontConnection +from boto.cloudfront.distribution import Distribution, DistributionConfig, DistributionSummary +from boto.cloudfront.origin import CustomOrigin + + +class TestCloudFrontConnection(AWSMockServiceTestCase): + connection_class = CloudFrontConnection + + def setUp(self): + super(TestCloudFrontConnection, self).setUp() + + def test_get_all_distributions(self): + body = b""" + + + 100 + false + + EEEEEEEEEEEEE + InProgress + 2014-02-03T11:03:41.087Z + abcdef12345678.cloudfront.net + + example.com + 80 + 443 + http-only + + static.example.com + true + + + """ + self.set_http_response(status_code=200, body=body) + response = self.service_connection.get_all_distributions() + + self.assertTrue(isinstance(response, list)) + self.assertEqual(len(response), 1) + self.assertTrue(isinstance(response[0], DistributionSummary)) + self.assertEqual(response[0].id, "EEEEEEEEEEEEE") + self.assertEqual(response[0].domain_name, "abcdef12345678.cloudfront.net") + self.assertEqual(response[0].status, "InProgress") + self.assertEqual(response[0].cnames, ["static.example.com"]) + self.assertEqual(response[0].enabled, True) + self.assertTrue(isinstance(response[0].origin, CustomOrigin)) + self.assertEqual(response[0].origin.dns_name, "example.com") + self.assertEqual(response[0].origin.http_port, 80) + self.assertEqual(response[0].origin.https_port, 443) + self.assertEqual(response[0].origin.origin_protocol_policy, 'http-only') + + def test_get_distribution_config(self): + body = b""" + + + example.com + 80 + 443 + http-only + + 1234567890123 + static.example.com + true + + """ + + self.set_http_response(status_code=200, body=body, header={"Etag": "AABBCC"}) + response = self.service_connection.get_distribution_config('EEEEEEEEEEEEE') + + self.assertTrue(isinstance(response, DistributionConfig)) + self.assertTrue(isinstance(response.origin, CustomOrigin)) + self.assertEqual(response.origin.dns_name, "example.com") + self.assertEqual(response.origin.http_port, 80) + self.assertEqual(response.origin.https_port, 443) + self.assertEqual(response.origin.origin_protocol_policy, "http-only") + self.assertEqual(response.cnames, ["static.example.com"]) + 
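+        # A minimal sketch of the read-modify-write cycle this config feeds,
+        # assuming a live CloudFrontConnection named 'conn' (the mocked
+        # test_set_distribution_config below exercises the same flow):
+        #
+        #     config = conn.get_distribution_config('EEEEEEEEEEEEE')
+        #     config.comment = 'updated comment'
+        #     new_etag = conn.set_distribution_config('EEEEEEEEEEEEE',
+        #                                             config.etag, config)
+        #
+        # The ETag returned by the GET must be echoed back on the PUT, or
+        # CloudFront rejects the update as stale.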
self.assertTrue(response.enabled) + self.assertEqual(response.etag, "AABBCC") + + def test_set_distribution_config(self): + get_body = b""" + + + example.com + 80 + 443 + http-only + + 1234567890123 + static.example.com + true + + """ + + put_body = b""" + + EEEEEE + InProgress + 2014-02-04T10:47:53.493Z + 0 + d2000000000000.cloudfront.net + + + example.com + 80 + 443 + match-viewer + + aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + this is a comment + false + + + """ + + self.set_http_response(status_code=200, body=get_body, header={"Etag": "AA"}) + conf = self.service_connection.get_distribution_config('EEEEEEE') + + self.set_http_response(status_code=200, body=put_body, header={"Etag": "AABBCCD"}) + conf.comment = 'this is a comment' + response = self.service_connection.set_distribution_config('EEEEEEE', conf.etag, conf) + + self.assertEqual(response, "AABBCCD") + + def test_get_distribution_info(self): + body = b""" + + EEEEEEEEEEEEE + InProgress + 2014-02-03T11:03:41.087Z + 0 + abcdef12345678.cloudfront.net + + + example.com + 80 + 443 + http-only + + 1111111111111 + static.example.com + true + + + """ + + self.set_http_response(status_code=200, body=body) + response = self.service_connection.get_distribution_info('EEEEEEEEEEEEE') + + self.assertTrue(isinstance(response, Distribution)) + self.assertTrue(isinstance(response.config, DistributionConfig)) + self.assertTrue(isinstance(response.config.origin, CustomOrigin)) + self.assertEqual(response.config.origin.dns_name, "example.com") + self.assertEqual(response.config.origin.http_port, 80) + self.assertEqual(response.config.origin.https_port, 443) + self.assertEqual(response.config.origin.origin_protocol_policy, "http-only") + self.assertEqual(response.config.cnames, ["static.example.com"]) + self.assertTrue(response.config.enabled) + self.assertEqual(response.id, "EEEEEEEEEEEEE") + self.assertEqual(response.status, "InProgress") + self.assertEqual(response.domain_name, "abcdef12345678.cloudfront.net") + self.assertEqual(response.in_progress_invalidation_batches, 0) + + def test_create_distribution(self): + body = b""" + + EEEEEEEEEEEEEE + InProgress + 2014-02-04T10:34:07.873Z + 0 + d2000000000000.cloudfront.net + + + example.com + 80 + 443 + match-viewer + + aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + example.com distribution + false + + + """ + + self.set_http_response(status_code=201, body=body) + origin = CustomOrigin("example.com", origin_protocol_policy="match_viewer") + response = self.service_connection.create_distribution(origin, enabled=False, comment="example.com distribution") + + self.assertTrue(isinstance(response, Distribution)) + self.assertTrue(isinstance(response.config, DistributionConfig)) + self.assertTrue(isinstance(response.config.origin, CustomOrigin)) + self.assertEqual(response.config.origin.dns_name, "example.com") + self.assertEqual(response.config.origin.http_port, 80) + self.assertEqual(response.config.origin.https_port, 443) + self.assertEqual(response.config.origin.origin_protocol_policy, "match-viewer") + self.assertEqual(response.config.cnames, []) + self.assertTrue(not response.config.enabled) + self.assertEqual(response.id, "EEEEEEEEEEEEEE") + self.assertEqual(response.status, "InProgress") + self.assertEqual(response.domain_name, "d2000000000000.cloudfront.net") + self.assertEqual(response.in_progress_invalidation_batches, 0) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudfront/test_distribution.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudfront/test_distribution.py new file mode 100644 
index 0000000000000000000000000000000000000000..38e27106c4c49c65825f3183a0b094cb456af997 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudfront/test_distribution.py @@ -0,0 +1,21 @@ +import unittest + +from boto.cloudfront.distribution import DistributionConfig +from boto.cloudfront.logging import LoggingInfo + + +class CloudfrontDistributionTest(unittest.TestCase): + cloudfront = True + + def setUp(self): + self.dist = DistributionConfig() + + def test_logging(self): + # Default. + self.assertEqual(self.dist.logging, None) + + # Override. + lo = LoggingInfo(bucket='whatever', prefix='override_') + dist = DistributionConfig(logging=lo) + self.assertEqual(dist.logging.bucket, 'whatever') + self.assertEqual(dist.logging.prefix, 'override_') diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudfront/test_invalidation_list.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudfront/test_invalidation_list.py new file mode 100644 index 0000000000000000000000000000000000000000..e2dea4c212569ed34903706639a538a6880b7b77 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudfront/test_invalidation_list.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python +import random +import string +from tests.compat import unittest, mock + +import boto + + +RESPONSE_TEMPLATE = r""" + + + %(next_marker)s + %(max_items)s + %(is_truncated)s + %(inval_summaries)s + +""" + +INVAL_SUMMARY_TEMPLATE = r""" + + %(cfid)s + %(status)s + +""" + + +class CFInvalidationListTest(unittest.TestCase): + + cloudfront = True + + def setUp(self): + self.cf = boto.connect_cloudfront('aws.aws_access_key_id', + 'aws.aws_secret_access_key') + + def _get_random_id(self, length=14): + return ''.join([random.choice(string.ascii_letters) for i in + range(length)]) + + def _group_iter(self, iterator, n): + accumulator = [] + for item in iterator: + accumulator.append(item) + if len(accumulator) == n: + yield accumulator + accumulator = [] + if len(accumulator) != 0: + yield accumulator + + def _get_mock_responses(self, num, max_items): + max_items = min(max_items, 100) + cfid_groups = list(self._group_iter([self._get_random_id() for i in + range(num)], max_items)) + cfg = dict(status='Completed', max_items=max_items, next_marker='') + responses = [] + is_truncated = 'true' + for i, group in enumerate(cfid_groups): + next_marker = group[-1] + if (i + 1) == len(cfid_groups): + is_truncated = 'false' + next_marker = '' + invals = '' + cfg.update(dict(next_marker=next_marker, + is_truncated=is_truncated)) + for cfid in group: + cfg.update(dict(cfid=cfid)) + invals += INVAL_SUMMARY_TEMPLATE % cfg + cfg.update(dict(inval_summaries=invals)) + mock_response = mock.Mock() + mock_response.read.return_value = (RESPONSE_TEMPLATE % cfg).encode('utf-8') + mock_response.status = 200 + responses.append(mock_response) + return responses + + def test_manual_pagination(self, num_invals=30, max_items=4): + """ + Test that paginating manually works properly + """ + self.assertGreater(num_invals, max_items) + responses = self._get_mock_responses(num=num_invals, + max_items=max_items) + self.cf.make_request = mock.Mock(side_effect=responses) + ir = self.cf.get_invalidation_requests('dist-id-here', + max_items=max_items) + all_invals = list(ir) + self.assertEqual(len(all_invals), max_items) + while ir.is_truncated: + ir = self.cf.get_invalidation_requests('dist-id-here', + marker=ir.next_marker, + max_items=max_items) + invals = list(ir) + self.assertLessEqual(len(invals), max_items) + all_invals.extend(invals) + remainder = num_invals 
% max_items + if remainder != 0: + self.assertEqual(len(invals), remainder) + self.assertEqual(len(all_invals), num_invals) + + def test_auto_pagination(self, num_invals=1024): + """ + Test that auto-pagination works properly + """ + max_items = 100 + self.assertGreaterEqual(num_invals, max_items) + responses = self._get_mock_responses(num=num_invals, + max_items=max_items) + self.cf.make_request = mock.Mock(side_effect=responses) + ir = self.cf.get_invalidation_requests('dist-id-here') + self.assertEqual(len(ir._inval_cache), max_items) + self.assertEqual(len(list(ir)), num_invals) + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudfront/test_signed_urls.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudfront/test_signed_urls.py new file mode 100644 index 0000000000000000000000000000000000000000..c1a90452596f88979b442f4950bf1fe22ccf8388 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudfront/test_signed_urls.py @@ -0,0 +1,367 @@ +import tempfile +import unittest +from boto.compat import StringIO, six, json +from textwrap import dedent + +from boto.cloudfront.distribution import Distribution + +class CloudfrontSignedUrlsTest(unittest.TestCase): + + cloudfront = True + notdefault = True + + def setUp(self): + self.pk_str = dedent(""" + -----BEGIN RSA PRIVATE KEY----- + MIICXQIBAAKBgQDA7ki9gI/lRygIoOjV1yymgx6FYFlzJ+z1ATMaLo57nL57AavW + hb68HYY8EA0GJU9xQdMVaHBogF3eiCWYXSUZCWM/+M5+ZcdQraRRScucmn6g4EvY + 2K4W2pxbqH8vmUikPxir41EeBPLjMOzKvbzzQy9e/zzIQVREKSp/7y1mywIDAQAB + AoGABc7mp7XYHynuPZxChjWNJZIq+A73gm0ASDv6At7F8Vi9r0xUlQe/v0AQS3yc + N8QlyR4XMbzMLYk3yjxFDXo4ZKQtOGzLGteCU2srANiLv26/imXA8FVidZftTAtL + viWQZBVPTeYIA69ATUYPEq0a5u5wjGyUOij9OWyuy01mbPkCQQDluYoNpPOekQ0Z + WrPgJ5rxc8f6zG37ZVoDBiexqtVShIF5W3xYuWhW5kYb0hliYfkq15cS7t9m95h3 + 1QJf/xI/AkEA1v9l/WN1a1N3rOK4VGoCokx7kR2SyTMSbZgF9IWJNOugR/WZw7HT + njipO3c9dy1Ms9pUKwUF46d7049ck8HwdQJARgrSKuLWXMyBH+/l1Dx/I4tXuAJI + rlPyo+VmiOc7b5NzHptkSHEPfR9s1OK0VqjknclqCJ3Ig86OMEtEFBzjZQJBAKYz + 470hcPkaGk7tKYAgP48FvxRsnzeooptURW5E+M+PQ2W9iDPPOX9739+Xi02hGEWF + B0IGbQoTRFdE4VVcPK0CQQCeS84lODlC0Y2BZv2JxW3Osv/WkUQ4dslfAQl1T303 + 7uwwr7XTroMv8dIFQIPreoPhRKmd/SbJzbiKfS/4QDhU + -----END RSA PRIVATE KEY----- + """) + self.pk_id = "PK123456789754" + self.dist = Distribution() + self.canned_policy = ( + '{"Statement":[{"Resource":' + '"http://d604721fxaaqy9.cloudfront.net/horizon.jpg' + '?large=yes&license=yes",' + '"Condition":{"DateLessThan":{"AWS:EpochTime":1258237200}}}]}') + self.custom_policy_1 = ( + '{ \n' + ' "Statement": [{ \n' + ' "Resource":"http://d604721fxaaqy9.cloudfront.net/training/*", \n' + ' "Condition":{ \n' + ' "IpAddress":{"AWS:SourceIp":"145.168.143.0/24"}, \n' + ' "DateLessThan":{"AWS:EpochTime":1258237200} \n' + ' } \n' + ' }] \n' + '}\n') + self.custom_policy_2 = ( + '{ \n' + ' "Statement": [{ \n' + ' "Resource":"http://*", \n' + ' "Condition":{ \n' + ' "IpAddress":{"AWS:SourceIp":"216.98.35.1/32"},\n' + ' "DateGreaterThan":{"AWS:EpochTime":1241073790},\n' + ' "DateLessThan":{"AWS:EpochTime":1255674716}\n' + ' } \n' + ' }] \n' + '}\n') + + def test_encode_custom_policy_1(self): + """ + Test base64 encoding custom policy 1 from Amazon's documentation. 
+ """ + expected = ("eyAKICAgIlN0YXRlbWVudCI6IFt7IAogICAgICAiUmVzb3VyY2Ui" + "OiJodHRwOi8vZDYwNDcyMWZ4YWFxeTkuY2xvdWRmcm9udC5uZXQv" + "dHJhaW5pbmcvKiIsIAogICAgICAiQ29uZGl0aW9uIjp7IAogICAg" + "ICAgICAiSXBBZGRyZXNzIjp7IkFXUzpTb3VyY2VJcCI6IjE0NS4x" + "NjguMTQzLjAvMjQifSwgCiAgICAgICAgICJEYXRlTGVzc1RoYW4i" + "OnsiQVdTOkVwb2NoVGltZSI6MTI1ODIzNzIwMH0gICAgICAKICAg" + "ICAgfSAKICAgfV0gCn0K") + encoded = self.dist._url_base64_encode(self.custom_policy_1) + self.assertEqual(expected, encoded) + + def test_encode_custom_policy_2(self): + """ + Test base64 encoding custom policy 2 from Amazon's documentation. + """ + expected = ("eyAKICAgIlN0YXRlbWVudCI6IFt7IAogICAgICAiUmVzb3VyY2Ui" + "OiJodHRwOi8vKiIsIAogICAgICAiQ29uZGl0aW9uIjp7IAogICAg" + "ICAgICAiSXBBZGRyZXNzIjp7IkFXUzpTb3VyY2VJcCI6IjIxNi45" + "OC4zNS4xLzMyIn0sCiAgICAgICAgICJEYXRlR3JlYXRlclRoYW4i" + "OnsiQVdTOkVwb2NoVGltZSI6MTI0MTA3Mzc5MH0sCiAgICAgICAg" + "ICJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTI1NTY3" + "NDcxNn0KICAgICAgfSAKICAgfV0gCn0K") + encoded = self.dist._url_base64_encode(self.custom_policy_2) + self.assertEqual(expected, encoded) + + def test_sign_canned_policy(self): + """ + Test signing the canned policy from amazon's cloudfront documentation. + """ + expected = ("Nql641NHEUkUaXQHZINK1FZ~SYeUSoBJMxjdgqrzIdzV2gyEXPDN" + "v0pYdWJkflDKJ3xIu7lbwRpSkG98NBlgPi4ZJpRRnVX4kXAJK6td" + "Nx6FucDB7OVqzcxkxHsGFd8VCG1BkC-Afh9~lOCMIYHIaiOB6~5j" + "t9w2EOwi6sIIqrg_") + sig = self.dist._sign_string(self.canned_policy, private_key_string=self.pk_str) + encoded_sig = self.dist._url_base64_encode(sig) + self.assertEqual(expected, encoded_sig) + + def test_sign_canned_policy_pk_file(self): + """ + Test signing the canned policy from amazon's cloudfront documentation + with a file object. + """ + expected = ("Nql641NHEUkUaXQHZINK1FZ~SYeUSoBJMxjdgqrzIdzV2gyEXPDN" + "v0pYdWJkflDKJ3xIu7lbwRpSkG98NBlgPi4ZJpRRnVX4kXAJK6td" + "Nx6FucDB7OVqzcxkxHsGFd8VCG1BkC-Afh9~lOCMIYHIaiOB6~5j" + "t9w2EOwi6sIIqrg_") + pk_file = tempfile.TemporaryFile() + pk_file.write(self.pk_str) + pk_file.seek(0) + sig = self.dist._sign_string(self.canned_policy, private_key_file=pk_file) + encoded_sig = self.dist._url_base64_encode(sig) + self.assertEqual(expected, encoded_sig) + + def test_sign_canned_policy_pk_file_name(self): + """ + Test signing the canned policy from amazon's cloudfront documentation + with a file name. 
+ """ + expected = ("Nql641NHEUkUaXQHZINK1FZ~SYeUSoBJMxjdgqrzIdzV2gyEXPDN" + "v0pYdWJkflDKJ3xIu7lbwRpSkG98NBlgPi4ZJpRRnVX4kXAJK6td" + "Nx6FucDB7OVqzcxkxHsGFd8VCG1BkC-Afh9~lOCMIYHIaiOB6~5j" + "t9w2EOwi6sIIqrg_") + pk_file = tempfile.NamedTemporaryFile() + pk_file.write(self.pk_str) + pk_file.flush() + sig = self.dist._sign_string(self.canned_policy, private_key_file=pk_file.name) + encoded_sig = self.dist._url_base64_encode(sig) + self.assertEqual(expected, encoded_sig) + + def test_sign_canned_policy_pk_file_like(self): + """ + Test signing the canned policy from amazon's cloudfront documentation + with a file-like object (not a subclass of 'file' type) + """ + expected = ("Nql641NHEUkUaXQHZINK1FZ~SYeUSoBJMxjdgqrzIdzV2gyEXPDN" + "v0pYdWJkflDKJ3xIu7lbwRpSkG98NBlgPi4ZJpRRnVX4kXAJK6td" + "Nx6FucDB7OVqzcxkxHsGFd8VCG1BkC-Afh9~lOCMIYHIaiOB6~5j" + "t9w2EOwi6sIIqrg_") + pk_file = StringIO() + pk_file.write(self.pk_str) + pk_file.seek(0) + sig = self.dist._sign_string(self.canned_policy, private_key_file=pk_file) + encoded_sig = self.dist._url_base64_encode(sig) + self.assertEqual(expected, encoded_sig) + + def test_sign_canned_policy_unicode(self): + """ + Test signing the canned policy from amazon's cloudfront documentation. + """ + expected = ("Nql641NHEUkUaXQHZINK1FZ~SYeUSoBJMxjdgqrzIdzV2gyEXPDN" + "v0pYdWJkflDKJ3xIu7lbwRpSkG98NBlgPi4ZJpRRnVX4kXAJK6td" + "Nx6FucDB7OVqzcxkxHsGFd8VCG1BkC-Afh9~lOCMIYHIaiOB6~5j" + "t9w2EOwi6sIIqrg_") + unicode_policy = six.text_type(self.canned_policy) + sig = self.dist._sign_string(unicode_policy, private_key_string=self.pk_str) + encoded_sig = self.dist._url_base64_encode(sig) + self.assertEqual(expected, encoded_sig) + + def test_sign_custom_policy_1(self): + """ + Test signing custom policy 1 from amazon's cloudfront documentation. + """ + expected = ("cPFtRKvUfYNYmxek6ZNs6vgKEZP6G3Cb4cyVt~FjqbHOnMdxdT7e" + "T6pYmhHYzuDsFH4Jpsctke2Ux6PCXcKxUcTIm8SO4b29~1QvhMl-" + "CIojki3Hd3~Unxjw7Cpo1qRjtvrimW0DPZBZYHFZtiZXsaPt87yB" + "P9GWnTQoaVysMxQ_") + sig = self.dist._sign_string(self.custom_policy_1, private_key_string=self.pk_str) + encoded_sig = self.dist._url_base64_encode(sig) + self.assertEqual(expected, encoded_sig) + + def test_sign_custom_policy_2(self): + """ + Test signing custom policy 2 from amazon's cloudfront documentation. + """ + expected = ("rc~5Qbbm8EJXjUTQ6Cn0LAxR72g1DOPrTmdtfbWVVgQNw0q~KHUA" + "mBa2Zv1Wjj8dDET4XSL~Myh44CLQdu4dOH~N9huH7QfPSR~O4tIO" + "S1WWcP~2JmtVPoQyLlEc8YHRCuN3nVNZJ0m4EZcXXNAS-0x6Zco2" + "SYx~hywTRxWR~5Q_") + sig = self.dist._sign_string(self.custom_policy_2, private_key_string=self.pk_str) + encoded_sig = self.dist._url_base64_encode(sig) + self.assertEqual(expected, encoded_sig) + + def test_create_canned_policy(self): + """ + Test that a canned policy is generated correctly. 
+ """ + url = "http://1234567.cloudfront.com/test_resource.mp3?dog=true" + expires = 999999 + policy = self.dist._canned_policy(url, expires) + policy = json.loads(policy) + + self.assertEqual(1, len(policy.keys())) + statements = policy["Statement"] + self.assertEqual(1, len(statements)) + statement = statements[0] + resource = statement["Resource"] + self.assertEqual(url, resource) + condition = statement["Condition"] + self.assertEqual(1, len(condition.keys())) + date_less_than = condition["DateLessThan"] + self.assertEqual(1, len(date_less_than.keys())) + aws_epoch_time = date_less_than["AWS:EpochTime"] + self.assertEqual(expires, aws_epoch_time) + + def test_custom_policy_expires_and_policy_url(self): + """ + Test that a custom policy can be created with an expire time and an + arbitrary URL. + """ + url = "http://1234567.cloudfront.com/*" + expires = 999999 + policy = self.dist._custom_policy(url, expires=expires) + policy = json.loads(policy) + + self.assertEqual(1, len(policy.keys())) + statements = policy["Statement"] + self.assertEqual(1, len(statements)) + statement = statements[0] + resource = statement["Resource"] + self.assertEqual(url, resource) + condition = statement["Condition"] + self.assertEqual(1, len(condition.keys())) + date_less_than = condition["DateLessThan"] + self.assertEqual(1, len(date_less_than.keys())) + aws_epoch_time = date_less_than["AWS:EpochTime"] + self.assertEqual(expires, aws_epoch_time) + + def test_custom_policy_valid_after(self): + """ + Test that a custom policy can be created with a valid-after time and + an arbitrary URL. + """ + url = "http://1234567.cloudfront.com/*" + valid_after = 999999 + policy = self.dist._custom_policy(url, valid_after=valid_after) + policy = json.loads(policy) + + self.assertEqual(1, len(policy.keys())) + statements = policy["Statement"] + self.assertEqual(1, len(statements)) + statement = statements[0] + resource = statement["Resource"] + self.assertEqual(url, resource) + condition = statement["Condition"] + self.assertEqual(2, len(condition.keys())) + date_less_than = condition["DateLessThan"] + date_greater_than = condition["DateGreaterThan"] + self.assertEqual(1, len(date_greater_than.keys())) + aws_epoch_time = date_greater_than["AWS:EpochTime"] + self.assertEqual(valid_after, aws_epoch_time) + + def test_custom_policy_ip_address(self): + """ + Test that a custom policy can be created with an IP address and + an arbitrary URL. + """ + url = "http://1234567.cloudfront.com/*" + ip_range = "192.168.0.1" + policy = self.dist._custom_policy(url, ip_address=ip_range) + policy = json.loads(policy) + + self.assertEqual(1, len(policy.keys())) + statements = policy["Statement"] + self.assertEqual(1, len(statements)) + statement = statements[0] + resource = statement["Resource"] + self.assertEqual(url, resource) + condition = statement["Condition"] + self.assertEqual(2, len(condition.keys())) + ip_address = condition["IpAddress"] + self.assertTrue("DateLessThan" in condition) + self.assertEqual(1, len(ip_address.keys())) + source_ip = ip_address["AWS:SourceIp"] + self.assertEqual("%s/32" % ip_range, source_ip) + + def test_custom_policy_ip_range(self): + """ + Test that a custom policy can be created with an IP address and + an arbitrary URL. 
+ """ + url = "http://1234567.cloudfront.com/*" + ip_range = "192.168.0.0/24" + policy = self.dist._custom_policy(url, ip_address=ip_range) + policy = json.loads(policy) + + self.assertEqual(1, len(policy.keys())) + statements = policy["Statement"] + self.assertEqual(1, len(statements)) + statement = statements[0] + resource = statement["Resource"] + self.assertEqual(url, resource) + condition = statement["Condition"] + self.assertEqual(2, len(condition.keys())) + self.assertTrue("DateLessThan" in condition) + ip_address = condition["IpAddress"] + self.assertEqual(1, len(ip_address.keys())) + source_ip = ip_address["AWS:SourceIp"] + self.assertEqual(ip_range, source_ip) + + def test_custom_policy_all(self): + """ + Test that a custom policy can be created with an IP address and + an arbitrary URL. + """ + url = "http://1234567.cloudfront.com/test.txt" + expires = 999999 + valid_after = 111111 + ip_range = "192.168.0.0/24" + policy = self.dist._custom_policy(url, expires=expires, + valid_after=valid_after, + ip_address=ip_range) + policy = json.loads(policy) + + self.assertEqual(1, len(policy.keys())) + statements = policy["Statement"] + self.assertEqual(1, len(statements)) + statement = statements[0] + resource = statement["Resource"] + self.assertEqual(url, resource) + condition = statement["Condition"] + self.assertEqual(3, len(condition.keys())) + #check expires condition + date_less_than = condition["DateLessThan"] + self.assertEqual(1, len(date_less_than.keys())) + aws_epoch_time = date_less_than["AWS:EpochTime"] + self.assertEqual(expires, aws_epoch_time) + #check valid_after condition + date_greater_than = condition["DateGreaterThan"] + self.assertEqual(1, len(date_greater_than.keys())) + aws_epoch_time = date_greater_than["AWS:EpochTime"] + self.assertEqual(valid_after, aws_epoch_time) + #check source ip address condition + ip_address = condition["IpAddress"] + self.assertEqual(1, len(ip_address.keys())) + source_ip = ip_address["AWS:SourceIp"] + self.assertEqual(ip_range, source_ip) + + def test_params_canned_policy(self): + """ + Test the correct params are generated for a canned policy. + """ + url = "http://d604721fxaaqy9.cloudfront.net/horizon.jpg?large=yes&license=yes" + expire_time = 1258237200 + expected_sig = ("Nql641NHEUkUaXQHZINK1FZ~SYeUSoBJMxjdgqrzIdzV2gyE" + "XPDNv0pYdWJkflDKJ3xIu7lbwRpSkG98NBlgPi4ZJpRRnVX4" + "kXAJK6tdNx6FucDB7OVqzcxkxHsGFd8VCG1BkC-Afh9~lOCM" + "IYHIaiOB6~5jt9w2EOwi6sIIqrg_") + signed_url_params = self.dist._create_signing_params(url, self.pk_id, expire_time, private_key_string=self.pk_str) + self.assertEqual(3, len(signed_url_params)) + self.assertEqual(signed_url_params["Expires"], "1258237200") + self.assertEqual(signed_url_params["Signature"], expected_sig) + self.assertEqual(signed_url_params["Key-Pair-Id"], "PK123456789754") + + def test_canned_policy(self): + """ + Generate signed url from the Example Canned Policy in Amazon's + documentation. 
+ """ + url = "http://d604721fxaaqy9.cloudfront.net/horizon.jpg?large=yes&license=yes" + expire_time = 1258237200 + expected_url = "http://d604721fxaaqy9.cloudfront.net/horizon.jpg?large=yes&license=yes&Expires=1258237200&Signature=Nql641NHEUkUaXQHZINK1FZ~SYeUSoBJMxjdgqrzIdzV2gyEXPDNv0pYdWJkflDKJ3xIu7lbwRpSkG98NBlgPi4ZJpRRnVX4kXAJK6tdNx6FucDB7OVqzcxkxHsGFd8VCG1BkC-Afh9~lOCMIYHIaiOB6~5jt9w2EOwi6sIIqrg_&Key-Pair-Id=PK123456789754" + signed_url = self.dist.create_signed_url( + url, self.pk_id, expire_time, private_key_string=self.pk_str) + self.assertEqual(expected_url, signed_url) + diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch/__init__.py @@ -0,0 +1 @@ + diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..64c2b572ffc8cc6c4400d93148e85812e41fe8ae --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch/test_connection.py @@ -0,0 +1,230 @@ +#!/usr/bin env python + +from tests.unit import AWSMockServiceTestCase + +from boto.cloudsearch.domain import Domain +from boto.cloudsearch.layer1 import Layer1 + +class TestCloudSearchCreateDomain(AWSMockServiceTestCase): + connection_class = Layer1 + + def default_body(self): + return b""" + + + + 0 + + arn:aws:cs:us-east-1:1234567890:search/demo + search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com + + 0 + true + 1234567890/demo + false + 0 + demo + false + false + + arn:aws:cs:us-east-1:1234567890:doc/demo + doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com + + + + + 00000000-0000-0000-0000-000000000000 + + +""" + + def test_create_domain(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.create_domain('demo') + + self.assert_request_parameters({ + 'Action': 'CreateDomain', + 'DomainName': 'demo', + 'Version': '2011-02-01', + }) + + def test_cloudsearch_connect_result_endpoints(self): + """Check that endpoints & ARNs are correctly returned from AWS""" + + self.set_http_response(status_code=200) + api_response = self.service_connection.create_domain('demo') + domain = Domain(self, api_response) + + self.assertEqual(domain.doc_service_arn, + "arn:aws:cs:us-east-1:1234567890:doc/demo") + self.assertEqual( + domain.doc_service_endpoint, + "doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + self.assertEqual(domain.search_service_arn, + "arn:aws:cs:us-east-1:1234567890:search/demo") + self.assertEqual( + domain.search_service_endpoint, + "search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + + def test_cloudsearch_connect_result_statuses(self): + """Check that domain statuses are correctly returned from AWS""" + self.set_http_response(status_code=200) + api_response = self.service_connection.create_domain('demo') + domain = Domain(self, api_response) + + self.assertEqual(domain.created, True) + self.assertEqual(domain.processing, False) + self.assertEqual(domain.requires_index_documents, False) + self.assertEqual(domain.deleted, False) + + def test_cloudsearch_connect_result_details(self): + """Check that the domain information is correctly returned from AWS""" + self.set_http_response(status_code=200) + 
+        api_response = self.service_connection.create_domain('demo')
+        domain = Domain(self, api_response)
+
+        self.assertEqual(domain.id, "1234567890/demo")
+        self.assertEqual(domain.name, "demo")
+
+    def test_cloudsearch_documentservice_creation(self):
+        self.set_http_response(status_code=200)
+        api_response = self.service_connection.create_domain('demo')
+        domain = Domain(self, api_response)
+
+        document = domain.get_document_service()
+
+        self.assertEqual(
+            document.endpoint,
+            "doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
+
+    def test_cloudsearch_searchservice_creation(self):
+        self.set_http_response(status_code=200)
+        api_response = self.service_connection.create_domain('demo')
+        domain = Domain(self, api_response)
+
+        search = domain.get_search_service()
+
+        self.assertEqual(
+            search.endpoint,
+            "search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
+
+
+class CloudSearchConnectionDeletionTest(AWSMockServiceTestCase):
+    connection_class = Layer1
+
+    def default_body(self):
+        return b"""
+<DeleteDomainResponse xmlns="http://cloudsearch.amazonaws.com/doc/2011-02-01">
+  <DeleteDomainResult>
+    <DomainStatus>
+      <SearchPartitionCount>0</SearchPartitionCount>
+      <SearchService>
+        <Arn>arn:aws:cs:us-east-1:1234567890:search/demo</Arn>
+        <Endpoint>search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com</Endpoint>
+      </SearchService>
+      <NumSearchableDocs>0</NumSearchableDocs>
+      <Created>true</Created>
+      <DomainId>1234567890/demo</DomainId>
+      <Processing>false</Processing>
+      <SearchInstanceCount>0</SearchInstanceCount>
+      <DomainName>demo</DomainName>
+      <RequiresIndexDocuments>false</RequiresIndexDocuments>
+      <Deleted>false</Deleted>
+      <DocService>
+        <Arn>arn:aws:cs:us-east-1:1234567890:doc/demo</Arn>
+        <Endpoint>doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com</Endpoint>
+      </DocService>
+    </DomainStatus>
+  </DeleteDomainResult>
+  <ResponseMetadata>
+    <RequestId>00000000-0000-0000-0000-000000000000</RequestId>
+  </ResponseMetadata>
+</DeleteDomainResponse>
+"""
+
+    def test_cloudsearch_deletion(self):
+        """
+        Check that the correct arguments are sent to AWS when deleting a
+        CloudSearch domain.
+        """
+        self.set_http_response(status_code=200)
+        api_response = self.service_connection.delete_domain('demo')
+
+        self.assert_request_parameters({
+            'Action': 'DeleteDomain',
+            'DomainName': 'demo',
+            'Version': '2011-02-01',
+        })
+
+
+class CloudSearchConnectionIndexDocumentTest(AWSMockServiceTestCase):
+    connection_class = Layer1
+
+    def default_body(self):
+        return b"""
+<IndexDocumentsResponse xmlns="http://cloudsearch.amazonaws.com/doc/2011-02-01">
+  <IndexDocumentsResult>
+    <FieldNames>
+      <member>average_score</member>
+      <member>brand_id</member>
+      <member>colors</member>
+      <member>context</member>
+      <member>context_owner</member>
+      <member>created_at</member>
+      <member>creator_id</member>
+      <member>description</member>
+      <member>file_size</member>
+      <member>format</member>
+      <member>has_logo</member>
+      <member>has_messaging</member>
+      <member>height</member>
+      <member>image_id</member>
+      <member>ingested_from</member>
+      <member>is_advertising</member>
+      <member>is_photo</member>
+      <member>is_reviewed</member>
+      <member>modified_at</member>
+      <member>subject_date</member>
+      <member>tags</member>
+      <member>title</member>
+      <member>width</member>
+    </FieldNames>
+  </IndexDocumentsResult>
+  <ResponseMetadata>
+    <RequestId>eb2b2390-6bbd-11e2-ab66-93f3a90dcf2a</RequestId>
+  </ResponseMetadata>
+</IndexDocumentsResponse>
+"""
+
+    def test_cloudsearch_index_documents(self):
+        """
+        Check that the correct arguments are sent to AWS when indexing a
+        domain.
+        """
+        self.set_http_response(status_code=200)
+        api_response = self.service_connection.index_documents('demo')
+
+        self.assert_request_parameters({
+            'Action': 'IndexDocuments',
+            'DomainName': 'demo',
+            'Version': '2011-02-01',
+        })
+
+    def test_cloudsearch_index_documents_resp(self):
+        """
+        Check that the AWS response is being parsed correctly when indexing a
+        domain.
+        """
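+        # Hedged sketch: index_documents() kicks off re-indexing and returns
+        # the field names parsed from the FieldNames members above; against
+        # a live domain (assuming a Layer1 connection 'conn'):
+        #
+        #     fields = conn.index_documents('demo')
+        #     # e.g. fields == ['average_score', 'brand_id', ...]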
+ """ + self.set_http_response(status_code=200) + api_response = self.service_connection.index_documents('demo') + + self.assertEqual(api_response, ['average_score', 'brand_id', 'colors', + 'context', 'context_owner', + 'created_at', 'creator_id', + 'description', 'file_size', 'format', + 'has_logo', 'has_messaging', 'height', + 'image_id', 'ingested_from', + 'is_advertising', 'is_photo', + 'is_reviewed', 'modified_at', + 'subject_date', 'tags', 'title', + 'width']) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch/test_document.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch/test_document.py new file mode 100644 index 0000000000000000000000000000000000000000..929b62be65ed323c641229ea75a3098f0a36b479 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch/test_document.py @@ -0,0 +1,337 @@ +#!/usr/bin env python + +from tests.unit import unittest +from httpretty import HTTPretty +from mock import MagicMock + +import json + +from boto.cloudsearch.document import DocumentServiceConnection +from boto.cloudsearch.document import CommitMismatchError, EncodingError, \ + ContentTooLongError, DocumentServiceConnection, SearchServiceException + +import boto + +class CloudSearchDocumentTest(unittest.TestCase): + def setUp(self): + HTTPretty.enable() + HTTPretty.register_uri( + HTTPretty.POST, + ("http://doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com/" + "2011-02-01/documents/batch"), + body=json.dumps(self.response).encode('utf-8'), + content_type="application/json") + + def tearDown(self): + HTTPretty.disable() + +class CloudSearchDocumentSingleTest(CloudSearchDocumentTest): + + response = { + 'status': 'success', + 'adds': 1, + 'deletes': 0, + } + + def test_cloudsearch_add_basics(self): + """ + Check that a simple add document actually sends an add document request + to AWS. + """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.add("1234", 10, {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + document.commit() + + args = json.loads(HTTPretty.last_request.body.decode('utf-8'))[0] + + self.assertEqual(args['lang'], 'en') + self.assertEqual(args['type'], 'add') + + def test_cloudsearch_add_single_basic(self): + """ + Check that a simple add document sends correct document metadata to + AWS. + """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.add("1234", 10, {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + document.commit() + + args = json.loads(HTTPretty.last_request.body.decode('utf-8'))[0] + + self.assertEqual(args['id'], '1234') + self.assertEqual(args['version'], 10) + self.assertEqual(args['type'], 'add') + + def test_cloudsearch_add_single_fields(self): + """ + Check that a simple add document sends the actual document to AWS. 
+ """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.add("1234", 10, {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + document.commit() + + args = json.loads(HTTPretty.last_request.body.decode('utf-8'))[0] + + self.assertEqual(args['fields']['category'], ['cat_a', 'cat_b', + 'cat_c']) + self.assertEqual(args['fields']['id'], '1234') + self.assertEqual(args['fields']['title'], 'Title 1') + + def test_cloudsearch_add_single_result(self): + """ + Check that the reply from adding a single document is correctly parsed. + """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.add("1234", 10, {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + doc = document.commit() + + self.assertEqual(doc.status, 'success') + self.assertEqual(doc.adds, 1) + self.assertEqual(doc.deletes, 0) + + self.assertEqual(doc.doc_service, document) + + +class CloudSearchDocumentMultipleAddTest(CloudSearchDocumentTest): + + response = { + 'status': 'success', + 'adds': 3, + 'deletes': 0, + } + + objs = { + '1234': { + 'version': 10, 'fields': {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", + "cat_c"]}}, + '1235': { + 'version': 11, 'fields': {"id": "1235", "title": "Title 2", + "category": ["cat_b", "cat_c", + "cat_d"]}}, + '1236': { + 'version': 12, 'fields': {"id": "1236", "title": "Title 3", + "category": ["cat_e", "cat_f", + "cat_g"]}}, + } + + + def test_cloudsearch_add_basics(self): + """Check that multiple documents are added correctly to AWS""" + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + for (key, obj) in self.objs.items(): + document.add(key, obj['version'], obj['fields']) + document.commit() + + args = json.loads(HTTPretty.last_request.body.decode('utf-8')) + + for arg in args: + self.assertTrue(arg['id'] in self.objs) + self.assertEqual(arg['version'], self.objs[arg['id']]['version']) + self.assertEqual(arg['fields']['id'], + self.objs[arg['id']]['fields']['id']) + self.assertEqual(arg['fields']['title'], + self.objs[arg['id']]['fields']['title']) + self.assertEqual(arg['fields']['category'], + self.objs[arg['id']]['fields']['category']) + + def test_cloudsearch_add_results(self): + """ + Check that the result from adding multiple documents is parsed + correctly. + """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + for (key, obj) in self.objs.items(): + document.add(key, obj['version'], obj['fields']) + doc = document.commit() + + self.assertEqual(doc.status, 'success') + self.assertEqual(doc.adds, len(self.objs)) + self.assertEqual(doc.deletes, 0) + + +class CloudSearchDocumentDelete(CloudSearchDocumentTest): + + response = { + 'status': 'success', + 'adds': 0, + 'deletes': 1, + } + + def test_cloudsearch_delete(self): + """ + Test that the request for a single document deletion is done properly. 
+ """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.delete("5", "10") + document.commit() + args = json.loads(HTTPretty.last_request.body.decode('utf-8'))[0] + + self.assertEqual(args['version'], '10') + self.assertEqual(args['type'], 'delete') + self.assertEqual(args['id'], '5') + + def test_cloudsearch_delete_results(self): + """ + Check that the result of a single document deletion is parsed properly. + """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.delete("5", "10") + doc = document.commit() + + self.assertEqual(doc.status, 'success') + self.assertEqual(doc.adds, 0) + self.assertEqual(doc.deletes, 1) + + +class CloudSearchDocumentDeleteMultiple(CloudSearchDocumentTest): + response = { + 'status': 'success', + 'adds': 0, + 'deletes': 2, + } + + def test_cloudsearch_delete_multiples(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.delete("5", "10") + document.delete("6", "11") + document.commit() + args = json.loads(HTTPretty.last_request.body.decode('utf-8')) + + self.assertEqual(len(args), 2) + for arg in args: + self.assertEqual(arg['type'], 'delete') + + if arg['id'] == '5': + self.assertEqual(arg['version'], '10') + elif arg['id'] == '6': + self.assertEqual(arg['version'], '11') + else: # Unknown result out of AWS that shouldn't be there + self.assertTrue(False) + + +class CloudSearchSDFManipulation(CloudSearchDocumentTest): + response = { + 'status': 'success', + 'adds': 1, + 'deletes': 0, + } + + + def test_cloudsearch_initial_sdf_is_blank(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + + self.assertEqual(document.get_sdf(), '[]') + + def test_cloudsearch_single_document_sdf(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + + document.add("1234", 10, {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + + self.assertNotEqual(document.get_sdf(), '[]') + + document.clear_sdf() + + self.assertEqual(document.get_sdf(), '[]') + +class CloudSearchBadSDFTesting(CloudSearchDocumentTest): + response = { + 'status': 'success', + 'adds': 1, + 'deletes': 0, + } + + def test_cloudsearch_erroneous_sdf(self): + original = boto.log.error + boto.log.error = MagicMock() + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + + document.add("1234", 10, {"id": "1234", "title": None, + "category": ["cat_a", "cat_b", "cat_c"]}) + + document.commit() + self.assertNotEqual(len(boto.log.error.call_args_list), 1) + + boto.log.error = original + + +class CloudSearchDocumentErrorBadUnicode(CloudSearchDocumentTest): + response = { + 'status': 'error', + 'adds': 0, + 'deletes': 0, + 'errors': [{'message': 'Illegal Unicode character in document'}] + } + + def test_fake_bad_unicode(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.add("1234", 10, {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + self.assertRaises(EncodingError, document.commit) + + +class CloudSearchDocumentErrorDocsTooBig(CloudSearchDocumentTest): + response = { + 'status': 'error', + 'adds': 0, + 'deletes': 0, + 'errors': [{'message': 'The Content-Length is too long'}] + } + + def 
test_fake_docs_too_big(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.add("1234", 10, {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + + self.assertRaises(ContentTooLongError, document.commit) + + +class CloudSearchDocumentErrorMismatch(CloudSearchDocumentTest): + response = { + 'status': 'error', + 'adds': 0, + 'deletes': 0, + 'errors': [{'message': 'Something went wrong'}] + } + + def test_fake_failure(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + + document.add("1234", 10, {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + + self.assertRaises(CommitMismatchError, document.commit) + +class CloudSearchDocumentsErrorMissingAdds(CloudSearchDocumentTest): + response = { + 'status': 'error', + 'deletes': 0, + 'errors': [{'message': 'Unknown error message'}] + } + + def test_fake_failure(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.add("1234", 10, {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + self.assertRaises(SearchServiceException, document.commit) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch/test_exceptions.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch/test_exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..03c31ecb1de91d8e7428346d0476fcb7af356d66 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch/test_exceptions.py @@ -0,0 +1,37 @@ +from boto.compat import json +from tests.compat import mock, unittest + +from tests.unit.cloudsearch.test_search import HOSTNAME, \ + CloudSearchSearchBaseTest +from boto.cloudsearch.search import SearchConnection, SearchServiceException + + +def fake_loads_value_error(content, *args, **kwargs): + """Callable to generate a fake ValueError""" + raise ValueError("HAHAHA! 
Totally not simplejson & you gave me bad JSON.") + + +def fake_loads_json_error(content, *args, **kwargs): + """Callable to generate a fake JSONDecodeError""" + raise json.JSONDecodeError('Using simplejson & you gave me bad JSON.', + '', 0) + + +class CloudSearchJSONExceptionTest(CloudSearchSearchBaseTest): + response = b'{}' + + def test_no_simplejson_value_error(self): + with mock.patch.object(json, 'loads', fake_loads_value_error): + search = SearchConnection(endpoint=HOSTNAME) + + with self.assertRaisesRegexp(SearchServiceException, 'non-json'): + search.search(q='test') + + @unittest.skipUnless(hasattr(json, 'JSONDecodeError'), + 'requires simplejson') + def test_simplejson_jsondecodeerror(self): + with mock.patch.object(json, 'loads', fake_loads_json_error): + search = SearchConnection(endpoint=HOSTNAME) + + with self.assertRaisesRegexp(SearchServiceException, 'non-json'): + search.search(q='test') diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch/test_search.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch/test_search.py new file mode 100644 index 0000000000000000000000000000000000000000..e8097640a0e8bc6e9667b547915758c96e89b609 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch/test_search.py @@ -0,0 +1,428 @@ +#!/usr/bin env python + +from tests.compat import mock, unittest +from httpretty import HTTPretty + +import json +import requests + +from boto.cloudsearch.search import SearchConnection, SearchServiceException +from boto.compat import six, map + +HOSTNAME = "search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com" +FULL_URL = 'http://%s/2011-02-01/search' % HOSTNAME + + +class CloudSearchSearchBaseTest(unittest.TestCase): + + hits = [ + { + 'id': '12341', + 'title': 'Document 1', + }, + { + 'id': '12342', + 'title': 'Document 2', + }, + { + 'id': '12343', + 'title': 'Document 3', + }, + { + 'id': '12344', + 'title': 'Document 4', + }, + { + 'id': '12345', + 'title': 'Document 5', + }, + { + 'id': '12346', + 'title': 'Document 6', + }, + { + 'id': '12347', + 'title': 'Document 7', + }, + ] + + content_type = "text/xml" + response_status = 200 + + def get_args(self, requestline): + (_, request, _) = requestline.split(b" ") + (_, request) = request.split(b"?", 1) + args = six.moves.urllib.parse.parse_qs(request) + return args + + def setUp(self): + HTTPretty.enable() + body = self.response + + if not isinstance(body, bytes): + body = json.dumps(body).encode('utf-8') + + HTTPretty.register_uri(HTTPretty.GET, FULL_URL, + body=body, + content_type=self.content_type, + status=self.response_status) + + def tearDown(self): + HTTPretty.disable() + +class CloudSearchSearchTest(CloudSearchSearchBaseTest): + response = { + 'rank': '-text_relevance', + 'match-expr': "Test", + 'hits': { + 'found': 30, + 'start': 0, + 'hit': CloudSearchSearchBaseTest.hits + }, + 'info': { + 'rid': 'b7c167f6c2da6d93531b9a7b314ad030b3a74803b4b7797edb905ba5a6a08', + 'time-ms': 2, + 'cpu-time-ms': 0 + } + + } + + def test_cloudsearch_qsearch(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test') + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args[b'q'], [b"Test"]) + self.assertEqual(args[b'start'], [b"0"]) + self.assertEqual(args[b'size'], [b"10"]) + + def test_cloudsearch_bqsearch(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(bq="'Test'") + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args[b'bq'], [b"'Test'"]) + + def 
test_cloudsearch_search_details(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', size=50, start=20) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args[b'q'], [b"Test"]) + self.assertEqual(args[b'size'], [b"50"]) + self.assertEqual(args[b'start'], [b"20"]) + + def test_cloudsearch_facet_single(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', facet=["Author"]) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args[b'facet'], [b"Author"]) + + def test_cloudsearch_facet_multiple(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', facet=["author", "cat"]) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args[b'facet'], [b"author,cat"]) + + def test_cloudsearch_facet_constraint_single(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search( + q='Test', + facet_constraints={'author': "'John Smith','Mark Smith'"}) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args[b'facet-author-constraints'], + [b"'John Smith','Mark Smith'"]) + + def test_cloudsearch_facet_constraint_multiple(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search( + q='Test', + facet_constraints={'author': "'John Smith','Mark Smith'", + 'category': "'News','Reviews'"}) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args[b'facet-author-constraints'], + [b"'John Smith','Mark Smith'"]) + self.assertEqual(args[b'facet-category-constraints'], + [b"'News','Reviews'"]) + + def test_cloudsearch_facet_sort_single(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', facet_sort={'author': 'alpha'}) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args[b'facet-author-sort'], [b'alpha']) + + def test_cloudsearch_facet_sort_multiple(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', facet_sort={'author': 'alpha', + 'cat': 'count'}) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args[b'facet-author-sort'], [b'alpha']) + self.assertEqual(args[b'facet-cat-sort'], [b'count']) + + def test_cloudsearch_top_n_single(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', facet_top_n={'author': 5}) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args[b'facet-author-top-n'], [b'5']) + + def test_cloudsearch_top_n_multiple(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', facet_top_n={'author': 5, 'cat': 10}) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args[b'facet-author-top-n'], [b'5']) + self.assertEqual(args[b'facet-cat-top-n'], [b'10']) + + def test_cloudsearch_rank_single(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', rank=["date"]) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args[b'rank'], [b'date']) + + def test_cloudsearch_rank_multiple(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', rank=["date", "score"]) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args[b'rank'], [b'date,score']) + + def test_cloudsearch_result_fields_single(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', 
return_fields=['author']) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args[b'return-fields'], [b'author']) + + def test_cloudsearch_result_fields_multiple(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', return_fields=['author', 'title']) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args[b'return-fields'], [b'author,title']) + + def test_cloudsearch_t_field_single(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', t={'year': '2001..2007'}) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args[b't-year'], [b'2001..2007']) + + def test_cloudsearch_t_field_multiple(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', t={'year': '2001..2007', 'score': '10..50'}) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args[b't-year'], [b'2001..2007']) + self.assertEqual(args[b't-score'], [b'10..50']) + + def test_cloudsearch_results_meta(self): + """Check returned metadata is parsed correctly""" + search = SearchConnection(endpoint=HOSTNAME) + + results = search.search(q='Test') + + # These rely on the default response which is fed into HTTPretty + self.assertEqual(results.rank, "-text_relevance") + self.assertEqual(results.match_expression, "Test") + + def test_cloudsearch_results_info(self): + """Check num_pages_needed is calculated correctly""" + search = SearchConnection(endpoint=HOSTNAME) + + results = search.search(q='Test') + + # This relies on the default response which is fed into HTTPretty + self.assertEqual(results.num_pages_needed, 3.0) + + def test_cloudsearch_results_matched(self): + """ + Check that information objects are passed back through the API + correctly. 
+ """ + search = SearchConnection(endpoint=HOSTNAME) + query = search.build_query(q='Test') + + results = search(query) + + self.assertEqual(results.search_service, search) + self.assertEqual(results.query, query) + + def test_cloudsearch_results_hits(self): + """Check that documents are parsed properly from AWS""" + search = SearchConnection(endpoint=HOSTNAME) + + results = search.search(q='Test') + + hits = list(map(lambda x: x['id'], results.docs)) + + # This relies on the default response which is fed into HTTPretty + self.assertEqual( + hits, ["12341", "12342", "12343", "12344", + "12345", "12346", "12347"]) + + def test_cloudsearch_results_iterator(self): + """Check the results iterator""" + search = SearchConnection(endpoint=HOSTNAME) + + results = search.search(q='Test') + results_correct = iter(["12341", "12342", "12343", "12344", + "12345", "12346", "12347"]) + for x in results: + self.assertEqual(x['id'], next(results_correct)) + + + def test_cloudsearch_results_internal_consistancy(self): + """Check the documents length matches the iterator details""" + search = SearchConnection(endpoint=HOSTNAME) + + results = search.search(q='Test') + + self.assertEqual(len(results), len(results.docs)) + + def test_cloudsearch_search_nextpage(self): + """Check next page query is correct""" + search = SearchConnection(endpoint=HOSTNAME) + query1 = search.build_query(q='Test') + query2 = search.build_query(q='Test') + + results = search(query2) + + self.assertEqual(results.next_page().query.start, + query1.start + query1.size) + self.assertEqual(query1.q, query2.q) + +class CloudSearchSearchFacetTest(CloudSearchSearchBaseTest): + response = { + 'rank': '-text_relevance', + 'match-expr': "Test", + 'hits': { + 'found': 30, + 'start': 0, + 'hit': CloudSearchSearchBaseTest.hits + }, + 'info': { + 'rid': 'b7c167f6c2da6d93531b9a7b314ad030b3a74803b4b7797edb905ba5a6a08', + 'time-ms': 2, + 'cpu-time-ms': 0 + }, + 'facets': { + 'tags': {}, + 'animals': {'constraints': [{'count': '2', 'value': 'fish'}, {'count': '1', 'value': 'lions'}]}, + } + } + + def test_cloudsearch_search_facets(self): + #self.response['facets'] = {'tags': {}} + + search = SearchConnection(endpoint=HOSTNAME) + + results = search.search(q='Test', facet=['tags']) + + self.assertTrue('tags' not in results.facets) + self.assertEqual(results.facets['animals'], {u'lions': u'1', u'fish': u'2'}) + + +class CloudSearchNonJsonTest(CloudSearchSearchBaseTest): + response = b'

<html><body><h1>500 Internal Server Error</h1></body></html>' + response_status = 500 + content_type = 'text/xml' + + def test_response(self): + search = SearchConnection(endpoint=HOSTNAME) + + with self.assertRaises(SearchServiceException): + search.search(q='Test') + + +class CloudSearchUnauthorizedTest(CloudSearchSearchBaseTest): + response = b'<html><body><h1>403 Forbidden</h1></body></html>

    foo bar baz' + response_status = 403 + content_type = 'text/html' + + def test_response(self): + search = SearchConnection(endpoint=HOSTNAME) + + with self.assertRaisesRegexp(SearchServiceException, 'foo bar baz'): + search.search(q='Test') + + +class FakeResponse(object): + status_code = 405 + content = b'' + + +class CloudSearchConnectionTest(unittest.TestCase): + cloudsearch = True + + def setUp(self): + super(CloudSearchConnectionTest, self).setUp() + self.conn = SearchConnection( + endpoint='test-domain.cloudsearch.amazonaws.com' + ) + + def test_expose_additional_error_info(self): + mpo = mock.patch.object + fake = FakeResponse() + fake.content = b'Nopenopenope' + + # First, in the case of a non-JSON, non-403 error. + with mpo(requests, 'get', return_value=fake) as mock_request: + with self.assertRaises(SearchServiceException) as cm: + self.conn.search(q='not_gonna_happen') + + self.assertTrue('non-json response' in str(cm.exception)) + self.assertTrue('Nopenopenope' in str(cm.exception)) + + # Then with JSON & an 'error' key within. + fake.content = json.dumps({ + 'error': "Something went wrong. Oops." + }).encode('utf-8') + + with mpo(requests, 'get', return_value=fake) as mock_request: + with self.assertRaises(SearchServiceException) as cm: + self.conn.search(q='no_luck_here') + + self.assertTrue('Unknown error' in str(cm.exception)) + self.assertTrue('went wrong. Oops' in str(cm.exception)) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch2/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..662c2cfc2e400281c50d2f47619e7b821df126d9 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch2/__init__.py @@ -0,0 +1,18 @@ +DEMO_DOMAIN_DATA = { + "SearchInstanceType": None, + "DomainId": "1234567890/demo", + "DomainName": "demo", + "Deleted": False, + "SearchInstanceCount": 0, + "Created": True, + "SearchService": { + "Endpoint": "search-demo.us-east-1.cloudsearch.amazonaws.com" + }, + "RequiresIndexDocuments": False, + "Processing": False, + "DocService": { + "Endpoint": "doc-demo.us-east-1.cloudsearch.amazonaws.com" + }, + "ARN": "arn:aws:cs:us-east-1:1234567890:domain/demo", + "SearchPartitionCount": 0 +} diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch2/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch2/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..c182478e4fac41e1b5a43d58429b2e7eeae81b55 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch2/test_connection.py @@ -0,0 +1,250 @@ +#!/usr/bin env python + +from tests.unit import AWSMockServiceTestCase + +from boto.cloudsearch2.domain import Domain +from boto.cloudsearch2.layer1 import CloudSearchConnection + + +class TestCloudSearchCreateDomain(AWSMockServiceTestCase): + connection_class = CloudSearchConnection + + def default_body(self): + return b""" +{ + "CreateDomainResponse": { + "CreateDomainResult": { + "DomainStatus": { + "SearchInstanceType": null, + "DomainId": "1234567890/demo", + "DomainName": "demo", + "Deleted": false, + "SearchInstanceCount": 0, + "Created": true, + "SearchService": { + "Endpoint": "search-demo.us-east-1.cloudsearch.amazonaws.com" + }, + "RequiresIndexDocuments": false, + "Processing": false, + "DocService": { + "Endpoint": "doc-demo.us-east-1.cloudsearch.amazonaws.com" + }, + "ARN": "arn:aws:cs:us-east-1:1234567890:domain/demo", + "SearchPartitionCount": 0 + 
} + }, + "ResponseMetadata": { + "RequestId": "00000000-0000-0000-0000-000000000000" + } + } +} +""" + + def test_create_domain(self): + self.set_http_response(status_code=200) + self.service_connection.create_domain('demo') + + self.assert_request_parameters({ + 'Action': 'CreateDomain', + 'ContentType': 'JSON', + 'DomainName': 'demo', + 'Version': '2013-01-01', + }) + + def test_cloudsearch_connect_result_endpoints(self): + """Check that endpoints & ARNs are correctly returned from AWS""" + + self.set_http_response(status_code=200) + api_response = self.service_connection.create_domain('demo') + domain = Domain(self, api_response['CreateDomainResponse'] + ['CreateDomainResult'] + ['DomainStatus']) + + self.assertEqual( + domain.doc_service_endpoint, + "doc-demo.us-east-1.cloudsearch.amazonaws.com") + self.assertEqual(domain.service_arn, + "arn:aws:cs:us-east-1:1234567890:domain/demo") + self.assertEqual( + domain.search_service_endpoint, + "search-demo.us-east-1.cloudsearch.amazonaws.com") + + def test_cloudsearch_connect_result_statuses(self): + """Check that domain statuses are correctly returned from AWS""" + self.set_http_response(status_code=200) + api_response = self.service_connection.create_domain('demo') + domain = Domain(self, api_response['CreateDomainResponse'] + ['CreateDomainResult'] + ['DomainStatus']) + + self.assertEqual(domain.created, True) + self.assertEqual(domain.processing, False) + self.assertEqual(domain.requires_index_documents, False) + self.assertEqual(domain.deleted, False) + + def test_cloudsearch_connect_result_details(self): + """Check that the domain information is correctly returned from AWS""" + self.set_http_response(status_code=200) + api_response = self.service_connection.create_domain('demo') + domain = Domain(self, api_response['CreateDomainResponse'] + ['CreateDomainResult'] + ['DomainStatus']) + + self.assertEqual(domain.id, "1234567890/demo") + self.assertEqual(domain.name, "demo") + + def test_cloudsearch_documentservice_creation(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.create_domain('demo') + domain = Domain(self, api_response['CreateDomainResponse'] + ['CreateDomainResult'] + ['DomainStatus']) + + document = domain.get_document_service() + + self.assertEqual( + document.endpoint, + "doc-demo.us-east-1.cloudsearch.amazonaws.com") + + def test_cloudsearch_searchservice_creation(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.create_domain('demo') + domain = Domain(self, api_response['CreateDomainResponse'] + ['CreateDomainResult'] + ['DomainStatus']) + + search = domain.get_search_service() + + self.assertEqual( + search.endpoint, + "search-demo.us-east-1.cloudsearch.amazonaws.com") + + +class CloudSearchConnectionDeletionTest(AWSMockServiceTestCase): + connection_class = CloudSearchConnection + + def default_body(self): + return b""" +{ + "DeleteDomainResponse": { + "DeleteDomainResult": { + "DomainStatus": { + "SearchInstanceType": null, + "DomainId": "1234567890/demo", + "DomainName": "test", + "Deleted": true, + "SearchInstanceCount": 0, + "Created": true, + "SearchService": { + "Endpoint": null + }, + "RequiresIndexDocuments": false, + "Processing": false, + "DocService": { + "Endpoint": null + }, + "ARN": "arn:aws:cs:us-east-1:1234567890:domain/demo", + "SearchPartitionCount": 0 + } + }, + "ResponseMetadata": { + "RequestId": "00000000-0000-0000-0000-000000000000" + } + } +} +""" + + def test_cloudsearch_deletion(self): + """ + Check that the 
correct arguments are sent to AWS when creating a + cloudsearch connection. + """ + self.set_http_response(status_code=200) + self.service_connection.delete_domain('demo') + + self.assert_request_parameters({ + 'Action': 'DeleteDomain', + 'ContentType': 'JSON', + 'DomainName': 'demo', + 'Version': '2013-01-01', + }) + + +class CloudSearchConnectionIndexDocumentTest(AWSMockServiceTestCase): + connection_class = CloudSearchConnection + + def default_body(self): + return b""" +{ + "IndexDocumentsResponse": { + "IndexDocumentsResult": { + "FieldNames": [ + "average_score", + "brand_id", + "colors", + "context", + "context_owner", + "created_at", + "creator_id", + "description", + "file_size", + "format", + "has_logo", + "has_messaging", + "height", + "image_id", + "ingested_from", + "is_advertising", + "is_photo", + "is_reviewed", + "modified_at", + "subject_date", + "tags", + "title", + "width" + ] + }, + "ResponseMetadata": { + "RequestId": "42e618d9-c4d9-11e3-8242-c32da3041159" + } + } +} +""" + + def test_cloudsearch_index_documents(self): + """ + Check that the correct arguments are sent to AWS when indexing a + domain. + """ + self.set_http_response(status_code=200) + self.service_connection.index_documents('demo') + + self.assert_request_parameters({ + 'Action': 'IndexDocuments', + 'ContentType': 'JSON', + 'DomainName': 'demo', + 'Version': '2013-01-01', + }) + + def test_cloudsearch_index_documents_resp(self): + """ + Check that the AWS response is being parsed correctly when indexing a + domain. + """ + self.set_http_response(status_code=200) + api_response = self.service_connection.index_documents('demo') + + fields = (api_response['IndexDocumentsResponse'] + ['IndexDocumentsResult'] + ['FieldNames']) + + self.assertEqual(fields, ['average_score', 'brand_id', 'colors', + 'context', 'context_owner', + 'created_at', 'creator_id', + 'description', 'file_size', 'format', + 'has_logo', 'has_messaging', 'height', + 'image_id', 'ingested_from', + 'is_advertising', 'is_photo', + 'is_reviewed', 'modified_at', + 'subject_date', 'tags', 'title', + 'width']) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch2/test_document.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch2/test_document.py new file mode 100644 index 0000000000000000000000000000000000000000..dac4aa643180237f363264eff358066d60fa0d50 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch2/test_document.py @@ -0,0 +1,346 @@ +#!/usr/bin env python +from boto.cloudsearch2.domain import Domain +from boto.cloudsearch2.layer1 import CloudSearchConnection + +from tests.unit import unittest, AWSMockServiceTestCase +from httpretty import HTTPretty +from mock import MagicMock + +import json + +from boto.cloudsearch2.document import DocumentServiceConnection +from boto.cloudsearch2.document import CommitMismatchError, EncodingError, \ + ContentTooLongError, DocumentServiceConnection + +import boto +from tests.unit.cloudsearch2 import DEMO_DOMAIN_DATA + + +class CloudSearchDocumentConnectionTest(AWSMockServiceTestCase): + connection_class = CloudSearchConnection + + def test_proxy(self): + conn = self.service_connection + conn.proxy = "127.0.0.1" + conn.proxy_user = "john.doe" + conn.proxy_pass="p4ssw0rd" + conn.proxy_port="8180" + conn.use_proxy = True + + domain = Domain(conn, DEMO_DOMAIN_DATA) + service = DocumentServiceConnection(domain=domain) + self.assertEqual(service.proxy, {'http': 'http://john.doe:p4ssw0rd@127.0.0.1:8180'}) + + +class CloudSearchDocumentTest(unittest.TestCase): + def 
setUp(self): + HTTPretty.enable() + HTTPretty.register_uri( + HTTPretty.POST, + ("http://doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com/" + "2013-01-01/documents/batch"), + body=json.dumps(self.response).encode('utf-8'), + content_type="application/json") + + def tearDown(self): + HTTPretty.disable() + + +class CloudSearchDocumentSingleTest(CloudSearchDocumentTest): + + response = { + 'status': 'success', + 'adds': 1, + 'deletes': 0, + } + + def test_cloudsearch_add_basics(self): + """ + Check that a simple add document actually sends an add document request + to AWS. + """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.add("1234", {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + document.commit() + + args = json.loads(HTTPretty.last_request.body.decode('utf-8'))[0] + + self.assertEqual(args['type'], 'add') + + def test_cloudsearch_add_single_basic(self): + """ + Check that a simple add document sends correct document metadata to + AWS. + """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.add("1234", {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + document.commit() + + args = json.loads(HTTPretty.last_request.body.decode('utf-8'))[0] + + self.assertEqual(args['id'], '1234') + self.assertEqual(args['type'], 'add') + + def test_cloudsearch_add_single_fields(self): + """ + Check that a simple add document sends the actual document to AWS. + """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.add("1234", {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + document.commit() + + args = json.loads(HTTPretty.last_request.body.decode('utf-8'))[0] + + self.assertEqual(args['fields']['category'], ['cat_a', 'cat_b', + 'cat_c']) + self.assertEqual(args['fields']['id'], '1234') + self.assertEqual(args['fields']['title'], 'Title 1') + + def test_cloudsearch_add_single_result(self): + """ + Check that the reply from adding a single document is correctly parsed. 
+ """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.add("1234", {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + doc = document.commit() + + self.assertEqual(doc.status, 'success') + self.assertEqual(doc.adds, 1) + self.assertEqual(doc.deletes, 0) + + self.assertEqual(doc.doc_service, document) + + +class CloudSearchDocumentMultipleAddTest(CloudSearchDocumentTest): + + response = { + 'status': 'success', + 'adds': 3, + 'deletes': 0, + } + + objs = { + '1234': { + 'fields': {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}}, + '1235': { + 'fields': {"id": "1235", "title": "Title 2", + "category": ["cat_b", "cat_c", + "cat_d"]}}, + '1236': { + 'fields': {"id": "1236", "title": "Title 3", + "category": ["cat_e", "cat_f", "cat_g"]}}, + } + + + def test_cloudsearch_add_basics(self): + """Check that multiple documents are added correctly to AWS""" + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + for (key, obj) in self.objs.items(): + document.add(key, obj['fields']) + document.commit() + + args = json.loads(HTTPretty.last_request.body.decode('utf-8')) + + for arg in args: + self.assertTrue(arg['id'] in self.objs) + self.assertEqual(arg['fields']['id'], + self.objs[arg['id']]['fields']['id']) + self.assertEqual(arg['fields']['title'], + self.objs[arg['id']]['fields']['title']) + self.assertEqual(arg['fields']['category'], + self.objs[arg['id']]['fields']['category']) + + def test_cloudsearch_add_results(self): + """ + Check that the result from adding multiple documents is parsed + correctly. + """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + for (key, obj) in self.objs.items(): + document.add(key, obj['fields']) + doc = document.commit() + + self.assertEqual(doc.status, 'success') + self.assertEqual(doc.adds, len(self.objs)) + self.assertEqual(doc.deletes, 0) + + +class CloudSearchDocumentDelete(CloudSearchDocumentTest): + + response = { + 'status': 'success', + 'adds': 0, + 'deletes': 1, + } + + def test_cloudsearch_delete(self): + """ + Test that the request for a single document deletion is done properly. + """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.delete("5") + document.commit() + args = json.loads(HTTPretty.last_request.body.decode('utf-8'))[0] + + self.assertEqual(args['type'], 'delete') + self.assertEqual(args['id'], '5') + + def test_cloudsearch_delete_results(self): + """ + Check that the result of a single document deletion is parsed properly. 
+ """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.delete("5") + doc = document.commit() + + self.assertEqual(doc.status, 'success') + self.assertEqual(doc.adds, 0) + self.assertEqual(doc.deletes, 1) + + +class CloudSearchDocumentDeleteMultiple(CloudSearchDocumentTest): + response = { + 'status': 'success', + 'adds': 0, + 'deletes': 2, + } + + def test_cloudsearch_delete_multiples(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.delete("5") + document.delete("6") + document.commit() + args = json.loads(HTTPretty.last_request.body.decode('utf-8')) + + self.assertEqual(len(args), 2) + for arg in args: + self.assertEqual(arg['type'], 'delete') + + +class CloudSearchSDFManipulation(CloudSearchDocumentTest): + response = { + 'status': 'success', + 'adds': 1, + 'deletes': 0, + } + + def test_cloudsearch_initial_sdf_is_blank(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + + self.assertEqual(document.get_sdf(), '[]') + + def test_cloudsearch_single_document_sdf(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + + document.add("1234", {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + + self.assertNotEqual(document.get_sdf(), '[]') + + document.clear_sdf() + + self.assertEqual(document.get_sdf(), '[]') + + +class CloudSearchBadSDFTesting(CloudSearchDocumentTest): + response = { + 'status': 'success', + 'adds': 1, + 'deletes': 0, + } + + def test_cloudsearch_erroneous_sdf(self): + original = boto.log.error + boto.log.error = MagicMock() + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + + document.add("1234", {"id": "1234", "title": None, + "category": ["cat_a", "cat_b", "cat_c"]}) + + document.commit() + self.assertNotEqual(len(boto.log.error.call_args_list), 1) + + boto.log.error = original + + +class CloudSearchDocumentErrorBadUnicode(CloudSearchDocumentTest): + response = { + 'status': 'error', + 'adds': 0, + 'deletes': 0, + 'errors': [{'message': 'Illegal Unicode character in document'}] + } + + def test_fake_bad_unicode(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.add("1234", {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + self.assertRaises(EncodingError, document.commit) + + +class CloudSearchDocumentErrorDocsTooBig(CloudSearchDocumentTest): + response = { + 'status': 'error', + 'adds': 0, + 'deletes': 0, + 'errors': [{'message': 'The Content-Length is too long'}] + } + + def test_fake_docs_too_big(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.add("1234", {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + + self.assertRaises(ContentTooLongError, document.commit) + + +class CloudSearchDocumentErrorMismatch(CloudSearchDocumentTest): + response = { + 'status': 'error', + 'adds': 0, + 'deletes': 0, + 'errors': [{'message': 'Something went wrong'}] + } + + def test_fake_failure(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + + document.add("1234", {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + 
self.assertRaises(CommitMismatchError, document.commit) + + def test_attached_errors_list(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com" + ) + document.add("1234", {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + try: + document.commit() + #If we get here that is a problem + #Working around the assertRaises not giving me exception instance. + self.assertTrue(False) + except CommitMismatchError as e: + self.assertTrue(hasattr(e, 'errors')) + self.assertIsInstance(e.errors, list) + self.assertEquals(e.errors[0], self.response['errors'][0].get('message')) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch2/test_exceptions.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch2/test_exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..ebf1b47cf7d402fb65b55ba460307c8455454354 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch2/test_exceptions.py @@ -0,0 +1,37 @@ +from boto.compat import json +from tests.compat import mock, unittest + +from tests.unit.cloudsearch2.test_search import HOSTNAME, \ + CloudSearchSearchBaseTest +from boto.cloudsearch2.search import SearchConnection, SearchServiceException + + +def fake_loads_value_error(content, *args, **kwargs): + """Callable to generate a fake ValueError""" + raise ValueError("HAHAHA! Totally not simplejson & you gave me bad JSON.") + + +def fake_loads_json_error(content, *args, **kwargs): + """Callable to generate a fake JSONDecodeError""" + raise json.JSONDecodeError('Using simplejson & you gave me bad JSON.', + '', 0) + + +class CloudSearchJSONExceptionTest(CloudSearchSearchBaseTest): + response = b'{}' + + def test_no_simplejson_value_error(self): + with mock.patch.object(json, 'loads', fake_loads_value_error): + search = SearchConnection(endpoint=HOSTNAME) + + with self.assertRaisesRegexp(SearchServiceException, 'non-json'): + search.search(q='test') + + @unittest.skipUnless(hasattr(json, 'JSONDecodeError'), + 'requires simplejson') + def test_simplejson_jsondecodeerror(self): + with mock.patch.object(json, 'loads', fake_loads_json_error): + search = SearchConnection(endpoint=HOSTNAME) + + with self.assertRaisesRegexp(SearchServiceException, 'non-json'): + search.search(q='test') diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch2/test_search.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch2/test_search.py new file mode 100644 index 0000000000000000000000000000000000000000..8fa611d82ceacc05cce9fa415fbe6008c78f420b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearch2/test_search.py @@ -0,0 +1,387 @@ +#!/usr/bin env python +from boto.cloudsearch2.domain import Domain +from boto.cloudsearch2.layer1 import CloudSearchConnection + +from tests.compat import mock, unittest +from httpretty import HTTPretty + +import json + +from boto.cloudsearch2.search import SearchConnection, SearchServiceException +from boto.compat import six, map +from tests.unit import AWSMockServiceTestCase +from tests.unit.cloudsearch2 import DEMO_DOMAIN_DATA +from tests.unit.cloudsearch2.test_connection import TestCloudSearchCreateDomain + +HOSTNAME = "search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com" +FULL_URL = 'http://%s/2013-01-01/search' % HOSTNAME + + +class CloudSearchSearchBaseTest(unittest.TestCase): + + hits = [ + { + 'id': '12341', + 'fields': { + 'title': 'Document 1', + 'rank': 1 + } + }, + { + 'id': '12342', + 'fields': { + 
'title': 'Document 2', + 'rank': 2 + } + }, + { + 'id': '12343', + 'fields': { + 'title': 'Document 3', + 'rank': 3 + } + }, + { + 'id': '12344', + 'fields': { + 'title': 'Document 4', + 'rank': 4 + } + }, + { + 'id': '12345', + 'fields': { + 'title': 'Document 5', + 'rank': 5 + } + }, + { + 'id': '12346', + 'fields': { + 'title': 'Document 6', + 'rank': 6 + } + }, + { + 'id': '12347', + 'fields': { + 'title': 'Document 7', + 'rank': 7 + } + }, + ] + + content_type = "text/xml" + response_status = 200 + + def get_args(self, requestline): + (_, request, _) = requestline.split(b" ") + (_, request) = request.split(b"?", 1) + args = six.moves.urllib.parse.parse_qs(request) + return args + + def setUp(self): + HTTPretty.enable() + body = self.response + + if not isinstance(body, bytes): + body = json.dumps(body).encode('utf-8') + + HTTPretty.register_uri(HTTPretty.GET, FULL_URL, + body=body, + content_type=self.content_type, + status=self.response_status) + + def tearDown(self): + HTTPretty.disable() + + +class CloudSearchSearchTest(CloudSearchSearchBaseTest): + response = { + 'rank': '-text_relevance', + 'match-expr': "Test", + 'hits': { + 'found': 30, + 'start': 0, + 'hit': CloudSearchSearchBaseTest.hits + }, + 'status': { + 'rid': 'b7c167f6c2da6d93531b9a7b314ad030b3a74803b4b7797edb905ba5a6a08', + 'time-ms': 2, + 'cpu-time-ms': 0 + } + + } + + def test_cloudsearch_qsearch(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', options='TestOptions') + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args[b'q'], [b"Test"]) + self.assertEqual(args[b'q.options'], [b"TestOptions"]) + self.assertEqual(args[b'start'], [b"0"]) + self.assertEqual(args[b'size'], [b"10"]) + + def test_cloudsearch_search_details(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', size=50, start=20) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args[b'q'], [b"Test"]) + self.assertEqual(args[b'size'], [b"50"]) + self.assertEqual(args[b'start'], [b"20"]) + + def test_cloudsearch_facet_constraint_single(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search( + q='Test', + facet={'author': "'John Smith','Mark Smith'"}) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args[b'facet.author'], + [b"'John Smith','Mark Smith'"]) + + def test_cloudsearch_facet_constraint_multiple(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search( + q='Test', + facet={'author': "'John Smith','Mark Smith'", + 'category': "'News','Reviews'"}) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args[b'facet.author'], + [b"'John Smith','Mark Smith'"]) + self.assertEqual(args[b'facet.category'], + [b"'News','Reviews'"]) + + def test_cloudsearch_facet_sort_single(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', facet={'author': {'sort': 'alpha'}}) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + print(args) + + self.assertEqual(args[b'facet.author'], [b'{"sort": "alpha"}']) + + def test_cloudsearch_facet_sort_multiple(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', facet={'author': {'sort': 'alpha'}, + 'cat': {'sort': 'count'}}) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args[b'facet.author'], [b'{"sort": "alpha"}']) + self.assertEqual(args[b'facet.cat'], [b'{"sort": "count"}']) + + def 
test_cloudsearch_result_fields_single(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', return_fields=['author']) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args[b'return'], [b'author']) + + def test_cloudsearch_result_fields_multiple(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', return_fields=['author', 'title']) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args[b'return'], [b'author,title']) + + def test_cloudsearch_results_meta(self): + """Check returned metadata is parsed correctly""" + search = SearchConnection(endpoint=HOSTNAME) + + results = search.search(q='Test') + + # These rely on the default response which is fed into HTTPretty + self.assertEqual(results.hits, 30) + self.assertEqual(results.docs[0]['fields']['rank'], 1) + + def test_cloudsearch_results_info(self): + """Check num_pages_needed is calculated correctly""" + search = SearchConnection(endpoint=HOSTNAME) + + results = search.search(q='Test') + + # This relies on the default response which is fed into HTTPretty + self.assertEqual(results.num_pages_needed, 3.0) + + def test_cloudsearch_results_matched(self): + """ + Check that information objects are passed back through the API + correctly. + """ + search = SearchConnection(endpoint=HOSTNAME) + query = search.build_query(q='Test') + + results = search(query) + + self.assertEqual(results.search_service, search) + self.assertEqual(results.query, query) + + def test_cloudsearch_results_hits(self): + """Check that documents are parsed properly from AWS""" + search = SearchConnection(endpoint=HOSTNAME) + + results = search.search(q='Test') + + hits = list(map(lambda x: x['id'], results.docs)) + + # This relies on the default response which is fed into HTTPretty + self.assertEqual( + hits, ["12341", "12342", "12343", "12344", + "12345", "12346", "12347"]) + + def test_cloudsearch_results_iterator(self): + """Check the results iterator""" + search = SearchConnection(endpoint=HOSTNAME) + + results = search.search(q='Test') + results_correct = iter(["12341", "12342", "12343", "12344", + "12345", "12346", "12347"]) + for x in results: + self.assertEqual(x['id'], next(results_correct)) + + def test_cloudsearch_results_internal_consistancy(self): + """Check the documents length matches the iterator details""" + search = SearchConnection(endpoint=HOSTNAME) + + results = search.search(q='Test') + + self.assertEqual(len(results), len(results.docs)) + + def test_cloudsearch_search_nextpage(self): + """Check next page query is correct""" + search = SearchConnection(endpoint=HOSTNAME) + query1 = search.build_query(q='Test') + query2 = search.build_query(q='Test') + + results = search(query2) + + self.assertEqual(results.next_page().query.start, + query1.start + query1.size) + self.assertEqual(query1.q, query2.q) + + +class CloudSearchSearchFacetTest(CloudSearchSearchBaseTest): + response = { + 'rank': '-text_relevance', + 'match-expr': "Test", + 'hits': { + 'found': 30, + 'start': 0, + 'hit': CloudSearchSearchBaseTest.hits + }, + 'status': { + 'rid': 'b7c167f6c2da6d93531b9a7b314ad030b3a74803b4b7797edb905ba5a6a08', + 'time-ms': 2, + 'cpu-time-ms': 0 + }, + 'facets': { + 'tags': {}, + 'animals': {'buckets': [{'count': '2', 'value': 'fish'}, {'count': '1', 'value': 'lions'}]}, + } + } + + def test_cloudsearch_search_facets(self): + #self.response['facets'] = {'tags': {}} + + search = SearchConnection(endpoint=HOSTNAME) + + results = 
search.search(q='Test', facet={'tags': {}}) + + self.assertTrue('tags' not in results.facets) + self.assertEqual(results.facets['animals'], {u'lions': u'1', u'fish': u'2'}) + + +class CloudSearchNonJsonTest(CloudSearchSearchBaseTest): + response = b'

<html><body><h1>500 Internal Server Error</h1></body></html>' + response_status = 500 + content_type = 'text/xml' + + def test_response(self): + search = SearchConnection(endpoint=HOSTNAME) + + with self.assertRaises(SearchServiceException): + search.search(q='Test') + + +class CloudSearchUnauthorizedTest(CloudSearchSearchBaseTest): + response = b'<html><body><h1>403 Forbidden</h1></body></html>

    foo bar baz' + response_status = 403 + content_type = 'text/html' + + def test_response(self): + search = SearchConnection(endpoint=HOSTNAME) + + with self.assertRaisesRegexp(SearchServiceException, 'foo bar baz'): + search.search(q='Test') + + +class FakeResponse(object): + status_code = 405 + content = b'' + + +class CloudSearchConnectionTest(AWSMockServiceTestCase): + cloudsearch = True + connection_class = CloudSearchConnection + + def setUp(self): + super(CloudSearchConnectionTest, self).setUp() + self.conn = SearchConnection( + endpoint='test-domain.cloudsearch.amazonaws.com' + ) + + def test_expose_additional_error_info(self): + mpo = mock.patch.object + fake = FakeResponse() + fake.content = b'Nopenopenope' + + # First, in the case of a non-JSON, non-403 error. + with mpo(self.conn.session, 'get', return_value=fake) as mock_request: + with self.assertRaises(SearchServiceException) as cm: + self.conn.search(q='not_gonna_happen') + + self.assertTrue('non-json response' in str(cm.exception)) + self.assertTrue('Nopenopenope' in str(cm.exception)) + + # Then with JSON & an 'error' key within. + fake.content = json.dumps({ + 'error': "Something went wrong. Oops." + }).encode('utf-8') + + with mpo(self.conn.session, 'get', return_value=fake) as mock_request: + with self.assertRaises(SearchServiceException) as cm: + self.conn.search(q='no_luck_here') + + self.assertTrue('Unknown error' in str(cm.exception)) + self.assertTrue('went wrong. Oops' in str(cm.exception)) + + def test_proxy(self): + conn = self.service_connection + conn.proxy = "127.0.0.1" + conn.proxy_user = "john.doe" + conn.proxy_pass="p4ssw0rd" + conn.proxy_port="8180" + conn.use_proxy = True + + domain = Domain(conn, DEMO_DOMAIN_DATA) + search = SearchConnection(domain=domain) + self.assertEqual(search.session.proxies, {'http': 'http://john.doe:p4ssw0rd@127.0.0.1:8180'}) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearchdomain/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearchdomain/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearchdomain/test_cloudsearchdomain.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearchdomain/test_cloudsearchdomain.py new file mode 100644 index 0000000000000000000000000000000000000000..694e98ff66710dc3dd029cdc0bfd85ecd79b3d9b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudsearchdomain/test_cloudsearchdomain.py @@ -0,0 +1,127 @@ +#!/usr/bin env python +import json +import mock +from tests.unit import AWSMockServiceTestCase +from boto.cloudsearch2.domain import Domain +from boto.cloudsearch2.layer1 import CloudSearchConnection +from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection + + +class CloudSearchDomainConnectionTest(AWSMockServiceTestCase): + connection_class = CloudSearchDomainConnection + + domain_status = """{ + "SearchInstanceType": null, + "DomainId": "1234567890/demo", + "DomainName": "demo", + "Deleted": false, + "SearchInstanceCount": 0, + "Created": true, + "SearchService": { + "Endpoint": "search-demo.us-east-1.cloudsearch.amazonaws.com" + }, + "RequiresIndexDocuments": false, + "Processing": false, + "DocService": { + "Endpoint": "doc-demo.us-east-1.cloudsearch.amazonaws.com" + }, + "ARN": "arn:aws:cs:us-east-1:1234567890:domain/demo", + "SearchPartitionCount": 0 + }""" + + def create_service_connection(self, **kwargs): + if kwargs.get('host', None) is None: + 
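# Fall back to the demo domain's search endpoint when no host is given. +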
kwargs['host'] = 'search-demo.us-east-1.cloudsearch.amazonaws.com' + return super(CloudSearchDomainConnectionTest, self).\ + create_service_connection(**kwargs) + + def test_get_search_service(self): + layer1 = CloudSearchConnection(aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key', + sign_request=True) + domain = Domain(layer1=layer1, data=json.loads(self.domain_status)) + search_service = domain.get_search_service() + + self.assertEqual(search_service.sign_request, True) + + def test_get_document_service(self): + layer1 = CloudSearchConnection(aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key', + sign_request=True) + domain = Domain(layer1=layer1, data=json.loads(self.domain_status)) + document_service = domain.get_document_service() + + self.assertEqual(document_service.sign_request, True) + + def test_search_with_auth(self): + layer1 = CloudSearchConnection(aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key', + sign_request=True) + domain = Domain(layer1=layer1, data=json.loads(self.domain_status)) + search_service = domain.get_search_service() + + response = { + 'rank': '-text_relevance', + 'match-expr': "Test", + 'hits': { + 'found': 30, + 'start': 0, + 'hit': { + 'id': '12341', + 'fields': { + 'title': 'Document 1', + 'rank': 1 + } + } + }, + 'status': { + 'rid': 'b7c167f6c2da6d93531b9a7b314ad030b3a74803b4b7797edb905ba5a6a08', + 'time-ms': 2, + 'cpu-time-ms': 0 + } + + } + + self.set_http_response(status_code=200, body=json.dumps(response)) + search_service.domain_connection = self.service_connection + resp = search_service.search() + + headers = self.actual_request.headers + + self.assertIsNotNone(headers.get('Authorization')) + + def test_upload_documents_with_auth(self): + layer1 = CloudSearchConnection(aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key', + sign_request=True) + domain = Domain(layer1=layer1, data=json.loads(self.domain_status)) + document_service = domain.get_document_service() + + response = { + 'status': 'success', + 'adds': 1, + 'deletes': 0, + } + + document = { + "id": "1234", + "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"] + } + + self.set_http_response(status_code=200, body=json.dumps(response)) + document_service.domain_connection = self.service_connection + document_service.add("1234", document) + resp = document_service.commit() + + headers = self.actual_request.headers + + self.assertIsNotNone(headers.get('Authorization')) + + def test_no_host_provided(self): + # A host must be provided or an error is thrown. 
+ with self.assertRaises(ValueError): + CloudSearchDomainConnection( + aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key' + ) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudtrail/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudtrail/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudtrail/test_layer1.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudtrail/test_layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..10f6f7022146bd77f3b012d3a4a7927af229b753 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/cloudtrail/test_layer1.py @@ -0,0 +1,81 @@ +#!/usr/bin/env python + +import json + +from boto.cloudtrail.layer1 import CloudTrailConnection +from tests.unit import AWSMockServiceTestCase + + +class TestDescribeTrails(AWSMockServiceTestCase): + connection_class = CloudTrailConnection + + def default_body(self): + return b''' + {"trailList": + [ + { + "IncludeGlobalServiceEvents": false, + "Name": "test", + "SnsTopicName": "cloudtrail-1", + "S3BucketName": "cloudtrail-1" + } + ] + }''' + + def test_describe(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.describe_trails() + + self.assertEqual(1, len(api_response['trailList'])) + self.assertEqual('test', api_response['trailList'][0]['Name']) + + self.assert_request_parameters({}) + + target = self.actual_request.headers['X-Amz-Target'] + self.assertTrue('DescribeTrails' in target) + + def test_describe_name_list(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.describe_trails( + trail_name_list=['test']) + + self.assertEqual(1, len(api_response['trailList'])) + self.assertEqual('test', api_response['trailList'][0]['Name']) + + self.assertEqual(json.dumps({ + 'trailNameList': ['test'] + }), self.actual_request.body.decode('utf-8')) + + target = self.actual_request.headers['X-Amz-Target'] + self.assertTrue('DescribeTrails' in target) + + +class TestCreateTrail(AWSMockServiceTestCase): + connection_class = CloudTrailConnection + + def default_body(self): + return b''' + {"trail": + { + "IncludeGlobalServiceEvents": false, + "Name": "test", + "SnsTopicName": "cloudtrail-1", + "S3BucketName": "cloudtrail-1" + } + }''' + + def test_create(self): + self.set_http_response(status_code=200) + + api_response = self.service_connection.create_trail( + 'test', 'cloudtrail-1', sns_topic_name='cloudtrail-1', + include_global_service_events=False) + + self.assertEqual('test', api_response['trail']['Name']) + self.assertEqual('cloudtrail-1', api_response['trail']['S3BucketName']) + self.assertEqual('cloudtrail-1', api_response['trail']['SnsTopicName']) + self.assertEqual(False, + api_response['trail']['IncludeGlobalServiceEvents']) + + target = self.actual_request.headers['X-Amz-Target'] + self.assertTrue('CreateTrail' in target) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/directconnect/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/directconnect/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/directconnect/test_layer1.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/directconnect/test_layer1.py new file mode 100644 index 
0000000000000000000000000000000000000000..851520975d0d83815c37ebebaad83d1718b4edef --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/directconnect/test_layer1.py @@ -0,0 +1,58 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.directconnect.layer1 import DirectConnectConnection +from tests.unit import AWSMockServiceTestCase + + +class TestDescribeTrails(AWSMockServiceTestCase): + connection_class = DirectConnectConnection + + def default_body(self): + return b''' +{ + "connections": [ + { + "bandwidth": "string", + "connectionId": "string", + "connectionName": "string", + "connectionState": "string", + "location": "string", + "ownerAccount": "string", + "partnerName": "string", + "region": "string", + "vlan": 1 + } + ] +}''' + + def test_describe(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.describe_connections() + + self.assertEqual(1, len(api_response['connections'])) + self.assertEqual('string', api_response['connections'][0]['region']) + + self.assert_request_parameters({}) + + target = self.actual_request.headers['X-Amz-Target'] + self.assertTrue('DescribeConnections' in target) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb/test_batch.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb/test_batch.py new file mode 100644 index 0000000000000000000000000000000000000000..545aed7b27c19af02881b6fd9ae2cfdfa2eaed2b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb/test_batch.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from tests.unit import unittest + +from boto.dynamodb.batch import Batch +from boto.dynamodb.table import Table +from boto.dynamodb.layer2 import Layer2 +from boto.dynamodb.batch import BatchList + + +DESCRIBE_TABLE_1 = { + 'Table': { + 'CreationDateTime': 1349910554.478, + 'ItemCount': 1, + 'KeySchema': {'HashKeyElement': {'AttributeName': u'foo', + 'AttributeType': u'S'}}, + 'ProvisionedThroughput': {'ReadCapacityUnits': 10, + 'WriteCapacityUnits': 10}, + 'TableName': 'testtable', + 'TableSizeBytes': 54, + 'TableStatus': 'ACTIVE'} +} + +DESCRIBE_TABLE_2 = { + 'Table': { + 'CreationDateTime': 1349910554.478, + 'ItemCount': 1, + 'KeySchema': {'HashKeyElement': {'AttributeName': u'baz', + 'AttributeType': u'S'}, + 'RangeKeyElement': {'AttributeName': 'myrange', + 'AttributeType': 'N'}}, + 'ProvisionedThroughput': {'ReadCapacityUnits': 10, + 'WriteCapacityUnits': 10}, + 'TableName': 'testtable2', + 'TableSizeBytes': 54, + 'TableStatus': 'ACTIVE'} +} + + +class TestBatchObjects(unittest.TestCase): + maxDiff = None + + def setUp(self): + self.layer2 = Layer2('access_key', 'secret_key') + self.table = Table(self.layer2, DESCRIBE_TABLE_1) + self.table2 = Table(self.layer2, DESCRIBE_TABLE_2) + + def test_batch_to_dict(self): + b = Batch(self.table, ['k1', 'k2'], attributes_to_get=['foo'], + consistent_read=True) + self.assertDictEqual( + b.to_dict(), + {'AttributesToGet': ['foo'], + 'Keys': [{'HashKeyElement': {'S': 'k1'}}, + {'HashKeyElement': {'S': 'k2'}}], + 'ConsistentRead': True} + ) + + def test_batch_consistent_read_defaults_to_false(self): + b = Batch(self.table, ['k1']) + self.assertDictEqual( + b.to_dict(), + {'Keys': [{'HashKeyElement': {'S': 'k1'}}], + 'ConsistentRead': False} + ) + + def test_batch_list_consistent_read(self): + b = BatchList(self.layer2) + b.add_batch(self.table, ['k1'], ['foo'], consistent_read=True) + b.add_batch(self.table2, [('k2', 54)], ['bar'], consistent_read=False) + self.assertDictEqual( + b.to_dict(), + {'testtable': {'AttributesToGet': ['foo'], + 'Keys': [{'HashKeyElement': {'S': 'k1'}}], + 'ConsistentRead': True}, + 'testtable2': {'AttributesToGet': ['bar'], + 'Keys': [{'HashKeyElement': {'S': 'k2'}, + 'RangeKeyElement': {'N': '54'}}], + 'ConsistentRead': False}}) + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb/test_layer2.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb/test_layer2.py new file mode 100644 index 
0000000000000000000000000000000000000000..dc8555ed6a00dc9a01d51b67f7c5b7550101a2e7 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb/test_layer2.py @@ -0,0 +1,119 @@ +#!/usr/bin/env python +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from tests.unit import unittest +from mock import Mock + +from boto.dynamodb.layer2 import Layer2 +from boto.dynamodb.table import Table, Schema + + +DESCRIBE_TABLE = { + "Table": { + "CreationDateTime": 1.353526122785E9, "ItemCount": 1, + "KeySchema": { + "HashKeyElement": {"AttributeName": "foo", "AttributeType": "N"}}, + "ProvisionedThroughput": { + "NumberOfDecreasesToday": 0, + "ReadCapacityUnits": 5, + "WriteCapacityUnits": 5}, + "TableName": "footest", + "TableSizeBytes": 21, + "TableStatus": "ACTIVE"} +} + + +class TestTableConstruction(unittest.TestCase): + def setUp(self): + self.layer2 = Layer2('access_key', 'secret_key') + self.api = Mock() + self.layer2.layer1 = self.api + + def test_get_table(self): + self.api.describe_table.return_value = DESCRIBE_TABLE + table = self.layer2.get_table('footest') + self.assertEqual(table.name, 'footest') + self.assertEqual(table.create_time, 1353526122.785) + self.assertEqual(table.status, 'ACTIVE') + self.assertEqual(table.item_count, 1) + self.assertEqual(table.size_bytes, 21) + self.assertEqual(table.read_units, 5) + self.assertEqual(table.write_units, 5) + self.assertEqual(table.schema, Schema.create(hash_key=('foo', 'N'))) + + def test_create_table_without_api_call(self): + table = self.layer2.table_from_schema( + name='footest', + schema=Schema.create(hash_key=('foo', 'N'))) + self.assertEqual(table.name, 'footest') + self.assertEqual(table.schema, Schema.create(hash_key=('foo', 'N'))) + # describe_table is never called. 
+ self.assertEqual(self.api.describe_table.call_count, 0) + + def test_create_schema_with_hash_and_range(self): + schema = self.layer2.create_schema('foo', int, 'bar', str) + self.assertEqual(schema.hash_key_name, 'foo') + self.assertEqual(schema.hash_key_type, 'N') + self.assertEqual(schema.range_key_name, 'bar') + self.assertEqual(schema.range_key_type, 'S') + + def test_create_schema_with_hash(self): + schema = self.layer2.create_schema('foo', str) + self.assertEqual(schema.hash_key_name, 'foo') + self.assertEqual(schema.hash_key_type, 'S') + self.assertIsNone(schema.range_key_name) + self.assertIsNone(schema.range_key_type) + + +class TestSchemaEquality(unittest.TestCase): + def test_schema_equal(self): + s1 = Schema.create(hash_key=('foo', 'N')) + s2 = Schema.create(hash_key=('foo', 'N')) + self.assertEqual(s1, s2) + + def test_schema_not_equal(self): + s1 = Schema.create(hash_key=('foo', 'N')) + s2 = Schema.create(hash_key=('bar', 'N')) + s3 = Schema.create(hash_key=('foo', 'S')) + self.assertNotEqual(s1, s2) + self.assertNotEqual(s1, s3) + + def test_equal_with_hash_and_range(self): + s1 = Schema.create(hash_key=('foo', 'N'), range_key=('bar', 'S')) + s2 = Schema.create(hash_key=('foo', 'N'), range_key=('bar', 'S')) + self.assertEqual(s1, s2) + + def test_schema_with_hash_and_range_not_equal(self): + s1 = Schema.create(hash_key=('foo', 'N'), range_key=('bar', 'S')) + s2 = Schema.create(hash_key=('foo', 'N'), range_key=('bar', 'N')) + s3 = Schema.create(hash_key=('foo', 'S'), range_key=('baz', 'N')) + s4 = Schema.create(hash_key=('bar', 'N'), range_key=('baz', 'N')) + self.assertNotEqual(s1, s2) + self.assertNotEqual(s1, s3) + self.assertNotEqual(s1, s4) + self.assertNotEqual(s2, s4) + self.assertNotEqual(s3, s4) + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb/test_types.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb/test_types.py new file mode 100644 index 0000000000000000000000000000000000000000..ed72cc396b9bf2cf52fe870181a0d5cc67d04f1f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb/test_types.py @@ -0,0 +1,142 @@ +#!/usr/bin/env python +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# +from decimal import Decimal +from tests.compat import unittest + +from boto.compat import six +from boto.dynamodb import types +from boto.dynamodb.exceptions import DynamoDBNumberError + + +class TestDynamizer(unittest.TestCase): + def setUp(self): + pass + + def test_encoding_to_dynamodb(self): + dynamizer = types.Dynamizer() + self.assertEqual(dynamizer.encode('foo'), {'S': 'foo'}) + self.assertEqual(dynamizer.encode(54), {'N': '54'}) + self.assertEqual(dynamizer.encode(Decimal('1.1')), {'N': '1.1'}) + self.assertEqual(dynamizer.encode(set([1, 2, 3])), + {'NS': ['1', '2', '3']}) + self.assertIn(dynamizer.encode(set(['foo', 'bar'])), + ({'SS': ['foo', 'bar']}, {'SS': ['bar', 'foo']})) + self.assertEqual(dynamizer.encode(types.Binary(b'\x01')), + {'B': 'AQ=='}) + self.assertEqual(dynamizer.encode(set([types.Binary(b'\x01')])), + {'BS': ['AQ==']}) + self.assertEqual(dynamizer.encode(['foo', 54, [1]]), + {'L': [{'S': 'foo'}, {'N': '54'}, {'L': [{'N': '1'}]}]}) + self.assertEqual(dynamizer.encode({'foo': 'bar', 'hoge': {'sub': 1}}), + {'M': {'foo': {'S': 'bar'}, 'hoge': {'M': {'sub': {'N': '1'}}}}}) + self.assertEqual(dynamizer.encode(None), {'NULL': True}) + self.assertEqual(dynamizer.encode(False), {'BOOL': False}) + + def test_decoding_to_dynamodb(self): + dynamizer = types.Dynamizer() + self.assertEqual(dynamizer.decode({'S': 'foo'}), 'foo') + self.assertEqual(dynamizer.decode({'N': '54'}), 54) + self.assertEqual(dynamizer.decode({'N': '1.1'}), Decimal('1.1')) + self.assertEqual(dynamizer.decode({'NS': ['1', '2', '3']}), + set([1, 2, 3])) + self.assertEqual(dynamizer.decode({'SS': ['foo', 'bar']}), + set(['foo', 'bar'])) + self.assertEqual(dynamizer.decode({'B': 'AQ=='}), types.Binary(b'\x01')) + self.assertEqual(dynamizer.decode({'BS': ['AQ==']}), + set([types.Binary(b'\x01')])) + self.assertEqual(dynamizer.decode({'L': [{'S': 'foo'}, {'N': '54'}, {'L': [{'N': '1'}]}]}), + ['foo', 54, [1]]) + self.assertEqual(dynamizer.decode({'M': {'foo': {'S': 'bar'}, 'hoge': {'M': {'sub': {'N': '1'}}}}}), + {'foo': 'bar', 'hoge': {'sub': 1}}) + self.assertEqual(dynamizer.decode({'NULL': True}), None) + self.assertEqual(dynamizer.decode({'BOOL': False}), False) + + def test_float_conversion_errors(self): + dynamizer = types.Dynamizer() + # When supporting decimals, certain floats will work: + self.assertEqual(dynamizer.encode(1.25), {'N': '1.25'}) + # And some will generate errors, which is why it's best + # to just use Decimals directly: + with self.assertRaises(DynamoDBNumberError): + dynamizer.encode(1.1) + + def test_non_boolean_conversions(self): + dynamizer = types.NonBooleanDynamizer() + self.assertEqual(dynamizer.encode(True), {'N': '1'}) + + def test_lossy_float_conversions(self): + dynamizer = types.LossyFloatDynamizer() + # Just testing the differences here, specifically float conversions: + self.assertEqual(dynamizer.encode(1.1), {'N': '1.1'}) + self.assertEqual(dynamizer.decode({'N': '1.1'}), 1.1) + + self.assertEqual(dynamizer.encode(set([1.1])), + {'NS': ['1.1']}) + self.assertEqual(dynamizer.decode({'NS': ['1.1', '2.2', '3.3']}), + set([1.1, 2.2, 3.3])) + + +class TestBinary(unittest.TestCase): + def test_good_input(self): + data = types.Binary(b'\x01') + self.assertEqual(b'\x01', data) + self.assertEqual(b'\x01', bytes(data)) + + def test_non_ascii_good_input(self): + # Binary data that is out of ASCII range + data = types.Binary(b'\x88') + self.assertEqual(b'\x88', data) + self.assertEqual(b'\x88', bytes(data)) + + @unittest.skipUnless(six.PY2, "Python 2 only") + def 
test_bad_input(self):
+        with self.assertRaises(TypeError):
+            types.Binary(1)
+
+    @unittest.skipUnless(six.PY3, "Python 3 only")
+    def test_bytes_input(self):
+        data = types.Binary(1)
+        self.assertEqual(data, b'\x00')
+        self.assertEqual(data.value, b'\x00')
+
+    @unittest.skipUnless(six.PY2, "Python 2 only")
+    def test_unicode_py2(self):
+        # It's dirty, but it remains for backward compatibility.
+        data = types.Binary(u'\x01')
+        self.assertEqual(data, b'\x01')
+        self.assertEqual(bytes(data), b'\x01')
+
+        # Delegate to the built-in comparison: b'\x01' == u'\x01'
+        # is considered equal in Python 2.x.
+        self.assertEqual(data, u'\x01')
+
+        # Check that the value field is of type bytes
+        self.assertEqual(type(data.value), bytes)
+
+    @unittest.skipUnless(six.PY3, "Python 3 only")
+    def test_unicode_py3(self):
+        with self.assertRaises(TypeError):
+            types.Binary(u'\x01')
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb2/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb2/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb2/test_layer1.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb2/test_layer1.py
new file mode 100644
index 0000000000000000000000000000000000000000..405f1052c4c3ba262446505120f50b1815dc2cbd
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb2/test_layer1.py
@@ -0,0 +1,53 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+ +""" +Tests for Layer1 of DynamoDB v2 +""" +from tests.unit import unittest +from boto.dynamodb2.layer1 import DynamoDBConnection +from boto.regioninfo import RegionInfo + + +class DynamoDBv2Layer1UnitTest(unittest.TestCase): + dynamodb = True + + def test_init_region(self): + dynamodb = DynamoDBConnection( + aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key') + self.assertEqual(dynamodb.region.name, 'us-east-1') + dynamodb = DynamoDBConnection( + region=RegionInfo(name='us-west-2', + endpoint='dynamodb.us-west-2.amazonaws.com'), + aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key', + ) + self.assertEqual(dynamodb.region.name, 'us-west-2') + + def test_init_host_override(self): + dynamodb = DynamoDBConnection( + aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key', + host='localhost', port=8000) + self.assertEqual(dynamodb.host, 'localhost') + self.assertEqual(dynamodb.port, 8000) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb2/test_table.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb2/test_table.py new file mode 100644 index 0000000000000000000000000000000000000000..87bdbe48f8ec0f8c4200a30d890bb112e390e984 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/dynamodb2/test_table.py @@ -0,0 +1,3066 @@ +from tests.compat import mock, unittest +from boto.dynamodb2 import exceptions +from boto.dynamodb2.fields import (HashKey, RangeKey, + AllIndex, KeysOnlyIndex, IncludeIndex, + GlobalAllIndex, GlobalKeysOnlyIndex, + GlobalIncludeIndex) +from boto.dynamodb2.items import Item +from boto.dynamodb2.layer1 import DynamoDBConnection +from boto.dynamodb2.results import ResultSet, BatchGetResultSet +from boto.dynamodb2.table import Table +from boto.dynamodb2.types import (STRING, NUMBER, BINARY, + FILTER_OPERATORS, QUERY_OPERATORS) +from boto.exception import JSONResponseError +from boto.compat import six, long_type + + +FakeDynamoDBConnection = mock.create_autospec(DynamoDBConnection) + + +class SchemaFieldsTestCase(unittest.TestCase): + def test_hash_key(self): + hash_key = HashKey('hello') + self.assertEqual(hash_key.name, 'hello') + self.assertEqual(hash_key.data_type, STRING) + self.assertEqual(hash_key.attr_type, 'HASH') + + self.assertEqual(hash_key.definition(), { + 'AttributeName': 'hello', + 'AttributeType': 'S' + }) + self.assertEqual(hash_key.schema(), { + 'AttributeName': 'hello', + 'KeyType': 'HASH' + }) + + def test_range_key(self): + range_key = RangeKey('hello') + self.assertEqual(range_key.name, 'hello') + self.assertEqual(range_key.data_type, STRING) + self.assertEqual(range_key.attr_type, 'RANGE') + + self.assertEqual(range_key.definition(), { + 'AttributeName': 'hello', + 'AttributeType': 'S' + }) + self.assertEqual(range_key.schema(), { + 'AttributeName': 'hello', + 'KeyType': 'RANGE' + }) + + def test_alternate_type(self): + alt_key = HashKey('alt', data_type=NUMBER) + self.assertEqual(alt_key.name, 'alt') + self.assertEqual(alt_key.data_type, NUMBER) + self.assertEqual(alt_key.attr_type, 'HASH') + + self.assertEqual(alt_key.definition(), { + 'AttributeName': 'alt', + 'AttributeType': 'N' + }) + self.assertEqual(alt_key.schema(), { + 'AttributeName': 'alt', + 'KeyType': 'HASH' + }) + + +class IndexFieldTestCase(unittest.TestCase): + def test_all_index(self): + all_index = AllIndex('AllKeys', parts=[ + HashKey('username'), + RangeKey('date_joined') + ]) + self.assertEqual(all_index.name, 'AllKeys') + self.assertEqual([part.attr_type 
for part in all_index.parts], [ + 'HASH', + 'RANGE' + ]) + self.assertEqual(all_index.projection_type, 'ALL') + + self.assertEqual(all_index.definition(), [ + {'AttributeName': 'username', 'AttributeType': 'S'}, + {'AttributeName': 'date_joined', 'AttributeType': 'S'} + ]) + self.assertEqual(all_index.schema(), { + 'IndexName': 'AllKeys', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'date_joined', + 'KeyType': 'RANGE' + } + ], + 'Projection': { + 'ProjectionType': 'ALL' + } + }) + + def test_keys_only_index(self): + keys_only = KeysOnlyIndex('KeysOnly', parts=[ + HashKey('username'), + RangeKey('date_joined') + ]) + self.assertEqual(keys_only.name, 'KeysOnly') + self.assertEqual([part.attr_type for part in keys_only.parts], [ + 'HASH', + 'RANGE' + ]) + self.assertEqual(keys_only.projection_type, 'KEYS_ONLY') + + self.assertEqual(keys_only.definition(), [ + {'AttributeName': 'username', 'AttributeType': 'S'}, + {'AttributeName': 'date_joined', 'AttributeType': 'S'} + ]) + self.assertEqual(keys_only.schema(), { + 'IndexName': 'KeysOnly', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'date_joined', + 'KeyType': 'RANGE' + } + ], + 'Projection': { + 'ProjectionType': 'KEYS_ONLY' + } + }) + + def test_include_index(self): + include_index = IncludeIndex('IncludeKeys', parts=[ + HashKey('username'), + RangeKey('date_joined') + ], includes=[ + 'gender', + 'friend_count' + ]) + self.assertEqual(include_index.name, 'IncludeKeys') + self.assertEqual([part.attr_type for part in include_index.parts], [ + 'HASH', + 'RANGE' + ]) + self.assertEqual(include_index.projection_type, 'INCLUDE') + + self.assertEqual(include_index.definition(), [ + {'AttributeName': 'username', 'AttributeType': 'S'}, + {'AttributeName': 'date_joined', 'AttributeType': 'S'} + ]) + self.assertEqual(include_index.schema(), { + 'IndexName': 'IncludeKeys', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'date_joined', + 'KeyType': 'RANGE' + } + ], + 'Projection': { + 'ProjectionType': 'INCLUDE', + 'NonKeyAttributes': [ + 'gender', + 'friend_count', + ] + } + }) + + def test_global_all_index(self): + all_index = GlobalAllIndex('AllKeys', parts=[ + HashKey('username'), + RangeKey('date_joined') + ], + throughput={ + 'read': 6, + 'write': 2, + }) + self.assertEqual(all_index.name, 'AllKeys') + self.assertEqual([part.attr_type for part in all_index.parts], [ + 'HASH', + 'RANGE' + ]) + self.assertEqual(all_index.projection_type, 'ALL') + + self.assertEqual(all_index.definition(), [ + {'AttributeName': 'username', 'AttributeType': 'S'}, + {'AttributeName': 'date_joined', 'AttributeType': 'S'} + ]) + self.assertEqual(all_index.schema(), { + 'IndexName': 'AllKeys', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'date_joined', + 'KeyType': 'RANGE' + } + ], + 'Projection': { + 'ProjectionType': 'ALL' + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 6, + 'WriteCapacityUnits': 2 + } + }) + + def test_global_keys_only_index(self): + keys_only = GlobalKeysOnlyIndex('KeysOnly', parts=[ + HashKey('username'), + RangeKey('date_joined') + ], + throughput={ + 'read': 3, + 'write': 4, + }) + self.assertEqual(keys_only.name, 'KeysOnly') + self.assertEqual([part.attr_type for part in keys_only.parts], [ + 'HASH', + 'RANGE' + ]) + self.assertEqual(keys_only.projection_type, 'KEYS_ONLY') + + self.assertEqual(keys_only.definition(), [ + 
{'AttributeName': 'username', 'AttributeType': 'S'}, + {'AttributeName': 'date_joined', 'AttributeType': 'S'} + ]) + self.assertEqual(keys_only.schema(), { + 'IndexName': 'KeysOnly', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'date_joined', + 'KeyType': 'RANGE' + } + ], + 'Projection': { + 'ProjectionType': 'KEYS_ONLY' + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 3, + 'WriteCapacityUnits': 4 + } + }) + + def test_global_include_index(self): + # Lean on the default throughput + include_index = GlobalIncludeIndex('IncludeKeys', parts=[ + HashKey('username'), + RangeKey('date_joined') + ], includes=[ + 'gender', + 'friend_count' + ]) + self.assertEqual(include_index.name, 'IncludeKeys') + self.assertEqual([part.attr_type for part in include_index.parts], [ + 'HASH', + 'RANGE' + ]) + self.assertEqual(include_index.projection_type, 'INCLUDE') + + self.assertEqual(include_index.definition(), [ + {'AttributeName': 'username', 'AttributeType': 'S'}, + {'AttributeName': 'date_joined', 'AttributeType': 'S'} + ]) + self.assertEqual(include_index.schema(), { + 'IndexName': 'IncludeKeys', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'date_joined', + 'KeyType': 'RANGE' + } + ], + 'Projection': { + 'ProjectionType': 'INCLUDE', + 'NonKeyAttributes': [ + 'gender', + 'friend_count', + ] + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + }) + + def test_global_include_index_throughput(self): + include_index = GlobalIncludeIndex('IncludeKeys', parts=[ + HashKey('username'), + RangeKey('date_joined') + ], includes=[ + 'gender', + 'friend_count' + ], throughput={ + 'read': 10, + 'write': 8 + }) + + self.assertEqual(include_index.schema(), { + 'IndexName': 'IncludeKeys', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'date_joined', + 'KeyType': 'RANGE' + } + ], + 'Projection': { + 'ProjectionType': 'INCLUDE', + 'NonKeyAttributes': [ + 'gender', + 'friend_count', + ] + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 10, + 'WriteCapacityUnits': 8 + } + }) + + +class ItemTestCase(unittest.TestCase): + if six.PY2: + assertCountEqual = unittest.TestCase.assertItemsEqual + + def setUp(self): + super(ItemTestCase, self).setUp() + self.table = Table('whatever', connection=FakeDynamoDBConnection()) + self.johndoe = self.create_item({ + 'username': 'johndoe', + 'first_name': 'John', + 'date_joined': 12345, + }) + + def create_item(self, data): + return Item(self.table, data=data) + + def test_initialization(self): + empty_item = Item(self.table) + self.assertEqual(empty_item.table, self.table) + self.assertEqual(empty_item._data, {}) + + full_item = Item(self.table, data={ + 'username': 'johndoe', + 'date_joined': 12345, + }) + self.assertEqual(full_item.table, self.table) + self.assertEqual(full_item._data, { + 'username': 'johndoe', + 'date_joined': 12345, + }) + + # The next couple methods make use of ``sorted(...)`` so we get consistent + # ordering everywhere & no erroneous failures. 
+ + def test_keys(self): + self.assertCountEqual(self.johndoe.keys(), [ + 'date_joined', + 'first_name', + 'username', + ]) + + def test_values(self): + self.assertCountEqual(self.johndoe.values(), + [12345, 'John', 'johndoe']) + + def test_contains(self): + self.assertIn('username', self.johndoe) + self.assertIn('first_name', self.johndoe) + self.assertIn('date_joined', self.johndoe) + self.assertNotIn('whatever', self.johndoe) + + def test_iter(self): + self.assertCountEqual(self.johndoe, + ['johndoe', 'John', 12345]) + + def test_get(self): + self.assertEqual(self.johndoe.get('username'), 'johndoe') + self.assertEqual(self.johndoe.get('first_name'), 'John') + self.assertEqual(self.johndoe.get('date_joined'), 12345) + + # Test a missing key. No default yields ``None``. + self.assertEqual(self.johndoe.get('last_name'), None) + # This time with a default. + self.assertEqual(self.johndoe.get('last_name', True), True) + + def test_items(self): + self.assertCountEqual( + self.johndoe.items(), + [ + ('date_joined', 12345), + ('first_name', 'John'), + ('username', 'johndoe'), + ]) + + def test_attribute_access(self): + self.assertEqual(self.johndoe['username'], 'johndoe') + self.assertEqual(self.johndoe['first_name'], 'John') + self.assertEqual(self.johndoe['date_joined'], 12345) + + # Test a missing key. + self.assertEqual(self.johndoe['last_name'], None) + + # Set a key. + self.johndoe['last_name'] = 'Doe' + # Test accessing the new key. + self.assertEqual(self.johndoe['last_name'], 'Doe') + + # Delete a key. + del self.johndoe['last_name'] + # Test the now-missing-again key. + self.assertEqual(self.johndoe['last_name'], None) + + def test_needs_save(self): + self.johndoe.mark_clean() + self.assertFalse(self.johndoe.needs_save()) + self.johndoe['last_name'] = 'Doe' + self.assertTrue(self.johndoe.needs_save()) + + def test_needs_save_set_changed(self): + # First, ensure we're clean. + self.johndoe.mark_clean() + self.assertFalse(self.johndoe.needs_save()) + # Add a friends collection. + self.johndoe['friends'] = set(['jane', 'alice']) + self.assertTrue(self.johndoe.needs_save()) + # Now mark it clean, then change the collection. + # This does NOT call ``__setitem__``, so the item used to be + # incorrectly appearing to be clean, when it had in fact been changed. + self.johndoe.mark_clean() + self.assertFalse(self.johndoe.needs_save()) + self.johndoe['friends'].add('bob') + self.assertTrue(self.johndoe.needs_save()) + + def test_mark_clean(self): + self.johndoe['last_name'] = 'Doe' + self.assertTrue(self.johndoe.needs_save()) + self.johndoe.mark_clean() + self.assertFalse(self.johndoe.needs_save()) + + def test_load(self): + empty_item = Item(self.table) + empty_item.load({ + 'Item': { + 'username': {'S': 'johndoe'}, + 'first_name': {'S': 'John'}, + 'last_name': {'S': 'Doe'}, + 'date_joined': {'N': '1366056668'}, + 'friend_count': {'N': '3'}, + 'friends': {'SS': ['alice', 'bob', 'jane']}, + } + }) + self.assertEqual(empty_item['username'], 'johndoe') + self.assertEqual(empty_item['date_joined'], 1366056668) + self.assertEqual(sorted(empty_item['friends']), sorted([ + 'alice', + 'bob', + 'jane' + ])) + + def test_get_keys(self): + # Setup the data. + self.table.schema = [ + HashKey('username'), + RangeKey('date_joined'), + ] + self.assertEqual(self.johndoe.get_keys(), { + 'username': 'johndoe', + 'date_joined': 12345, + }) + + def test_get_raw_keys(self): + # Setup the data. 
+ self.table.schema = [ + HashKey('username'), + RangeKey('date_joined'), + ] + self.assertEqual(self.johndoe.get_raw_keys(), { + 'username': {'S': 'johndoe'}, + 'date_joined': {'N': '12345'}, + }) + + def test_build_expects(self): + # Pristine. + self.assertEqual(self.johndoe.build_expects(), { + 'first_name': { + 'Exists': False, + }, + 'username': { + 'Exists': False, + }, + 'date_joined': { + 'Exists': False, + }, + }) + + # Without modifications. + self.johndoe.mark_clean() + self.assertEqual(self.johndoe.build_expects(), { + 'first_name': { + 'Exists': True, + 'Value': { + 'S': 'John', + }, + }, + 'username': { + 'Exists': True, + 'Value': { + 'S': 'johndoe', + }, + }, + 'date_joined': { + 'Exists': True, + 'Value': { + 'N': '12345', + }, + }, + }) + + # Change some data. + self.johndoe['first_name'] = 'Johann' + # Add some data. + self.johndoe['last_name'] = 'Doe' + # Delete some data. + del self.johndoe['date_joined'] + + # All fields (default). + self.assertEqual(self.johndoe.build_expects(), { + 'first_name': { + 'Exists': True, + 'Value': { + 'S': 'John', + }, + }, + 'last_name': { + 'Exists': False, + }, + 'username': { + 'Exists': True, + 'Value': { + 'S': 'johndoe', + }, + }, + 'date_joined': { + 'Exists': True, + 'Value': { + 'N': '12345', + }, + }, + }) + + # Only a subset of the fields. + self.assertEqual(self.johndoe.build_expects(fields=[ + 'first_name', + 'last_name', + 'date_joined', + ]), { + 'first_name': { + 'Exists': True, + 'Value': { + 'S': 'John', + }, + }, + 'last_name': { + 'Exists': False, + }, + 'date_joined': { + 'Exists': True, + 'Value': { + 'N': '12345', + }, + }, + }) + + def test_prepare_full(self): + self.assertEqual(self.johndoe.prepare_full(), { + 'username': {'S': 'johndoe'}, + 'first_name': {'S': 'John'}, + 'date_joined': {'N': '12345'} + }) + + self.johndoe['friends'] = set(['jane', 'alice']) + data = self.johndoe.prepare_full() + self.assertEqual(data['username'], {'S': 'johndoe'}) + self.assertEqual(data['first_name'], {'S': 'John'}) + self.assertEqual(data['date_joined'], {'N': '12345'}) + self.assertCountEqual(data['friends']['SS'], + ['jane', 'alice']) + + def test_prepare_full_empty_set(self): + self.johndoe['friends'] = set() + self.assertEqual(self.johndoe.prepare_full(), { + 'username': {'S': 'johndoe'}, + 'first_name': {'S': 'John'}, + 'date_joined': {'N': '12345'} + }) + + def test_prepare_partial(self): + self.johndoe.mark_clean() + # Change some data. + self.johndoe['first_name'] = 'Johann' + # Add some data. + self.johndoe['last_name'] = 'Doe' + # Delete some data. + del self.johndoe['date_joined'] + + final_data, fields = self.johndoe.prepare_partial() + self.assertEqual(final_data, { + 'date_joined': { + 'Action': 'DELETE', + }, + 'first_name': { + 'Action': 'PUT', + 'Value': {'S': 'Johann'}, + }, + 'last_name': { + 'Action': 'PUT', + 'Value': {'S': 'Doe'}, + }, + }) + self.assertEqual(fields, set([ + 'first_name', + 'last_name', + 'date_joined' + ])) + + def test_prepare_partial_empty_set(self): + self.johndoe.mark_clean() + # Change some data. + self.johndoe['first_name'] = 'Johann' + # Add some data. + self.johndoe['last_name'] = 'Doe' + # Delete some data. + del self.johndoe['date_joined'] + # Put an empty set on the ``Item``. 
+ self.johndoe['friends'] = set() + + final_data, fields = self.johndoe.prepare_partial() + self.assertEqual(final_data, { + 'date_joined': { + 'Action': 'DELETE', + }, + 'first_name': { + 'Action': 'PUT', + 'Value': {'S': 'Johann'}, + }, + 'last_name': { + 'Action': 'PUT', + 'Value': {'S': 'Doe'}, + }, + }) + self.assertEqual(fields, set([ + 'first_name', + 'last_name', + 'date_joined' + ])) + + def test_save_no_changes(self): + # Unchanged, no save. + with mock.patch.object(self.table, '_put_item', return_value=True) \ + as mock_put_item: + # Pretend we loaded it via ``get_item``... + self.johndoe.mark_clean() + self.assertFalse(self.johndoe.save()) + + self.assertFalse(mock_put_item.called) + + def test_save_with_changes(self): + # With changed data. + with mock.patch.object(self.table, '_put_item', return_value=True) \ + as mock_put_item: + self.johndoe.mark_clean() + self.johndoe['first_name'] = 'J' + self.johndoe['new_attr'] = 'never_seen_before' + self.assertTrue(self.johndoe.save()) + self.assertFalse(self.johndoe.needs_save()) + + self.assertTrue(mock_put_item.called) + mock_put_item.assert_called_once_with({ + 'username': {'S': 'johndoe'}, + 'first_name': {'S': 'J'}, + 'new_attr': {'S': 'never_seen_before'}, + 'date_joined': {'N': '12345'} + }, expects={ + 'username': { + 'Value': { + 'S': 'johndoe', + }, + 'Exists': True, + }, + 'first_name': { + 'Value': { + 'S': 'John', + }, + 'Exists': True, + }, + 'new_attr': { + 'Exists': False, + }, + 'date_joined': { + 'Value': { + 'N': '12345', + }, + 'Exists': True, + }, + }) + + def test_save_with_changes_overwrite(self): + # With changed data. + with mock.patch.object(self.table, '_put_item', return_value=True) \ + as mock_put_item: + self.johndoe['first_name'] = 'J' + self.johndoe['new_attr'] = 'never_seen_before' + # OVERWRITE ALL THE THINGS + self.assertTrue(self.johndoe.save(overwrite=True)) + self.assertFalse(self.johndoe.needs_save()) + + self.assertTrue(mock_put_item.called) + mock_put_item.assert_called_once_with({ + 'username': {'S': 'johndoe'}, + 'first_name': {'S': 'J'}, + 'new_attr': {'S': 'never_seen_before'}, + 'date_joined': {'N': '12345'} + }, expects=None) + + def test_partial_no_changes(self): + # Unchanged, no save. + with mock.patch.object(self.table, '_update_item', return_value=True) \ + as mock_update_item: + # Pretend we loaded it via ``get_item``... + self.johndoe.mark_clean() + self.assertFalse(self.johndoe.partial_save()) + + self.assertFalse(mock_update_item.called) + + def test_partial_with_changes(self): + # Setup the data. + self.table.schema = [ + HashKey('username'), + ] + + # With changed data. + with mock.patch.object(self.table, '_update_item', return_value=True) \ + as mock_update_item: + # Pretend we loaded it via ``get_item``... + self.johndoe.mark_clean() + # Now... MODIFY!!! 
+ self.johndoe['first_name'] = 'J' + self.johndoe['last_name'] = 'Doe' + del self.johndoe['date_joined'] + self.assertTrue(self.johndoe.partial_save()) + self.assertFalse(self.johndoe.needs_save()) + + self.assertTrue(mock_update_item.called) + mock_update_item.assert_called_once_with({ + 'username': 'johndoe', + }, { + 'first_name': { + 'Action': 'PUT', + 'Value': {'S': 'J'}, + }, + 'last_name': { + 'Action': 'PUT', + 'Value': {'S': 'Doe'}, + }, + 'date_joined': { + 'Action': 'DELETE', + } + }, expects={ + 'first_name': { + 'Value': { + 'S': 'John', + }, + 'Exists': True + }, + 'last_name': { + 'Exists': False + }, + 'date_joined': { + 'Value': { + 'N': '12345', + }, + 'Exists': True + }, + }) + + def test_delete(self): + # Setup the data. + self.table.schema = [ + HashKey('username'), + RangeKey('date_joined'), + ] + + with mock.patch.object(self.table, 'delete_item', return_value=True) \ + as mock_delete_item: + self.johndoe.delete() + + self.assertTrue(mock_delete_item.called) + mock_delete_item.assert_called_once_with( + username='johndoe', + date_joined=12345 + ) + + def test_nonzero(self): + self.assertTrue(self.johndoe) + self.assertFalse(self.create_item({})) + + +class ItemFromItemTestCase(ItemTestCase): + def setUp(self): + super(ItemFromItemTestCase, self).setUp() + self.johndoe = self.create_item(self.johndoe) + + +def fake_results(name, greeting='hello', exclusive_start_key=None, limit=None): + if exclusive_start_key is None: + exclusive_start_key = -1 + + if limit == 0: + raise Exception("Web Service Returns '400 Bad Request'") + + end_cap = 13 + results = [] + start_key = exclusive_start_key + 1 + + for i in range(start_key, start_key + 5): + if i < end_cap: + results.append("%s %s #%s" % (greeting, name, i)) + + # Don't return more than limit results + if limit < len(results): + results = results[:limit] + + retval = { + 'results': results, + } + + if exclusive_start_key + 5 < end_cap: + retval['last_key'] = exclusive_start_key + 5 + + return retval + + +class ResultSetTestCase(unittest.TestCase): + def setUp(self): + super(ResultSetTestCase, self).setUp() + self.results = ResultSet() + self.result_function = mock.MagicMock(side_effect=fake_results) + self.results.to_call(self.result_function, 'john', greeting='Hello', limit=20) + + def test_first_key(self): + self.assertEqual(self.results.first_key, 'exclusive_start_key') + + def test_max_page_size_fetch_more(self): + self.results = ResultSet(max_page_size=10) + self.results.to_call(self.result_function, 'john', greeting='Hello') + self.results.fetch_more() + self.result_function.assert_called_with('john', greeting='Hello', limit=10) + self.result_function.reset_mock() + + def test_max_page_size_and_smaller_limit_fetch_more(self): + self.results = ResultSet(max_page_size=10) + self.results.to_call(self.result_function, 'john', greeting='Hello', limit=5) + self.results.fetch_more() + self.result_function.assert_called_with('john', greeting='Hello', limit=5) + self.result_function.reset_mock() + + def test_max_page_size_and_bigger_limit_fetch_more(self): + self.results = ResultSet(max_page_size=10) + self.results.to_call(self.result_function, 'john', greeting='Hello', limit=15) + self.results.fetch_more() + self.result_function.assert_called_with('john', greeting='Hello', limit=10) + self.result_function.reset_mock() + + def test_fetch_more(self): + # First "page". 
+ self.results.fetch_more() + self.assertEqual(self.results._results, [ + 'Hello john #0', + 'Hello john #1', + 'Hello john #2', + 'Hello john #3', + 'Hello john #4', + ]) + + self.result_function.assert_called_with('john', greeting='Hello', limit=20) + self.result_function.reset_mock() + + # Fake in a last key. + self.results._last_key_seen = 4 + # Second "page". + self.results.fetch_more() + self.assertEqual(self.results._results, [ + 'Hello john #5', + 'Hello john #6', + 'Hello john #7', + 'Hello john #8', + 'Hello john #9', + ]) + + self.result_function.assert_called_with('john', greeting='Hello', limit=20, exclusive_start_key=4) + self.result_function.reset_mock() + + # Fake in a last key. + self.results._last_key_seen = 9 + # Last "page". + self.results.fetch_more() + self.assertEqual(self.results._results, [ + 'Hello john #10', + 'Hello john #11', + 'Hello john #12', + ]) + + # Fake in a key outside the range. + self.results._last_key_seen = 15 + # Empty "page". Nothing new gets added + self.results.fetch_more() + self.assertEqual(self.results._results, []) + + # Make sure we won't check for results in the future. + self.assertFalse(self.results._results_left) + + def test_iteration(self): + # First page. + self.assertEqual(next(self.results), 'Hello john #0') + self.assertEqual(next(self.results), 'Hello john #1') + self.assertEqual(next(self.results), 'Hello john #2') + self.assertEqual(next(self.results), 'Hello john #3') + self.assertEqual(next(self.results), 'Hello john #4') + self.assertEqual(self.results._limit, 15) + # Second page. + self.assertEqual(next(self.results), 'Hello john #5') + self.assertEqual(next(self.results), 'Hello john #6') + self.assertEqual(next(self.results), 'Hello john #7') + self.assertEqual(next(self.results), 'Hello john #8') + self.assertEqual(next(self.results), 'Hello john #9') + self.assertEqual(self.results._limit, 10) + # Third page. 
+ self.assertEqual(next(self.results), 'Hello john #10') + self.assertEqual(next(self.results), 'Hello john #11') + self.assertEqual(next(self.results), 'Hello john #12') + self.assertRaises(StopIteration, self.results.next) + self.assertEqual(self.results._limit, 7) + + def test_limit_smaller_than_first_page(self): + results = ResultSet() + results.to_call(fake_results, 'john', greeting='Hello', limit=2) + self.assertEqual(next(results), 'Hello john #0') + self.assertEqual(next(results), 'Hello john #1') + self.assertRaises(StopIteration, results.next) + + def test_limit_equals_page(self): + results = ResultSet() + results.to_call(fake_results, 'john', greeting='Hello', limit=5) + # First page + self.assertEqual(next(results), 'Hello john #0') + self.assertEqual(next(results), 'Hello john #1') + self.assertEqual(next(results), 'Hello john #2') + self.assertEqual(next(results), 'Hello john #3') + self.assertEqual(next(results), 'Hello john #4') + self.assertRaises(StopIteration, results.next) + + def test_limit_greater_than_page(self): + results = ResultSet() + results.to_call(fake_results, 'john', greeting='Hello', limit=6) + # First page + self.assertEqual(next(results), 'Hello john #0') + self.assertEqual(next(results), 'Hello john #1') + self.assertEqual(next(results), 'Hello john #2') + self.assertEqual(next(results), 'Hello john #3') + self.assertEqual(next(results), 'Hello john #4') + # Second page + self.assertEqual(next(results), 'Hello john #5') + self.assertRaises(StopIteration, results.next) + + def test_iteration_noresults(self): + def none(limit=10): + return { + 'results': [], + } + + results = ResultSet() + results.to_call(none, limit=20) + self.assertRaises(StopIteration, results.next) + + def test_iteration_sporadic_pages(self): + # Some pages have no/incomplete results but have a ``LastEvaluatedKey`` + # (for instance, scans with filters), so we need to accommodate that. + def sporadic(): + # A dict, because Python closures have read-only access to the + # reference itself. + count = {'value': -1} + + def _wrapper(limit=10, exclusive_start_key=None): + count['value'] = count['value'] + 1 + + if count['value'] == 0: + # Full page. + return { + 'results': [ + 'Result #0', + 'Result #1', + 'Result #2', + 'Result #3', + ], + 'last_key': 'page-1' + } + elif count['value'] == 1: + # Empty page but continue. + return { + 'results': [], + 'last_key': 'page-2' + } + elif count['value'] == 2: + # Final page. + return { + 'results': [ + 'Result #4', + 'Result #5', + 'Result #6', + ], + } + + return _wrapper + + results = ResultSet() + results.to_call(sporadic(), limit=20) + # First page + self.assertEqual(next(results), 'Result #0') + self.assertEqual(next(results), 'Result #1') + self.assertEqual(next(results), 'Result #2') + self.assertEqual(next(results), 'Result #3') + # Second page (misses!) 
+ # Moves on to the third page + self.assertEqual(next(results), 'Result #4') + self.assertEqual(next(results), 'Result #5') + self.assertEqual(next(results), 'Result #6') + self.assertRaises(StopIteration, results.next) + + def test_list(self): + self.assertEqual(list(self.results), [ + 'Hello john #0', + 'Hello john #1', + 'Hello john #2', + 'Hello john #3', + 'Hello john #4', + 'Hello john #5', + 'Hello john #6', + 'Hello john #7', + 'Hello john #8', + 'Hello john #9', + 'Hello john #10', + 'Hello john #11', + 'Hello john #12' + ]) + + +def fake_batch_results(keys): + results = [] + simulate_unprocessed = True + + if len(keys) and keys[0] == 'johndoe': + simulate_unprocessed = False + + for key in keys: + if simulate_unprocessed and key == 'johndoe': + continue + + results.append("hello %s" % key) + + retval = { + 'results': results, + 'last_key': None, + } + + if simulate_unprocessed: + retval['unprocessed_keys'] = ['johndoe'] + + return retval + + +class BatchGetResultSetTestCase(unittest.TestCase): + def setUp(self): + super(BatchGetResultSetTestCase, self).setUp() + self.results = BatchGetResultSet(keys=[ + 'alice', + 'bob', + 'jane', + 'johndoe', + ]) + self.results.to_call(fake_batch_results) + + def test_fetch_more(self): + # First "page". + self.results.fetch_more() + self.assertEqual(self.results._results, [ + 'hello alice', + 'hello bob', + 'hello jane', + ]) + self.assertEqual(self.results._keys_left, ['johndoe']) + + # Second "page". + self.results.fetch_more() + self.assertEqual(self.results._results, [ + 'hello johndoe', + ]) + + # Empty "page". Nothing new gets added + self.results.fetch_more() + self.assertEqual(self.results._results, []) + + # Make sure we won't check for results in the future. + self.assertFalse(self.results._results_left) + + def test_fetch_more_empty(self): + self.results.to_call(lambda keys: {'results': [], 'last_key': None}) + + self.results.fetch_more() + self.assertEqual(self.results._results, []) + self.assertRaises(StopIteration, self.results.next) + + def test_iteration(self): + # First page. 
+ self.assertEqual(next(self.results), 'hello alice') + self.assertEqual(next(self.results), 'hello bob') + self.assertEqual(next(self.results), 'hello jane') + self.assertEqual(next(self.results), 'hello johndoe') + self.assertRaises(StopIteration, self.results.next) + + +class TableTestCase(unittest.TestCase): + def setUp(self): + super(TableTestCase, self).setUp() + self.users = Table('users', connection=FakeDynamoDBConnection()) + self.default_connection = DynamoDBConnection( + aws_access_key_id='access_key', + aws_secret_access_key='secret_key' + ) + + def test__introspect_schema(self): + raw_schema_1 = [ + { + "AttributeName": "username", + "KeyType": "HASH" + }, + { + "AttributeName": "date_joined", + "KeyType": "RANGE" + } + ] + raw_attributes_1 = [ + { + 'AttributeName': 'username', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'date_joined', + 'AttributeType': 'S' + }, + ] + schema_1 = self.users._introspect_schema(raw_schema_1, raw_attributes_1) + self.assertEqual(len(schema_1), 2) + self.assertTrue(isinstance(schema_1[0], HashKey)) + self.assertEqual(schema_1[0].name, 'username') + self.assertTrue(isinstance(schema_1[1], RangeKey)) + self.assertEqual(schema_1[1].name, 'date_joined') + + raw_schema_2 = [ + { + "AttributeName": "username", + "KeyType": "BTREE" + }, + ] + raw_attributes_2 = [ + { + 'AttributeName': 'username', + 'AttributeType': 'S' + }, + ] + self.assertRaises( + exceptions.UnknownSchemaFieldError, + self.users._introspect_schema, + raw_schema_2, + raw_attributes_2 + ) + + # Test a complex schema & ensure the types come back correctly. + raw_schema_3 = [ + { + "AttributeName": "user_id", + "KeyType": "HASH" + }, + { + "AttributeName": "junk", + "KeyType": "RANGE" + } + ] + raw_attributes_3 = [ + { + 'AttributeName': 'user_id', + 'AttributeType': 'N' + }, + { + 'AttributeName': 'junk', + 'AttributeType': 'B' + }, + ] + schema_3 = self.users._introspect_schema(raw_schema_3, raw_attributes_3) + self.assertEqual(len(schema_3), 2) + self.assertTrue(isinstance(schema_3[0], HashKey)) + self.assertEqual(schema_3[0].name, 'user_id') + self.assertEqual(schema_3[0].data_type, NUMBER) + self.assertTrue(isinstance(schema_3[1], RangeKey)) + self.assertEqual(schema_3[1].name, 'junk') + self.assertEqual(schema_3[1].data_type, BINARY) + + def test__introspect_indexes(self): + raw_indexes_1 = [ + { + "IndexName": "MostRecentlyJoinedIndex", + "KeySchema": [ + { + "AttributeName": "username", + "KeyType": "HASH" + }, + { + "AttributeName": "date_joined", + "KeyType": "RANGE" + } + ], + "Projection": { + "ProjectionType": "KEYS_ONLY" + } + }, + { + "IndexName": "EverybodyIndex", + "KeySchema": [ + { + "AttributeName": "username", + "KeyType": "HASH" + }, + ], + "Projection": { + "ProjectionType": "ALL" + } + }, + { + "IndexName": "GenderIndex", + "KeySchema": [ + { + "AttributeName": "username", + "KeyType": "HASH" + }, + { + "AttributeName": "date_joined", + "KeyType": "RANGE" + } + ], + "Projection": { + "ProjectionType": "INCLUDE", + "NonKeyAttributes": [ + 'gender', + ] + } + } + ] + indexes_1 = self.users._introspect_indexes(raw_indexes_1) + self.assertEqual(len(indexes_1), 3) + self.assertTrue(isinstance(indexes_1[0], KeysOnlyIndex)) + self.assertEqual(indexes_1[0].name, 'MostRecentlyJoinedIndex') + self.assertEqual(len(indexes_1[0].parts), 2) + self.assertTrue(isinstance(indexes_1[1], AllIndex)) + self.assertEqual(indexes_1[1].name, 'EverybodyIndex') + self.assertEqual(len(indexes_1[1].parts), 1) + self.assertTrue(isinstance(indexes_1[2], IncludeIndex)) + 
self.assertEqual(indexes_1[2].name, 'GenderIndex') + self.assertEqual(len(indexes_1[2].parts), 2) + self.assertEqual(indexes_1[2].includes_fields, ['gender']) + + raw_indexes_2 = [ + { + "IndexName": "MostRecentlyJoinedIndex", + "KeySchema": [ + { + "AttributeName": "username", + "KeyType": "HASH" + }, + { + "AttributeName": "date_joined", + "KeyType": "RANGE" + } + ], + "Projection": { + "ProjectionType": "SOMETHING_CRAZY" + } + }, + ] + self.assertRaises( + exceptions.UnknownIndexFieldError, + self.users._introspect_indexes, + raw_indexes_2 + ) + + def test_initialization(self): + users = Table('users', connection=self.default_connection) + self.assertEqual(users.table_name, 'users') + self.assertTrue(isinstance(users.connection, DynamoDBConnection)) + self.assertEqual(users.throughput['read'], 5) + self.assertEqual(users.throughput['write'], 5) + self.assertEqual(users.schema, None) + self.assertEqual(users.indexes, None) + + groups = Table('groups', connection=FakeDynamoDBConnection()) + self.assertEqual(groups.table_name, 'groups') + self.assertTrue(hasattr(groups.connection, 'assert_called_once_with')) + + def test_create_simple(self): + conn = FakeDynamoDBConnection() + + with mock.patch.object(conn, 'create_table', return_value={}) \ + as mock_create_table: + retval = Table.create('users', schema=[ + HashKey('username'), + RangeKey('date_joined', data_type=NUMBER) + ], connection=conn) + self.assertTrue(retval) + + self.assertTrue(mock_create_table.called) + mock_create_table.assert_called_once_with(attribute_definitions=[ + { + 'AttributeName': 'username', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'date_joined', + 'AttributeType': 'N' + } + ], + table_name='users', + key_schema=[ + { + 'KeyType': 'HASH', + 'AttributeName': 'username' + }, + { + 'KeyType': 'RANGE', + 'AttributeName': 'date_joined' + } + ], + provisioned_throughput={ + 'WriteCapacityUnits': 5, + 'ReadCapacityUnits': 5 + }) + + def test_create_full(self): + conn = FakeDynamoDBConnection() + + with mock.patch.object(conn, 'create_table', return_value={}) \ + as mock_create_table: + retval = Table.create('users', schema=[ + HashKey('username'), + RangeKey('date_joined', data_type=NUMBER) + ], throughput={ + 'read':20, + 'write': 10, + }, indexes=[ + KeysOnlyIndex('FriendCountIndex', parts=[ + RangeKey('friend_count') + ]), + ], global_indexes=[ + GlobalKeysOnlyIndex('FullFriendCountIndex', parts=[ + RangeKey('friend_count') + ], throughput={ + 'read': 10, + 'write': 8, + }), + ], connection=conn) + self.assertTrue(retval) + + self.assertTrue(mock_create_table.called) + mock_create_table.assert_called_once_with(attribute_definitions=[ + { + 'AttributeName': 'username', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'date_joined', + 'AttributeType': 'N' + }, + { + 'AttributeName': 'friend_count', + 'AttributeType': 'S' + } + ], + key_schema=[ + { + 'KeyType': 'HASH', + 'AttributeName': 'username' + }, + { + 'KeyType': 'RANGE', + 'AttributeName': 'date_joined' + } + ], + table_name='users', + provisioned_throughput={ + 'WriteCapacityUnits': 10, + 'ReadCapacityUnits': 20 + }, + global_secondary_indexes=[ + { + 'KeySchema': [ + { + 'KeyType': 'RANGE', + 'AttributeName': 'friend_count' + } + ], + 'IndexName': 'FullFriendCountIndex', + 'Projection': { + 'ProjectionType': 'KEYS_ONLY' + }, + 'ProvisionedThroughput': { + 'WriteCapacityUnits': 8, + 'ReadCapacityUnits': 10 + } + } + ], + local_secondary_indexes=[ + { + 'KeySchema': [ + { + 'KeyType': 'RANGE', + 'AttributeName': 'friend_count' + } + ], + 'IndexName': 
'FriendCountIndex', + 'Projection': { + 'ProjectionType': 'KEYS_ONLY' + } + } + ]) + + def test_describe(self): + expected = { + "Table": { + "AttributeDefinitions": [ + { + "AttributeName": "username", + "AttributeType": "S" + } + ], + "ItemCount": 5, + "KeySchema": [ + { + "AttributeName": "username", + "KeyType": "HASH" + } + ], + "LocalSecondaryIndexes": [ + { + "IndexName": "UsernameIndex", + "KeySchema": [ + { + "AttributeName": "username", + "KeyType": "HASH" + } + ], + "Projection": { + "ProjectionType": "KEYS_ONLY" + } + } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": 20, + "WriteCapacityUnits": 6 + }, + "TableName": "Thread", + "TableStatus": "ACTIVE" + } + } + + with mock.patch.object( + self.users.connection, + 'describe_table', + return_value=expected) as mock_describe: + self.assertEqual(self.users.throughput['read'], 5) + self.assertEqual(self.users.throughput['write'], 5) + self.assertEqual(self.users.schema, None) + self.assertEqual(self.users.indexes, None) + + self.users.describe() + + self.assertEqual(self.users.throughput['read'], 20) + self.assertEqual(self.users.throughput['write'], 6) + self.assertEqual(len(self.users.schema), 1) + self.assertEqual(isinstance(self.users.schema[0], HashKey), 1) + self.assertEqual(len(self.users.indexes), 1) + + mock_describe.assert_called_once_with('users') + + def test_update(self): + with mock.patch.object( + self.users.connection, + 'update_table', + return_value={}) as mock_update: + self.assertEqual(self.users.throughput['read'], 5) + self.assertEqual(self.users.throughput['write'], 5) + self.users.update(throughput={ + 'read': 7, + 'write': 2, + }) + self.assertEqual(self.users.throughput['read'], 7) + self.assertEqual(self.users.throughput['write'], 2) + + mock_update.assert_called_once_with( + 'users', + global_secondary_index_updates=None, + provisioned_throughput={ + 'WriteCapacityUnits': 2, + 'ReadCapacityUnits': 7 + } + ) + + with mock.patch.object( + self.users.connection, + 'update_table', + return_value={}) as mock_update: + self.assertEqual(self.users.throughput['read'], 7) + self.assertEqual(self.users.throughput['write'], 2) + self.users.update(throughput={ + 'read': 9, + 'write': 5, + }, + global_indexes={ + 'WhateverIndex': { + 'read': 6, + 'write': 1 + }, + 'AnotherIndex': { + 'read': 1, + 'write': 2 + } + }) + self.assertEqual(self.users.throughput['read'], 9) + self.assertEqual(self.users.throughput['write'], 5) + + args, kwargs = mock_update.call_args + self.assertEqual(args, ('users',)) + self.assertEqual(kwargs['provisioned_throughput'], { + 'WriteCapacityUnits': 5, + 'ReadCapacityUnits': 9, + }) + update = kwargs['global_secondary_index_updates'][:] + update.sort(key=lambda x: x['Update']['IndexName']) + self.assertDictEqual( + update[0], + { + 'Update': { + 'IndexName': 'AnotherIndex', + 'ProvisionedThroughput': { + 'WriteCapacityUnits': 2, + 'ReadCapacityUnits': 1 + } + } + }) + self.assertDictEqual( + update[1], + { + 'Update': { + 'IndexName': 'WhateverIndex', + 'ProvisionedThroughput': { + 'WriteCapacityUnits': 1, + 'ReadCapacityUnits': 6 + } + } + }) + + def test_create_global_secondary_index(self): + with mock.patch.object( + self.users.connection, + 'update_table', + return_value={}) as mock_update: + self.users.create_global_secondary_index( + global_index=GlobalAllIndex( + 'JustCreatedIndex', + parts=[ + HashKey('requiredHashKey') + ], + throughput={ + 'read': 2, + 'write': 2 + } + ) + ) + + mock_update.assert_called_once_with( + 'users', + global_secondary_index_updates=[ + { + 
'Create': { + 'IndexName': 'JustCreatedIndex', + 'KeySchema': [ + { + 'KeyType': 'HASH', + 'AttributeName': 'requiredHashKey' + } + ], + 'Projection': { + 'ProjectionType': 'ALL' + }, + 'ProvisionedThroughput': { + 'WriteCapacityUnits': 2, + 'ReadCapacityUnits': 2 + } + } + } + ], + attribute_definitions=[ + { + 'AttributeName': 'requiredHashKey', + 'AttributeType': 'S' + } + ] + ) + + def test_delete_global_secondary_index(self): + with mock.patch.object( + self.users.connection, + 'update_table', + return_value={}) as mock_update: + self.users.delete_global_secondary_index('RandomGSIIndex') + + mock_update.assert_called_once_with( + 'users', + global_secondary_index_updates=[ + { + 'Delete': { + 'IndexName': 'RandomGSIIndex', + } + } + ] + ) + + def test_update_global_secondary_index(self): + # Updating a single global secondary index + with mock.patch.object( + self.users.connection, + 'update_table', + return_value={}) as mock_update: + self.users.update_global_secondary_index(global_indexes={ + 'A_IndexToBeUpdated': { + 'read': 5, + 'write': 5 + } + }) + + mock_update.assert_called_once_with( + 'users', + global_secondary_index_updates=[ + { + 'Update': { + 'IndexName': 'A_IndexToBeUpdated', + "ProvisionedThroughput": { + "ReadCapacityUnits": 5, + "WriteCapacityUnits": 5 + }, + } + } + ] + ) + + # Updating multiple global secondary indexes + with mock.patch.object( + self.users.connection, + 'update_table', + return_value={}) as mock_update: + self.users.update_global_secondary_index(global_indexes={ + 'A_IndexToBeUpdated': { + 'read': 5, + 'write': 5 + }, + 'B_IndexToBeUpdated': { + 'read': 9, + 'write': 9 + } + }) + + args, kwargs = mock_update.call_args + self.assertEqual(args, ('users',)) + update = kwargs['global_secondary_index_updates'][:] + update.sort(key=lambda x: x['Update']['IndexName']) + self.assertDictEqual( + update[0], + { + 'Update': { + 'IndexName': 'A_IndexToBeUpdated', + 'ProvisionedThroughput': { + 'WriteCapacityUnits': 5, + 'ReadCapacityUnits': 5 + } + } + }) + self.assertDictEqual( + update[1], + { + 'Update': { + 'IndexName': 'B_IndexToBeUpdated', + 'ProvisionedThroughput': { + 'WriteCapacityUnits': 9, + 'ReadCapacityUnits': 9 + } + } + }) + + def test_delete(self): + with mock.patch.object( + self.users.connection, + 'delete_table', + return_value={}) as mock_delete: + self.assertTrue(self.users.delete()) + + mock_delete.assert_called_once_with('users') + + def test_get_item(self): + expected = { + 'Item': { + 'username': {'S': 'johndoe'}, + 'first_name': {'S': 'John'}, + 'last_name': {'S': 'Doe'}, + 'date_joined': {'N': '1366056668'}, + 'friend_count': {'N': '3'}, + 'friends': {'SS': ['alice', 'bob', 'jane']}, + } + } + + with mock.patch.object( + self.users.connection, + 'get_item', + return_value=expected) as mock_get_item: + item = self.users.get_item(username='johndoe') + self.assertEqual(item['username'], 'johndoe') + self.assertEqual(item['first_name'], 'John') + + mock_get_item.assert_called_once_with('users', { + 'username': {'S': 'johndoe'} + }, consistent_read=False, attributes_to_get=None) + + with mock.patch.object( + self.users.connection, + 'get_item', + return_value=expected) as mock_get_item: + item = self.users.get_item(username='johndoe', attributes=[ + 'username', + 'first_name', + ]) + + mock_get_item.assert_called_once_with('users', { + 'username': {'S': 'johndoe'} + }, consistent_read=False, attributes_to_get=['username', 'first_name']) + + def test_has_item(self): + expected = { + 'Item': { + 'username': {'S': 'johndoe'}, + 
'first_name': {'S': 'John'}, + 'last_name': {'S': 'Doe'}, + 'date_joined': {'N': '1366056668'}, + 'friend_count': {'N': '3'}, + 'friends': {'SS': ['alice', 'bob', 'jane']}, + } + } + + with mock.patch.object( + self.users.connection, + 'get_item', + return_value=expected) as mock_get_item: + found = self.users.has_item(username='johndoe') + self.assertTrue(found) + + with mock.patch.object( + self.users.connection, + 'get_item') as mock_get_item: + mock_get_item.side_effect = JSONResponseError("Nope.", None, None) + found = self.users.has_item(username='mrsmith') + self.assertFalse(found) + + def test_lookup_hash(self): + """Tests the "lookup" function with just a hash key""" + expected = { + 'Item': { + 'username': {'S': 'johndoe'}, + 'first_name': {'S': 'John'}, + 'last_name': {'S': 'Doe'}, + 'date_joined': {'N': '1366056668'}, + 'friend_count': {'N': '3'}, + 'friends': {'SS': ['alice', 'bob', 'jane']}, + } + } + + # Set the Schema + self.users.schema = [ + HashKey('username'), + RangeKey('date_joined', data_type=NUMBER), + ] + + with mock.patch.object( + self.users, + 'get_item', + return_value=expected) as mock_get_item: + self.users.lookup('johndoe') + + mock_get_item.assert_called_once_with( + username= 'johndoe') + + def test_lookup_hash_and_range(self): + """Test the "lookup" function with a hash and range key""" + expected = { + 'Item': { + 'username': {'S': 'johndoe'}, + 'first_name': {'S': 'John'}, + 'last_name': {'S': 'Doe'}, + 'date_joined': {'N': '1366056668'}, + 'friend_count': {'N': '3'}, + 'friends': {'SS': ['alice', 'bob', 'jane']}, + } + } + + # Set the Schema + self.users.schema = [ + HashKey('username'), + RangeKey('date_joined', data_type=NUMBER), + ] + + with mock.patch.object( + self.users, + 'get_item', + return_value=expected) as mock_get_item: + self.users.lookup('johndoe', 1366056668) + + mock_get_item.assert_called_once_with( + username= 'johndoe', + date_joined= 1366056668) + + def test_put_item(self): + with mock.patch.object( + self.users.connection, + 'put_item', + return_value={}) as mock_put_item: + self.users.put_item(data={ + 'username': 'johndoe', + 'last_name': 'Doe', + 'date_joined': 12345, + }) + + mock_put_item.assert_called_once_with('users', { + 'username': {'S': 'johndoe'}, + 'last_name': {'S': 'Doe'}, + 'date_joined': {'N': '12345'} + }, expected={ + 'username': { + 'Exists': False, + }, + 'last_name': { + 'Exists': False, + }, + 'date_joined': { + 'Exists': False, + } + }) + + def test_private_put_item(self): + with mock.patch.object( + self.users.connection, + 'put_item', + return_value={}) as mock_put_item: + self.users._put_item({'some': 'data'}) + + mock_put_item.assert_called_once_with('users', {'some': 'data'}) + + def test_private_update_item(self): + with mock.patch.object( + self.users.connection, + 'update_item', + return_value={}) as mock_update_item: + self.users._update_item({ + 'username': 'johndoe' + }, { + 'some': 'data', + }) + + mock_update_item.assert_called_once_with('users', { + 'username': {'S': 'johndoe'}, + }, { + 'some': 'data', + }) + + def test_delete_item(self): + with mock.patch.object( + self.users.connection, + 'delete_item', + return_value={}) as mock_delete_item: + self.assertTrue(self.users.delete_item(username='johndoe', date_joined=23456)) + + mock_delete_item.assert_called_once_with('users', { + 'username': { + 'S': 'johndoe' + }, + 'date_joined': { + 'N': '23456' + } + }, expected=None, conditional_operator=None) + + def test_delete_item_conditionally(self): + with mock.patch.object( + 
self.users.connection, + 'delete_item', + return_value={}) as mock_delete_item: + self.assertTrue(self.users.delete_item(expected={'balance__eq': 0}, + username='johndoe', date_joined=23456)) + + mock_delete_item.assert_called_once_with('users', { + 'username': { + 'S': 'johndoe' + }, + 'date_joined': { + 'N': '23456' + } + }, + expected={ + 'balance': { + 'ComparisonOperator': 'EQ', 'AttributeValueList': [{'N': '0'}] + }, + }, + conditional_operator=None) + + def side_effect(*args, **kwargs): + raise exceptions.ConditionalCheckFailedException(400, '', {}) + + with mock.patch.object( + self.users.connection, + 'delete_item', + side_effect=side_effect) as mock_delete_item: + self.assertFalse(self.users.delete_item(expected={'balance__eq': 0}, + username='johndoe', date_joined=23456)) + + def test_get_key_fields_no_schema_populated(self): + expected = { + "Table": { + "AttributeDefinitions": [ + { + "AttributeName": "username", + "AttributeType": "S" + }, + { + "AttributeName": "date_joined", + "AttributeType": "N" + } + ], + "ItemCount": 5, + "KeySchema": [ + { + "AttributeName": "username", + "KeyType": "HASH" + }, + { + "AttributeName": "date_joined", + "KeyType": "RANGE" + } + ], + "LocalSecondaryIndexes": [ + { + "IndexName": "UsernameIndex", + "KeySchema": [ + { + "AttributeName": "username", + "KeyType": "HASH" + } + ], + "Projection": { + "ProjectionType": "KEYS_ONLY" + } + } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": 20, + "WriteCapacityUnits": 6 + }, + "TableName": "Thread", + "TableStatus": "ACTIVE" + } + } + + with mock.patch.object( + self.users.connection, + 'describe_table', + return_value=expected) as mock_describe: + self.assertEqual(self.users.schema, None) + + key_fields = self.users.get_key_fields() + self.assertEqual(key_fields, ['username', 'date_joined']) + + self.assertEqual(len(self.users.schema), 2) + + mock_describe.assert_called_once_with('users') + + def test_batch_write_no_writes(self): + with mock.patch.object( + self.users.connection, + 'batch_write_item', + return_value={}) as mock_batch: + with self.users.batch_write() as batch: + pass + + self.assertFalse(mock_batch.called) + + def test_batch_write(self): + with mock.patch.object( + self.users.connection, + 'batch_write_item', + return_value={}) as mock_batch: + with self.users.batch_write() as batch: + batch.put_item(data={ + 'username': 'jane', + 'date_joined': 12342547 + }) + batch.delete_item(username='johndoe') + batch.put_item(data={ + 'username': 'alice', + 'date_joined': 12342888 + }) + + mock_batch.assert_called_once_with({ + 'users': [ + { + 'PutRequest': { + 'Item': { + 'username': {'S': 'jane'}, + 'date_joined': {'N': '12342547'} + } + } + }, + { + 'PutRequest': { + 'Item': { + 'username': {'S': 'alice'}, + 'date_joined': {'N': '12342888'} + } + } + }, + { + 'DeleteRequest': { + 'Key': { + 'username': {'S': 'johndoe'}, + } + } + }, + ] + }) + + def test_batch_write_dont_swallow_exceptions(self): + with mock.patch.object( + self.users.connection, + 'batch_write_item', + return_value={}) as mock_batch: + try: + with self.users.batch_write() as batch: + raise Exception('OH NOES') + except Exception as e: + self.assertEqual(str(e), 'OH NOES') + + self.assertFalse(mock_batch.called) + + def test_batch_write_flushing(self): + with mock.patch.object( + self.users.connection, + 'batch_write_item', + return_value={}) as mock_batch: + with self.users.batch_write() as batch: + batch.put_item(data={ + 'username': 'jane', + 'date_joined': 12342547 + }) + # This would only be enough for one 
batch. + batch.delete_item(username='johndoe1') + batch.delete_item(username='johndoe2') + batch.delete_item(username='johndoe3') + batch.delete_item(username='johndoe4') + batch.delete_item(username='johndoe5') + batch.delete_item(username='johndoe6') + batch.delete_item(username='johndoe7') + batch.delete_item(username='johndoe8') + batch.delete_item(username='johndoe9') + batch.delete_item(username='johndoe10') + batch.delete_item(username='johndoe11') + batch.delete_item(username='johndoe12') + batch.delete_item(username='johndoe13') + batch.delete_item(username='johndoe14') + batch.delete_item(username='johndoe15') + batch.delete_item(username='johndoe16') + batch.delete_item(username='johndoe17') + batch.delete_item(username='johndoe18') + batch.delete_item(username='johndoe19') + batch.delete_item(username='johndoe20') + batch.delete_item(username='johndoe21') + batch.delete_item(username='johndoe22') + batch.delete_item(username='johndoe23') + + # We're only at 24 items. No flushing yet. + self.assertEqual(mock_batch.call_count, 0) + + # This pushes it over the edge. A flush happens then we start + # queuing objects again. + batch.delete_item(username='johndoe24') + self.assertEqual(mock_batch.call_count, 1) + # Since we add another, there's enough for a second call to + # flush. + batch.delete_item(username='johndoe25') + + self.assertEqual(mock_batch.call_count, 2) + + def test_batch_write_unprocessed_items(self): + unprocessed = { + 'UnprocessedItems': { + 'users': [ + { + 'PutRequest': { + 'username': { + 'S': 'jane', + }, + 'date_joined': { + 'N': 12342547 + } + }, + }, + ], + }, + } + + # Test enqueuing the unprocessed bits. + with mock.patch.object( + self.users.connection, + 'batch_write_item', + return_value=unprocessed) as mock_batch: + with self.users.batch_write() as batch: + self.assertEqual(len(batch._unprocessed), 0) + + # Trash the ``resend_unprocessed`` method so that we don't + # infinite loop forever here. + batch.resend_unprocessed = lambda: True + + batch.put_item(data={ + 'username': 'jane', + 'date_joined': 12342547 + }) + batch.delete_item(username='johndoe') + batch.put_item(data={ + 'username': 'alice', + 'date_joined': 12342888 + }) + + self.assertEqual(len(batch._unprocessed), 1) + + # Now test resending those unprocessed items. + with mock.patch.object( + self.users.connection, + 'batch_write_item', + return_value={}) as mock_batch: + with self.users.batch_write() as batch: + self.assertEqual(len(batch._unprocessed), 0) + + # Toss in faked unprocessed items, as though a previous batch + # had failed. + batch._unprocessed = [ + { + 'PutRequest': { + 'username': { + 'S': 'jane', + }, + 'date_joined': { + 'N': 12342547 + } + }, + }, + ] + + batch.put_item(data={ + 'username': 'jane', + 'date_joined': 12342547 + }) + batch.delete_item(username='johndoe') + batch.put_item(data={ + 'username': 'alice', + 'date_joined': 12342888 + }) + + # Flush, to make sure everything has been processed. + # Unprocessed items should still be hanging around. + batch.flush() + self.assertEqual(len(batch._unprocessed), 1) + + # Post-exit, this should be emptied. 
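+ # (On exit the batch context manager flushes and calls + # resend_unprocessed, which drains the queue.)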
+ self.assertEqual(len(batch._unprocessed), 0) + + def test__build_filters(self): + filters = self.users._build_filters({ + 'username__eq': 'johndoe', + 'date_joined__gte': 1234567, + 'age__in': [30, 31, 32, 33], + 'last_name__between': ['danzig', 'only'], + 'first_name__null': False, + 'gender__null': True, + }, using=FILTER_OPERATORS) + self.assertEqual(filters, { + 'username': { + 'AttributeValueList': [ + { + 'S': 'johndoe', + }, + ], + 'ComparisonOperator': 'EQ', + }, + 'date_joined': { + 'AttributeValueList': [ + { + 'N': '1234567', + }, + ], + 'ComparisonOperator': 'GE', + }, + 'age': { + 'AttributeValueList': [ + {'N': '30'}, + {'N': '31'}, + {'N': '32'}, + {'N': '33'}, + ], + 'ComparisonOperator': 'IN', + }, + 'last_name': { + 'AttributeValueList': [{'S': 'danzig'}, {'S': 'only'}], + 'ComparisonOperator': 'BETWEEN', + }, + 'first_name': { + 'ComparisonOperator': 'NOT_NULL' + }, + 'gender': { + 'ComparisonOperator': 'NULL' + }, + }) + + self.assertRaises(exceptions.UnknownFilterTypeError, + self.users._build_filters, + { + 'darling__die': True, + } + ) + + q_filters = self.users._build_filters({ + 'username__eq': 'johndoe', + 'date_joined__gte': 1234567, + 'last_name__between': ['danzig', 'only'], + 'gender__beginswith': 'm', + }, using=QUERY_OPERATORS) + self.assertEqual(q_filters, { + 'username': { + 'AttributeValueList': [ + { + 'S': 'johndoe', + }, + ], + 'ComparisonOperator': 'EQ', + }, + 'date_joined': { + 'AttributeValueList': [ + { + 'N': '1234567', + }, + ], + 'ComparisonOperator': 'GE', + }, + 'last_name': { + 'AttributeValueList': [{'S': 'danzig'}, {'S': 'only'}], + 'ComparisonOperator': 'BETWEEN', + }, + 'gender': { + 'AttributeValueList': [{'S': 'm'}], + 'ComparisonOperator': 'BEGINS_WITH', + }, + }) + + self.assertRaises(exceptions.UnknownFilterTypeError, + self.users._build_filters, + { + 'darling__die': True, + }, + using=QUERY_OPERATORS + ) + self.assertRaises(exceptions.UnknownFilterTypeError, + self.users._build_filters, + { + 'first_name__null': True, + }, + using=QUERY_OPERATORS + ) + + def test_private_query(self): + expected = { + "ConsumedCapacity": { + "CapacityUnits": 0.5, + "TableName": "users" + }, + "Count": 4, + "Items": [ + { + 'username': {'S': 'johndoe'}, + 'first_name': {'S': 'John'}, + 'last_name': {'S': 'Doe'}, + 'date_joined': {'N': '1366056668'}, + 'friend_count': {'N': '3'}, + 'friends': {'SS': ['alice', 'bob', 'jane']}, + }, + { + 'username': {'S': 'jane'}, + 'first_name': {'S': 'Jane'}, + 'last_name': {'S': 'Doe'}, + 'date_joined': {'N': '1366057777'}, + 'friend_count': {'N': '2'}, + 'friends': {'SS': ['alice', 'johndoe']}, + }, + { + 'username': {'S': 'alice'}, + 'first_name': {'S': 'Alice'}, + 'last_name': {'S': 'Expert'}, + 'date_joined': {'N': '1366056680'}, + 'friend_count': {'N': '1'}, + 'friends': {'SS': ['jane']}, + }, + { + 'username': {'S': 'bob'}, + 'first_name': {'S': 'Bob'}, + 'last_name': {'S': 'Smith'}, + 'date_joined': {'N': '1366056888'}, + 'friend_count': {'N': '1'}, + 'friends': {'SS': ['johndoe']}, + }, + ], + "ScannedCount": 4 + } + + with mock.patch.object( + self.users.connection, + 'query', + return_value=expected) as mock_query: + results = self.users._query( + limit=4, + reverse=True, + username__between=['aaa', 'mmm'] + ) + usernames = [res['username'] for res in results['results']] + self.assertEqual(usernames, ['johndoe', 'jane', 'alice', 'bob']) + self.assertEqual(len(results['results']), 4) + self.assertEqual(results['last_key'], None) + + mock_query.assert_called_once_with('users', + consistent_read=False, + 
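# reverse=True comes through as scan_index_forward=False: +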
scan_index_forward=False, + index_name=None, + attributes_to_get=None, + limit=4, + key_conditions={ + 'username': { + 'AttributeValueList': [{'S': 'aaa'}, {'S': 'mmm'}], + 'ComparisonOperator': 'BETWEEN', + } + }, + select=None, + query_filter=None, + conditional_operator=None + ) + + # Now alter the expected. + expected['LastEvaluatedKey'] = { + 'username': { + 'S': 'johndoe', + }, + } + + with mock.patch.object( + self.users.connection, + 'query', + return_value=expected) as mock_query_2: + results = self.users._query( + limit=4, + reverse=True, + username__between=['aaa', 'mmm'], + exclusive_start_key={ + 'username': 'adam', + }, + consistent=True, + query_filter=None, + conditional_operator='AND' + ) + usernames = [res['username'] for res in results['results']] + self.assertEqual(usernames, ['johndoe', 'jane', 'alice', 'bob']) + self.assertEqual(len(results['results']), 4) + self.assertEqual(results['last_key'], {'username': 'johndoe'}) + + mock_query_2.assert_called_once_with('users', + key_conditions={ + 'username': { + 'AttributeValueList': [{'S': 'aaa'}, {'S': 'mmm'}], + 'ComparisonOperator': 'BETWEEN', + } + }, + index_name=None, + attributes_to_get=None, + scan_index_forward=False, + limit=4, + exclusive_start_key={ + 'username': { + 'S': 'adam', + }, + }, + consistent_read=True, + select=None, + query_filter=None, + conditional_operator='AND' + ) + + def test_private_scan(self): + expected = { + "ConsumedCapacity": { + "CapacityUnits": 0.5, + "TableName": "users" + }, + "Count": 4, + "Items": [ + { + 'username': {'S': 'alice'}, + 'first_name': {'S': 'Alice'}, + 'last_name': {'S': 'Expert'}, + 'date_joined': {'N': '1366056680'}, + 'friend_count': {'N': '1'}, + 'friends': {'SS': ['jane']}, + }, + { + 'username': {'S': 'bob'}, + 'first_name': {'S': 'Bob'}, + 'last_name': {'S': 'Smith'}, + 'date_joined': {'N': '1366056888'}, + 'friend_count': {'N': '1'}, + 'friends': {'SS': ['johndoe']}, + }, + { + 'username': {'S': 'jane'}, + 'first_name': {'S': 'Jane'}, + 'last_name': {'S': 'Doe'}, + 'date_joined': {'N': '1366057777'}, + 'friend_count': {'N': '2'}, + 'friends': {'SS': ['alice', 'johndoe']}, + }, + ], + "ScannedCount": 4 + } + + with mock.patch.object( + self.users.connection, + 'scan', + return_value=expected) as mock_scan: + results = self.users._scan( + limit=2, + friend_count__lte=2 + ) + usernames = [res['username'] for res in results['results']] + self.assertEqual(usernames, ['alice', 'bob', 'jane']) + self.assertEqual(len(results['results']), 3) + self.assertEqual(results['last_key'], None) + + mock_scan.assert_called_once_with('users', + scan_filter={ + 'friend_count': { + 'AttributeValueList': [{'N': '2'}], + 'ComparisonOperator': 'LE', + } + }, + limit=2, + segment=None, + attributes_to_get=None, + total_segments=None, + conditional_operator=None + ) + + # Now alter the expected. 
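+ # A LastEvaluatedKey in the response marks a paginated result set; + # the table layer hands it back as results['last_key'].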
+ expected['LastEvaluatedKey'] = { + 'username': { + 'S': 'jane', + }, + } + + with mock.patch.object( + self.users.connection, + 'scan', + return_value=expected) as mock_scan_2: + results = self.users._scan( + limit=3, + friend_count__lte=2, + exclusive_start_key={ + 'username': 'adam', + }, + segment=None, + total_segments=None + ) + usernames = [res['username'] for res in results['results']] + self.assertEqual(usernames, ['alice', 'bob', 'jane']) + self.assertEqual(len(results['results']), 3) + self.assertEqual(results['last_key'], {'username': 'jane'}) + + mock_scan_2.assert_called_once_with('users', + scan_filter={ + 'friend_count': { + 'AttributeValueList': [{'N': '2'}], + 'ComparisonOperator': 'LE', + } + }, + limit=3, + exclusive_start_key={ + 'username': { + 'S': 'adam', + }, + }, + segment=None, + attributes_to_get=None, + total_segments=None, + conditional_operator=None + ) + + def test_query(self): + items_1 = { + 'results': [ + Item(self.users, data={ + 'username': 'johndoe', + 'first_name': 'John', + 'last_name': 'Doe', + }), + Item(self.users, data={ + 'username': 'jane', + 'first_name': 'Jane', + 'last_name': 'Doe', + }), + ], + 'last_key': 'jane', + } + + results = self.users.query_2(last_name__eq='Doe') + self.assertTrue(isinstance(results, ResultSet)) + self.assertEqual(len(results._results), 0) + self.assertEqual(results.the_callable, self.users._query) + + with mock.patch.object( + results, + 'the_callable', + return_value=items_1) as mock_query: + res_1 = next(results) + # Now it should be populated. + self.assertEqual(len(results._results), 2) + self.assertEqual(res_1['username'], 'johndoe') + res_2 = next(results) + self.assertEqual(res_2['username'], 'jane') + + self.assertEqual(mock_query.call_count, 1) + + items_2 = { + 'results': [ + Item(self.users, data={ + 'username': 'foodoe', + 'first_name': 'Foo', + 'last_name': 'Doe', + }), + ], + } + + with mock.patch.object( + results, + 'the_callable', + return_value=items_2) as mock_query_2: + res_3 = next(results) + # New results should have been found. + self.assertEqual(len(results._results), 1) + self.assertEqual(res_3['username'], 'foodoe') + + self.assertRaises(StopIteration, results.next) + + self.assertEqual(mock_query_2.call_count, 1) + + def test_query_with_specific_attributes(self): + items_1 = { + 'results': [ + Item(self.users, data={ + 'username': 'johndoe', + }), + Item(self.users, data={ + 'username': 'jane', + }), + ], + 'last_key': 'jane', + } + + results = self.users.query_2(last_name__eq='Doe', + attributes=['username']) + self.assertTrue(isinstance(results, ResultSet)) + self.assertEqual(len(results._results), 0) + self.assertEqual(results.the_callable, self.users._query) + + with mock.patch.object( + results, + 'the_callable', + return_value=items_1) as mock_query: + res_1 = next(results) + # Now it should be populated. 
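+ # (The first next() call triggers the fetch through the_callable.)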
+ self.assertEqual(len(results._results), 2) + self.assertEqual(res_1['username'], 'johndoe') + self.assertEqual(list(res_1.keys()), ['username']) + res_2 = next(results) + self.assertEqual(res_2['username'], 'jane') + + self.assertEqual(mock_query.call_count, 1) + + def test_scan(self): + items_1 = { + 'results': [ + Item(self.users, data={ + 'username': 'johndoe', + 'first_name': 'John', + 'last_name': 'Doe', + }), + Item(self.users, data={ + 'username': 'jane', + 'first_name': 'Jane', + 'last_name': 'Doe', + }), + ], + 'last_key': 'jane', + } + + results = self.users.scan(last_name__eq='Doe') + self.assertTrue(isinstance(results, ResultSet)) + self.assertEqual(len(results._results), 0) + self.assertEqual(results.the_callable, self.users._scan) + + with mock.patch.object( + results, + 'the_callable', + return_value=items_1) as mock_scan: + res_1 = next(results) + # Now it should be populated. + self.assertEqual(len(results._results), 2) + self.assertEqual(res_1['username'], 'johndoe') + res_2 = next(results) + self.assertEqual(res_2['username'], 'jane') + + self.assertEqual(mock_scan.call_count, 1) + + items_2 = { + 'results': [ + Item(self.users, data={ + 'username': 'zoeydoe', + 'first_name': 'Zoey', + 'last_name': 'Doe', + }), + ], + } + + with mock.patch.object( + results, + 'the_callable', + return_value=items_2) as mock_scan_2: + res_3 = next(results) + # New results should have been found. + self.assertEqual(len(results._results), 1) + self.assertEqual(res_3['username'], 'zoeydoe') + + self.assertRaises(StopIteration, results.next) + + self.assertEqual(mock_scan_2.call_count, 1) + + def test_scan_with_specific_attributes(self): + items_1 = { + 'results': [ + Item(self.users, data={ + 'username': 'johndoe', + }), + Item(self.users, data={ + 'username': 'jane', + }), + ], + 'last_key': 'jane', + } + + results = self.users.scan(attributes=['username']) + self.assertTrue(isinstance(results, ResultSet)) + self.assertEqual(len(results._results), 0) + self.assertEqual(results.the_callable, self.users._scan) + + with mock.patch.object( + results, + 'the_callable', + return_value=items_1) as mock_query: + res_1 = next(results) + # Now it should be populated. 
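+ # Only the requested 'username' attribute should be present on each item.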
+ self.assertEqual(len(results._results), 2) + self.assertEqual(res_1['username'], 'johndoe') + self.assertEqual(list(res_1.keys()), ['username']) + res_2 = next(results) + self.assertEqual(res_2['username'], 'jane') + + self.assertEqual(mock_query.call_count, 1) + + def test_count(self): + expected = { + "Table": { + "AttributeDefinitions": [ + { + "AttributeName": "username", + "AttributeType": "S" + } + ], + "ItemCount": 5, + "KeySchema": [ + { + "AttributeName": "username", + "KeyType": "HASH" + } + ], + "LocalSecondaryIndexes": [ + { + "IndexName": "UsernameIndex", + "KeySchema": [ + { + "AttributeName": "username", + "KeyType": "HASH" + } + ], + "Projection": { + "ProjectionType": "KEYS_ONLY" + } + } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": 20, + "WriteCapacityUnits": 6 + }, + "TableName": "Thread", + "TableStatus": "ACTIVE" + } + } + + with mock.patch.object( + self.users, + 'describe', + return_value=expected) as mock_count: + self.assertEqual(self.users.count(), 5) + + def test_query_count_simple(self): + expected_0 = { + 'Count': 0.0, + } + + expected_1 = { + 'Count': 10.0, + } + + with mock.patch.object( + self.users.connection, + 'query', + return_value=expected_0) as mock_query: + results = self.users.query_count(username__eq='notmyname') + self.assertTrue(isinstance(results, int)) + self.assertEqual(results, 0) + self.assertEqual(mock_query.call_count, 1) + self.assertIn('scan_index_forward', mock_query.call_args[1]) + self.assertEqual(True, mock_query.call_args[1]['scan_index_forward']) + self.assertIn('limit', mock_query.call_args[1]) + self.assertEqual(None, mock_query.call_args[1]['limit']) + + with mock.patch.object( + self.users.connection, + 'query', + return_value=expected_1) as mock_query: + results = self.users.query_count(username__gt='somename', consistent=True, scan_index_forward=False, limit=10) + self.assertTrue(isinstance(results, int)) + self.assertEqual(results, 10) + self.assertEqual(mock_query.call_count, 1) + self.assertIn('scan_index_forward', mock_query.call_args[1]) + self.assertEqual(False, mock_query.call_args[1]['scan_index_forward']) + self.assertIn('limit', mock_query.call_args[1]) + self.assertEqual(10, mock_query.call_args[1]['limit']) + + def test_query_count_paginated(self): + def return_side_effect(*args, **kwargs): + if kwargs.get('exclusive_start_key'): + return {'Count': 10, 'LastEvaluatedKey': None} + else: + return { + 'Count': 20, + 'LastEvaluatedKey': { + 'username': { + 'S': 'johndoe' + }, + 'date_joined': { + 'N': '4118642633' + } + } + } + + with mock.patch.object( + self.users.connection, + 'query', + side_effect=return_side_effect + ) as mock_query: + count = self.users.query_count(username__eq='johndoe') + self.assertTrue(isinstance(count, int)) + self.assertEqual(30, count) + self.assertEqual(mock_query.call_count, 2) + + def test_private_batch_get(self): + expected = { + "ConsumedCapacity": { + "CapacityUnits": 0.5, + "TableName": "users" + }, + 'Responses': { + 'users': [ + { + 'username': {'S': 'alice'}, + 'first_name': {'S': 'Alice'}, + 'last_name': {'S': 'Expert'}, + 'date_joined': {'N': '1366056680'}, + 'friend_count': {'N': '1'}, + 'friends': {'SS': ['jane']}, + }, + { + 'username': {'S': 'bob'}, + 'first_name': {'S': 'Bob'}, + 'last_name': {'S': 'Smith'}, + 'date_joined': {'N': '1366056888'}, + 'friend_count': {'N': '1'}, + 'friends': {'SS': ['johndoe']}, + }, + { + 'username': {'S': 'jane'}, + 'first_name': {'S': 'Jane'}, + 'last_name': {'S': 'Doe'}, + 'date_joined': {'N': '1366057777'}, + 
'friend_count': {'N': '2'}, + 'friends': {'SS': ['alice', 'johndoe']}, + }, + ], + }, + "UnprocessedKeys": { + }, + } + + with mock.patch.object( + self.users.connection, + 'batch_get_item', + return_value=expected) as mock_batch_get: + results = self.users._batch_get(keys=[ + {'username': 'alice', 'friend_count': 1}, + {'username': 'bob', 'friend_count': 1}, + {'username': 'jane'}, + ]) + usernames = [res['username'] for res in results['results']] + self.assertEqual(usernames, ['alice', 'bob', 'jane']) + self.assertEqual(len(results['results']), 3) + self.assertEqual(results['last_key'], None) + self.assertEqual(results['unprocessed_keys'], []) + + mock_batch_get.assert_called_once_with(request_items={ + 'users': { + 'Keys': [ + { + 'username': {'S': 'alice'}, + 'friend_count': {'N': '1'} + }, + { + 'username': {'S': 'bob'}, + 'friend_count': {'N': '1'} + }, { + 'username': {'S': 'jane'}, + } + ] + } + }) + + # Now alter the expected. + del expected['Responses']['users'][2] + expected['UnprocessedKeys'] = { + 'Keys': [ + {'username': {'S': 'jane',}}, + ], + } + + with mock.patch.object( + self.users.connection, + 'batch_get_item', + return_value=expected) as mock_batch_get_2: + results = self.users._batch_get(keys=[ + {'username': 'alice', 'friend_count': 1}, + {'username': 'bob', 'friend_count': 1}, + {'username': 'jane'}, + ]) + usernames = [res['username'] for res in results['results']] + self.assertEqual(usernames, ['alice', 'bob']) + self.assertEqual(len(results['results']), 2) + self.assertEqual(results['last_key'], None) + self.assertEqual(results['unprocessed_keys'], [ + {'username': 'jane'} + ]) + + mock_batch_get_2.assert_called_once_with(request_items={ + 'users': { + 'Keys': [ + { + 'username': {'S': 'alice'}, + 'friend_count': {'N': '1'} + }, + { + 'username': {'S': 'bob'}, + 'friend_count': {'N': '1'} + }, { + 'username': {'S': 'jane'}, + } + ] + } + }) + + def test_private_batch_get_attributes(self): + # test if AttributesToGet parameter is passed to DynamoDB API + expected = { + "ConsumedCapacity": { + "CapacityUnits": 0.5, + "TableName": "users" + }, + 'Responses': { + 'users': [ + { + 'username': {'S': 'alice'}, + 'first_name': {'S': 'Alice'}, + }, + { + 'username': {'S': 'bob'}, + 'first_name': {'S': 'Bob'}, + }, + ], + }, + "UnprocessedKeys": {}, + } + + with mock.patch.object( + self.users.connection, + 'batch_get_item', + return_value=expected) as mock_batch_get_attr: + results = self.users._batch_get(keys=[ + {'username': 'alice'}, + {'username': 'bob'}, + ], attributes=['username', 'first_name']) + usernames = [res['username'] for res in results['results']] + first_names = [res['first_name'] for res in results['results']] + self.assertEqual(usernames, ['alice', 'bob']) + self.assertEqual(first_names, ['Alice', 'Bob']) + self.assertEqual(len(results['results']), 2) + self.assertEqual(results['last_key'], None) + self.assertEqual(results['unprocessed_keys'], []) + + mock_batch_get_attr.assert_called_once_with(request_items={ + 'users': { + 'Keys': [ { 'username': {'S': 'alice'} }, + { 'username': {'S': 'bob'} }, ], + 'AttributesToGet': ['username', 'first_name'], + }, + }) + + def test_batch_get(self): + items_1 = { + 'results': [ + Item(self.users, data={ + 'username': 'johndoe', + 'first_name': 'John', + 'last_name': 'Doe', + }), + Item(self.users, data={ + 'username': 'jane', + 'first_name': 'Jane', + 'last_name': 'Doe', + }), + ], + 'last_key': None, + 'unprocessed_keys': [ + 'zoeydoe', + ] + } + + results = self.users.batch_get(keys=[ + {'username': 'johndoe'}, 
+ {'username': 'jane'}, + {'username': 'zoeydoe'}, + ]) + self.assertTrue(isinstance(results, BatchGetResultSet)) + self.assertEqual(len(results._results), 0) + self.assertEqual(results.the_callable, self.users._batch_get) + + with mock.patch.object( + results, + 'the_callable', + return_value=items_1) as mock_batch_get: + res_1 = next(results) + # Now it should be populated. + self.assertEqual(len(results._results), 2) + self.assertEqual(res_1['username'], 'johndoe') + res_2 = next(results) + self.assertEqual(res_2['username'], 'jane') + + self.assertEqual(mock_batch_get.call_count, 1) + self.assertEqual(results._keys_left, ['zoeydoe']) + + items_2 = { + 'results': [ + Item(self.users, data={ + 'username': 'zoeydoe', + 'first_name': 'Zoey', + 'last_name': 'Doe', + }), + ], + } + + with mock.patch.object( + results, + 'the_callable', + return_value=items_2) as mock_batch_get_2: + res_3 = next(results) + # New results should have been found. + self.assertEqual(len(results._results), 1) + self.assertEqual(res_3['username'], 'zoeydoe') + + self.assertRaises(StopIteration, results.next) + + self.assertEqual(mock_batch_get_2.call_count, 1) + self.assertEqual(results._keys_left, []) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/autoscale/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/autoscale/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/autoscale/test_group.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/autoscale/test_group.py new file mode 100644 index 0000000000000000000000000000000000000000..a5df45899a2bd3710ed54c506477140fb79ec6d2 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/autoscale/test_group.py @@ -0,0 +1,881 @@ +#!/usr/bin/env python +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# + +import base64 +from datetime import datetime + +from tests.unit import unittest +from tests.unit import AWSMockServiceTestCase + +from boto.ec2.autoscale import AutoScaleConnection +from boto.ec2.autoscale.group import AutoScalingGroup +from boto.ec2.autoscale.policy import ScalingPolicy +from boto.ec2.autoscale.tag import Tag + +from boto.ec2.blockdevicemapping import EBSBlockDeviceType, BlockDeviceMapping + +from boto.ec2.autoscale import launchconfig, LaunchConfiguration + + +class TestAutoScaleGroup(AWSMockServiceTestCase): + connection_class = AutoScaleConnection + + def setUp(self): + super(TestAutoScaleGroup, self).setUp() + + def default_body(self): + return b""" + + + requestid + + + """ + + def test_autoscaling_group_with_termination_policies(self): + self.set_http_response(status_code=200) + autoscale = AutoScalingGroup( + name='foo', launch_config='lauch_config', + min_size=1, max_size=2, + termination_policies=['OldestInstance', 'OldestLaunchConfiguration'], + instance_id='test-id') + self.service_connection.create_auto_scaling_group(autoscale) + self.assert_request_parameters({ + 'Action': 'CreateAutoScalingGroup', + 'AutoScalingGroupName': 'foo', + 'LaunchConfigurationName': 'lauch_config', + 'MaxSize': 2, + 'MinSize': 1, + 'TerminationPolicies.member.1': 'OldestInstance', + 'TerminationPolicies.member.2': 'OldestLaunchConfiguration', + 'InstanceId': 'test-id', + }, ignore_params_values=['Version']) + + def test_autoscaling_group_single_vpc_zone_identifier(self): + self.set_http_response(status_code=200) + autoscale = AutoScalingGroup( + name='foo', + vpc_zone_identifier='vpc_zone_1') + self.service_connection.create_auto_scaling_group(autoscale) + self.assert_request_parameters({ + 'Action': 'CreateAutoScalingGroup', + 'AutoScalingGroupName': 'foo', + 'VPCZoneIdentifier': 'vpc_zone_1', + }, ignore_params_values=['MaxSize', 'MinSize', 'LaunchConfigurationName', 'Version']) + + def test_autoscaling_group_vpc_zone_identifier_list(self): + self.set_http_response(status_code=200) + autoscale = AutoScalingGroup( + name='foo', + vpc_zone_identifier=['vpc_zone_1', 'vpc_zone_2']) + self.service_connection.create_auto_scaling_group(autoscale) + self.assert_request_parameters({ + 'Action': 'CreateAutoScalingGroup', + 'AutoScalingGroupName': 'foo', + 'VPCZoneIdentifier': 'vpc_zone_1,vpc_zone_2', + }, ignore_params_values=['MaxSize', 'MinSize', 'LaunchConfigurationName', 'Version']) + + def test_autoscaling_group_vpc_zone_identifier_multi(self): + self.set_http_response(status_code=200) + autoscale = AutoScalingGroup( + name='foo', + vpc_zone_identifier='vpc_zone_1,vpc_zone_2') + self.service_connection.create_auto_scaling_group(autoscale) + self.assert_request_parameters({ + 'Action': 'CreateAutoScalingGroup', + 'AutoScalingGroupName': 'foo', + 'VPCZoneIdentifier': 'vpc_zone_1,vpc_zone_2', + }, ignore_params_values=['MaxSize', 'MinSize', 'LaunchConfigurationName', 'Version']) + + +class TestAutoScaleGroupHonorCooldown(AWSMockServiceTestCase): + connection_class = AutoScaleConnection + + def default_body(self): + return b""" + + + 9fb7e2db-6998-11e2-a985-57c82EXAMPLE + + + """ + + def test_honor_cooldown(self): + self.set_http_response(status_code=200) + self.service_connection.set_desired_capacity('foo', 10, True) + self.assert_request_parameters({ + 'Action': 'SetDesiredCapacity', + 'AutoScalingGroupName': 'foo', + 'DesiredCapacity': 10, + 'HonorCooldown': 'true', + }, ignore_params_values=['Version']) + + +class TestScheduledGroup(AWSMockServiceTestCase): + connection_class = 
AutoScaleConnection + + def setUp(self): + super(TestScheduledGroup, self).setUp() + + def default_body(self): + return b""" + + + requestid + + + """ + + def test_scheduled_group_creation(self): + self.set_http_response(status_code=200) + self.service_connection.create_scheduled_group_action('foo', + 'scheduled-foo', + desired_capacity=1, + start_time=datetime(2013, 1, 1, 22, 55, 31), + end_time=datetime(2013, 2, 1, 22, 55, 31), + min_size=1, + max_size=2, + recurrence='0 10 * * *') + self.assert_request_parameters({ + 'Action': 'PutScheduledUpdateGroupAction', + 'AutoScalingGroupName': 'foo', + 'ScheduledActionName': 'scheduled-foo', + 'MaxSize': 2, + 'MinSize': 1, + 'DesiredCapacity': 1, + 'EndTime': '2013-02-01T22:55:31', + 'StartTime': '2013-01-01T22:55:31', + 'Recurrence': '0 10 * * *', + }, ignore_params_values=['Version']) + + +class TestParseAutoScaleGroupResponse(AWSMockServiceTestCase): + connection_class = AutoScaleConnection + + def default_body(self): + return b""" + + + + + + test_group + EC2 + 2012-09-27T20:19:47.082Z + + test_launchconfig + + + Healthy + us-east-1a + i-z118d054 + test_launchconfig + InService + + + 1 + + us-east-1c + us-east-1a + + + 1 + + 0 + 300 + myarn + + OldestInstance + OldestLaunchConfiguration + + 2 + Something + + + + """ + + def test_get_all_groups_is_parsed_correctly(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_all_groups(names=['test_group']) + self.assertEqual(len(response), 1, response) + as_group = response[0] + self.assertEqual(as_group.availability_zones, ['us-east-1c', 'us-east-1a']) + self.assertEqual(as_group.default_cooldown, 300) + self.assertEqual(as_group.desired_capacity, 1) + self.assertEqual(as_group.enabled_metrics, []) + self.assertEqual(as_group.health_check_period, 0) + self.assertEqual(as_group.health_check_type, 'EC2') + self.assertEqual(as_group.launch_config_name, 'test_launchconfig') + self.assertEqual(as_group.load_balancers, []) + self.assertEqual(as_group.min_size, 1) + self.assertEqual(as_group.max_size, 2) + self.assertEqual(as_group.name, 'test_group') + self.assertEqual(as_group.suspended_processes, []) + self.assertEqual(as_group.tags, []) + self.assertEqual(as_group.termination_policies, + ['OldestInstance', 'OldestLaunchConfiguration']) + self.assertEqual(as_group.instance_id, 'Something') + + +class TestDescribeTerminationPolicies(AWSMockServiceTestCase): + connection_class = AutoScaleConnection + + def default_body(self): + return b""" + + + + ClosestToNextInstanceHour + Default + NewestInstance + OldestInstance + OldestLaunchConfiguration + + + + requestid + + + """ + + def test_autoscaling_group_with_termination_policies(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_termination_policies() + self.assertListEqual( + response, + ['ClosestToNextInstanceHour', 'Default', + 'NewestInstance', 'OldestInstance', 'OldestLaunchConfiguration']) + + +class TestLaunchConfigurationDescribe(AWSMockServiceTestCase): + connection_class = AutoScaleConnection + + def default_body(self): + # This is a dummy response + return b""" + + + + + true + + 2013-01-21T23:04:42.200Z + + my-test-lc + + m1.small + arn:aws:autoscaling:us-east-1:803981987763:launchConfiguration:9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/my-test-lc + + ami-514ac838 + + + + true + + false + vpc-12345 + + sg-1234 + + + + + + d05a22f8-b690-11e2-bf8e-2113fEXAMPLE + + + """ + + def test_get_all_launch_configurations(self): + 
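# The canned response above should parse into a single, fully + # populated LaunchConfiguration object. +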
self.set_http_response(status_code=200) + + response = self.service_connection.get_all_launch_configurations() + self.assertTrue(isinstance(response, list)) + self.assertEqual(len(response), 1) + self.assertTrue(isinstance(response[0], LaunchConfiguration)) + + self.assertEqual(response[0].associate_public_ip_address, True) + self.assertEqual(response[0].name, "my-test-lc") + self.assertEqual(response[0].instance_type, "m1.small") + self.assertEqual(response[0].launch_configuration_arn, "arn:aws:autoscaling:us-east-1:803981987763:launchConfiguration:9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/my-test-lc") + self.assertEqual(response[0].image_id, "ami-514ac838") + self.assertTrue(isinstance(response[0].instance_monitoring, launchconfig.InstanceMonitoring)) + self.assertEqual(response[0].instance_monitoring.enabled, 'true') + self.assertEqual(response[0].ebs_optimized, False) + self.assertEqual(response[0].block_device_mappings, []) + self.assertEqual(response[0].classic_link_vpc_id, 'vpc-12345') + self.assertEqual(response[0].classic_link_vpc_security_groups, + ['sg-1234']) + + self.assert_request_parameters({ + 'Action': 'DescribeLaunchConfigurations', + }, ignore_params_values=['Version']) + + def test_get_all_configuration_limited(self): + self.set_http_response(status_code=200) + + response = self.service_connection.get_all_launch_configurations(max_records=10, names=["my-test1", "my-test2"]) + self.assert_request_parameters({ + 'Action': 'DescribeLaunchConfigurations', + 'MaxRecords': 10, + 'LaunchConfigurationNames.member.1': 'my-test1', + 'LaunchConfigurationNames.member.2': 'my-test2' + }, ignore_params_values=['Version']) + + +class TestLaunchConfiguration(AWSMockServiceTestCase): + connection_class = AutoScaleConnection + + def default_body(self): + # This is a dummy response + return b""" + + + """ + + def test_launch_config(self): + # This unit test is based on #753 and #1343 + self.set_http_response(status_code=200) + dev_sdf = EBSBlockDeviceType(snapshot_id='snap-12345') + + bdm = BlockDeviceMapping() + bdm['/dev/sdf'] = dev_sdf + + lc = launchconfig.LaunchConfiguration( + connection=self.service_connection, + name='launch_config', + image_id='123456', + instance_type='m1.large', + user_data='#!/bin/bash', + security_groups=['group1'], + spot_price='price', + block_device_mappings=[bdm], + associate_public_ip_address=True, + volume_type='atype', + delete_on_termination=False, + iops=3000, + classic_link_vpc_id='vpc-1234', + classic_link_vpc_security_groups=['classic_link_group'] + ) + + response = self.service_connection.create_launch_configuration(lc) + + self.assert_request_parameters({ + 'Action': 'CreateLaunchConfiguration', + 'BlockDeviceMappings.member.1.DeviceName': '/dev/sdf', + 'BlockDeviceMappings.member.1.Ebs.DeleteOnTermination': 'false', + 'BlockDeviceMappings.member.1.Ebs.SnapshotId': 'snap-12345', + 'EbsOptimized': 'false', + 'LaunchConfigurationName': 'launch_config', + 'ImageId': '123456', + 'UserData': base64.b64encode(b'#!/bin/bash').decode('utf-8'), + 'InstanceMonitoring.Enabled': 'false', + 'InstanceType': 'm1.large', + 'SecurityGroups.member.1': 'group1', + 'SpotPrice': 'price', + 'AssociatePublicIpAddress': 'true', + 'VolumeType': 'atype', + 'DeleteOnTermination': 'false', + 'Iops': 3000, + 'ClassicLinkVPCId': 'vpc-1234', + 'ClassicLinkVPCSecurityGroups.member.1': 'classic_link_group' + }, ignore_params_values=['Version']) + + +class TestCreateAutoScalePolicy(AWSMockServiceTestCase): + connection_class = AutoScaleConnection + + def 
setUp(self): + super(TestCreateAutoScalePolicy, self).setUp() + + def default_body(self): + return b""" + + + arn:aws:autoscaling:us-east-1:803981987763:scaling\ + Policy:b0dcf5e8 + -02e6-4e31-9719-0675d0dc31ae:autoScalingGroupName/my-test-asg:\ + policyName/my-scal + eout-policy + + + 3cfc6fef-c08b-11e2-a697-2922EXAMPLE + + + """ + + def test_scaling_policy_with_min_adjustment_step(self): + self.set_http_response(status_code=200) + + policy = ScalingPolicy( + name='foo', as_name='bar', + adjustment_type='PercentChangeInCapacity', scaling_adjustment=50, + min_adjustment_step=30) + self.service_connection.create_scaling_policy(policy) + + self.assert_request_parameters({ + 'Action': 'PutScalingPolicy', + 'PolicyName': 'foo', + 'AutoScalingGroupName': 'bar', + 'AdjustmentType': 'PercentChangeInCapacity', + 'ScalingAdjustment': 50, + 'MinAdjustmentStep': 30 + }, ignore_params_values=['Version']) + + def test_scaling_policy_with_wrong_adjustment_type(self): + self.set_http_response(status_code=200) + + policy = ScalingPolicy( + name='foo', as_name='bar', + adjustment_type='ChangeInCapacity', scaling_adjustment=50, + min_adjustment_step=30) + self.service_connection.create_scaling_policy(policy) + + self.assert_request_parameters({ + 'Action': 'PutScalingPolicy', + 'PolicyName': 'foo', + 'AutoScalingGroupName': 'bar', + 'AdjustmentType': 'ChangeInCapacity', + 'ScalingAdjustment': 50 + }, ignore_params_values=['Version']) + + def test_scaling_policy_without_min_adjustment_step(self): + self.set_http_response(status_code=200) + + policy = ScalingPolicy( + name='foo', as_name='bar', + adjustment_type='PercentChangeInCapacity', scaling_adjustment=50) + self.service_connection.create_scaling_policy(policy) + + self.assert_request_parameters({ + 'Action': 'PutScalingPolicy', + 'PolicyName': 'foo', + 'AutoScalingGroupName': 'bar', + 'AdjustmentType': 'PercentChangeInCapacity', + 'ScalingAdjustment': 50 + }, ignore_params_values=['Version']) + + +class TestPutNotificationConfiguration(AWSMockServiceTestCase): + connection_class = AutoScaleConnection + + def setUp(self): + super(TestPutNotificationConfiguration, self).setUp() + + def default_body(self): + return b""" + + + requestid + + + """ + + def test_autoscaling_group_put_notification_configuration(self): + self.set_http_response(status_code=200) + autoscale = AutoScalingGroup( + name='ana', launch_config='lauch_config', + min_size=1, max_size=2, + termination_policies=['OldestInstance', 'OldestLaunchConfiguration']) + self.service_connection.put_notification_configuration(autoscale, 'arn:aws:sns:us-east-1:19890506:AutoScaling-Up', ['autoscaling:EC2_INSTANCE_LAUNCH']) + self.assert_request_parameters({ + 'Action': 'PutNotificationConfiguration', + 'AutoScalingGroupName': 'ana', + 'NotificationTypes.member.1': 'autoscaling:EC2_INSTANCE_LAUNCH', + 'TopicARN': 'arn:aws:sns:us-east-1:19890506:AutoScaling-Up', + }, ignore_params_values=['Version']) + + +class TestDeleteNotificationConfiguration(AWSMockServiceTestCase): + connection_class = AutoScaleConnection + + def setUp(self): + super(TestDeleteNotificationConfiguration, self).setUp() + + def default_body(self): + return b""" + + + requestid + + + """ + + def test_autoscaling_group_put_notification_configuration(self): + self.set_http_response(status_code=200) + autoscale = AutoScalingGroup( + name='ana', launch_config='lauch_config', + min_size=1, max_size=2, + termination_policies=['OldestInstance', 'OldestLaunchConfiguration']) + self.service_connection.delete_notification_configuration(autoscale, 
'arn:aws:sns:us-east-1:19890506:AutoScaling-Up') + self.assert_request_parameters({ + 'Action': 'DeleteNotificationConfiguration', + 'AutoScalingGroupName': 'ana', + 'TopicARN': 'arn:aws:sns:us-east-1:19890506:AutoScaling-Up', + }, ignore_params_values=['Version']) + + +class TestAutoScalingTag(AWSMockServiceTestCase): + connection_class = AutoScaleConnection + + def default_body(self): + return b""" + + + requestId + + + """ + + def test_create_or_update_tags(self): + self.set_http_response(status_code=200) + + tags = [ + Tag( + connection=self.service_connection, + key='alpha', + value='tango', + resource_id='sg-00000000', + resource_type='auto-scaling-group', + propagate_at_launch=True + ), + Tag( + connection=self.service_connection, + key='bravo', + value='sierra', + resource_id='sg-00000000', + resource_type='auto-scaling-group', + propagate_at_launch=False + )] + + response = self.service_connection.create_or_update_tags(tags) + + self.assert_request_parameters({ + 'Action': 'CreateOrUpdateTags', + 'Tags.member.1.ResourceType': 'auto-scaling-group', + 'Tags.member.1.ResourceId': 'sg-00000000', + 'Tags.member.1.Key': 'alpha', + 'Tags.member.1.Value': 'tango', + 'Tags.member.1.PropagateAtLaunch': 'true', + 'Tags.member.2.ResourceType': 'auto-scaling-group', + 'Tags.member.2.ResourceId': 'sg-00000000', + 'Tags.member.2.Key': 'bravo', + 'Tags.member.2.Value': 'sierra', + 'Tags.member.2.PropagateAtLaunch': 'false' + }, ignore_params_values=['Version']) + + def test_endElement(self): + for i in [ + ('Key', 'mykey', 'key'), + ('Value', 'myvalue', 'value'), + ('ResourceType', 'auto-scaling-group', 'resource_type'), + ('ResourceId', 'sg-01234567', 'resource_id'), + ('PropagateAtLaunch', 'true', 'propagate_at_launch')]: + self.check_tag_attributes_set(i[0], i[1], i[2]) + + def check_tag_attributes_set(self, name, value, attr): + tag = Tag() + tag.endElement(name, value, None) + if value == 'true': + self.assertEqual(getattr(tag, attr), True) + else: + self.assertEqual(getattr(tag, attr), value) + + +class TestAttachInstances(AWSMockServiceTestCase): + connection_class = AutoScaleConnection + + def setUp(self): + super(TestAttachInstances, self).setUp() + + def default_body(self): + return b""" + + + requestid + + + """ + + def test_attach_instances(self): + self.set_http_response(status_code=200) + self.service_connection.attach_instances( + 'autoscale', + ['inst2', 'inst1', 'inst4'] + ) + self.assert_request_parameters({ + 'Action': 'AttachInstances', + 'AutoScalingGroupName': 'autoscale', + 'InstanceIds.member.1': 'inst2', + 'InstanceIds.member.2': 'inst1', + 'InstanceIds.member.3': 'inst4', + }, ignore_params_values=['Version']) + + +class TestDetachInstances(AWSMockServiceTestCase): + connection_class = AutoScaleConnection + + def setUp(self): + super(TestDetachInstances, self).setUp() + + def default_body(self): + return b""" + + + requestid + + + """ + + def test_detach_instances(self): + self.set_http_response(status_code=200) + self.service_connection.detach_instances( + 'autoscale', + ['inst2', 'inst1', 'inst4'] + ) + self.assert_request_parameters({ + 'Action': 'DetachInstances', + 'AutoScalingGroupName': 'autoscale', + 'InstanceIds.member.1': 'inst2', + 'InstanceIds.member.2': 'inst1', + 'InstanceIds.member.3': 'inst4', + 'ShouldDecrementDesiredCapacity': 'true', + }, ignore_params_values=['Version']) + + def test_detach_instances_with_decrement_desired_capacity(self): + self.set_http_response(status_code=200) + self.service_connection.detach_instances( + 'autoscale', + ['inst2', 
'inst1', 'inst4'], + True + ) + self.assert_request_parameters({ + 'Action': 'DetachInstances', + 'AutoScalingGroupName': 'autoscale', + 'InstanceIds.member.1': 'inst2', + 'InstanceIds.member.2': 'inst1', + 'InstanceIds.member.3': 'inst4', + 'ShouldDecrementDesiredCapacity': 'true', + }, ignore_params_values=['Version']) + + def test_detach_instances_without_decrement_desired_capacity(self): + self.set_http_response(status_code=200) + self.service_connection.detach_instances( + 'autoscale', + ['inst2', 'inst1', 'inst4'], + False + ) + self.assert_request_parameters({ + 'Action': 'DetachInstances', + 'AutoScalingGroupName': 'autoscale', + 'InstanceIds.member.1': 'inst2', + 'InstanceIds.member.2': 'inst1', + 'InstanceIds.member.3': 'inst4', + 'ShouldDecrementDesiredCapacity': 'false', + }, ignore_params_values=['Version']) + + +class TestGetAccountLimits(AWSMockServiceTestCase): + connection_class = AutoScaleConnection + + def setUp(self): + super(TestGetAccountLimits, self).setUp() + + def default_body(self): + return b""" + + 6 + 3 + + requestid + + + """ + + def test_autoscaling_group_put_notification_configuration(self): + self.set_http_response(status_code=200) + limits = self.service_connection.get_account_limits() + self.assert_request_parameters({ + 'Action': 'DescribeAccountLimits', + }, ignore_params_values=['Version']) + self.assertEqual(limits.max_autoscaling_groups, 6) + self.assertEqual(limits.max_launch_configurations, 3) + + +class TestGetAdjustmentTypes(AWSMockServiceTestCase): + connection_class = AutoScaleConnection + + def setUp(self): + super(TestGetAdjustmentTypes, self).setUp() + + def default_body(self): + return b""" + + + + + ChangeInCapacity + + + ExactCapacity + + + PercentChangeInCapacity + + + + + requestId + + + """ + + def test_autoscaling_adjustment_types(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_all_adjustment_types() + self.assert_request_parameters({ + 'Action': 'DescribeAdjustmentTypes' + }, ignore_params_values=['Version']) + + self.assertTrue(isinstance(response, list)) + self.assertEqual(response[0].adjustment_type, "ChangeInCapacity") + self.assertEqual(response[1].adjustment_type, "ExactCapacity") + self.assertEqual(response[2].adjustment_type, "PercentChangeInCapacity") + + +class TestLaunchConfigurationDescribeWithBlockDeviceTypes(AWSMockServiceTestCase): + connection_class = AutoScaleConnection + + def default_body(self): + # This is a dummy response + return b""" + + + + + true + + 2013-01-21T23:04:42.200Z + + my-test-lc + + m1.small + arn:aws:autoscaling:us-east-1:803981987763:launchConfiguration:9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/my-test-lc + + + /dev/xvdp + + snap-1234abcd + 1000 + true + io1 + 100 + + + + ephemeral1 + /dev/xvdc + + + ephemeral0 + /dev/xvdb + + + /dev/xvdh + + 2000 + false + io1 + 200 + + + + ami-514ac838 + + + + true + + false + + + + + d05a22f8-b690-11e2-bf8e-2113fEXAMPLE + + + """ + + def test_get_all_launch_configurations_with_block_device_types(self): + self.set_http_response(status_code=200) + self.service_connection.use_block_device_types = True + + response = self.service_connection.get_all_launch_configurations() + self.assertTrue(isinstance(response, list)) + self.assertEqual(len(response), 1) + self.assertTrue(isinstance(response[0], LaunchConfiguration)) + + self.assertEqual(response[0].associate_public_ip_address, True) + self.assertEqual(response[0].name, "my-test-lc") + self.assertEqual(response[0].instance_type, "m1.small") + 
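# With use_block_device_types enabled, block_device_mappings below + # is a dict keyed by device name rather than a plain list. +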
self.assertEqual(response[0].launch_configuration_arn, "arn:aws:autoscaling:us-east-1:803981987763:launchConfiguration:9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/my-test-lc") + self.assertEqual(response[0].image_id, "ami-514ac838") + self.assertTrue(isinstance(response[0].instance_monitoring, launchconfig.InstanceMonitoring)) + self.assertEqual(response[0].instance_monitoring.enabled, 'true') + self.assertEqual(response[0].ebs_optimized, False) + + self.assertEqual(response[0].block_device_mappings['/dev/xvdb'].ephemeral_name, 'ephemeral0') + + self.assertEqual(response[0].block_device_mappings['/dev/xvdc'].ephemeral_name, 'ephemeral1') + + self.assertEqual(response[0].block_device_mappings['/dev/xvdp'].snapshot_id, 'snap-1234abcd') + self.assertEqual(response[0].block_device_mappings['/dev/xvdp'].delete_on_termination, True) + self.assertEqual(response[0].block_device_mappings['/dev/xvdp'].iops, 1000) + self.assertEqual(response[0].block_device_mappings['/dev/xvdp'].size, 100) + self.assertEqual(response[0].block_device_mappings['/dev/xvdp'].volume_type, 'io1') + + self.assertEqual(response[0].block_device_mappings['/dev/xvdh'].delete_on_termination, False) + self.assertEqual(response[0].block_device_mappings['/dev/xvdh'].iops, 2000) + self.assertEqual(response[0].block_device_mappings['/dev/xvdh'].size, 200) + self.assertEqual(response[0].block_device_mappings['/dev/xvdh'].volume_type, 'io1') + + self.assert_request_parameters({ + 'Action': 'DescribeLaunchConfigurations', + }, ignore_params_values=['Version']) + + def test_get_all_configuration_limited(self): + self.set_http_response(status_code=200) + + response = self.service_connection.get_all_launch_configurations(max_records=10, names=["my-test1", "my-test2"]) + self.assert_request_parameters({ + 'Action': 'DescribeLaunchConfigurations', + 'MaxRecords': 10, + 'LaunchConfigurationNames.member.1': 'my-test1', + 'LaunchConfigurationNames.member.2': 'my-test2' + }, ignore_params_values=['Version']) + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/cloudwatch/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/cloudwatch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/cloudwatch/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/cloudwatch/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..d92669d46bb7c392f4e44cb2c855f3df352d043d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/cloudwatch/test_connection.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import datetime + +from tests.unit import unittest +from tests.unit import AWSMockServiceTestCase + +from boto.ec2.cloudwatch import CloudWatchConnection + + +class TestCloudWatchConnection(AWSMockServiceTestCase): + + connection_class = CloudWatchConnection + + def test_build_put_params_multiple_everything(self): + # This dictionary gets modified by the method call. + # Check to make sure all updates happen appropriately. + params = {} + # Again, these are rubbish parameters. Pay them no mind, we care more + # about the functionality of the method + name = ['whatever', 'goeshere'] + value = None + timestamp = [ + datetime.datetime(2013, 5, 13, 9, 2, 35), + datetime.datetime(2013, 5, 12, 9, 2, 35), + ] + unit = ['lbs', 'ft'] + dimensions = None + statistics = [ + { + 'maximum': 5, + 'minimum': 1, + 'samplecount': 3, + 'sum': 7, + }, + { + 'maximum': 6, + 'minimum': 2, + 'samplecount': 4, + 'sum': 5, + }, + ] + + # The important part is that this shouldn't generate a warning (due + # to overwriting a variable) & should have the correct number of + # Metrics (2). + self.service_connection.build_put_params( + params, + name=name, + value=value, + timestamp=timestamp, + unit=unit, + dimensions=dimensions, + statistics=statistics + ) + + self.assertEqual(params, { + 'MetricData.member.1.MetricName': 'whatever', + 'MetricData.member.1.StatisticValues.Maximum': 5, + 'MetricData.member.1.StatisticValues.Minimum': 1, + 'MetricData.member.1.StatisticValues.SampleCount': 3, + 'MetricData.member.1.StatisticValues.Sum': 7, + 'MetricData.member.1.Timestamp': '2013-05-13T09:02:35', + 'MetricData.member.1.Unit': 'lbs', + 'MetricData.member.2.MetricName': 'goeshere', + 'MetricData.member.2.StatisticValues.Maximum': 6, + 'MetricData.member.2.StatisticValues.Minimum': 2, + 'MetricData.member.2.StatisticValues.SampleCount': 4, + 'MetricData.member.2.StatisticValues.Sum': 5, + 'MetricData.member.2.Timestamp': '2013-05-12T09:02:35', + # If needed, comment this next line to cause a test failure & see + # the logging warning. 
+ 'MetricData.member.2.Unit': 'ft', + }) + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/elb/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/elb/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/elb/test_attribute.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/elb/test_attribute.py new file mode 100644 index 0000000000000000000000000000000000000000..40edecba8279ecdd9e1c7f832a9fdf6fa90cedc6 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/elb/test_attribute.py @@ -0,0 +1,205 @@ +from tests.unit import unittest +from tests.compat import mock + +from boto.ec2.elb import ELBConnection +from boto.ec2.elb import LoadBalancer +from boto.ec2.elb.attributes import LbAttributes + +ATTRIBUTE_GET_TRUE_CZL_RESPONSE = b""" + + + + + true + + + + + 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE + + +""" + +ATTRIBUTE_GET_FALSE_CZL_RESPONSE = b""" + + + + + false + + + + + 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE + + +""" + +ATTRIBUTE_GET_CS_RESPONSE = b""" + + + + + 30 + + + + + 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE + + +""" + +ATTRIBUTE_SET_RESPONSE = b""" + + + + 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE + + +""" + +# make_request arguments for setting attributes. +# Format: (API_COMMAND, API_PARAMS, API_PATH, API_METHOD) +ATTRIBUTE_SET_CZL_TRUE_REQUEST = ( + 'ModifyLoadBalancerAttributes', + {'LoadBalancerAttributes.CrossZoneLoadBalancing.Enabled': 'true', + 'LoadBalancerName': 'test_elb'}, mock.ANY, mock.ANY) +ATTRIBUTE_SET_CZL_FALSE_REQUEST = ( + 'ModifyLoadBalancerAttributes', + {'LoadBalancerAttributes.CrossZoneLoadBalancing.Enabled': 'false', + 'LoadBalancerName': 'test_elb'}, mock.ANY, mock.ANY) + +# Tests to be run on an LbAttributes +# Format: +# (EC2_RESPONSE_STRING, list( (string_of_attribute_to_test, value) ) ) +ATTRIBUTE_TESTS = [ + (ATTRIBUTE_GET_TRUE_CZL_RESPONSE, + [('cross_zone_load_balancing.enabled', True)]), + (ATTRIBUTE_GET_FALSE_CZL_RESPONSE, + [('cross_zone_load_balancing.enabled', False)]), + (ATTRIBUTE_GET_CS_RESPONSE, + [('connecting_settings.idle_timeout', 30)]), +] + + +class TestLbAttributes(unittest.TestCase): + """Tests LB Attributes.""" + def _setup_mock(self): + """Sets up a mock elb request. 
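+ Patches make_request on a real ELBConnection, so no HTTP calls go out.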
+ Returns: response, elb connection and LoadBalancer + """ + mock_response = mock.Mock() + mock_response.status = 200 + elb = ELBConnection(aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key') + elb.make_request = mock.Mock(return_value=mock_response) + return mock_response, elb, LoadBalancer(elb, 'test_elb') + + def _verify_attributes(self, attributes, attr_tests): + """Verifies an LbAttributes object.""" + for attr, result in attr_tests: + attr_result = attributes + for sub_attr in attr.split('.'): + attr_result = getattr(attr_result, sub_attr, None) + self.assertEqual(attr_result, result) + + def test_get_all_lb_attributes(self): + """Tests getting the LbAttributes from the elb.connection.""" + mock_response, elb, _ = self._setup_mock() + + for response, attr_tests in ATTRIBUTE_TESTS: + mock_response.read.return_value = response + attributes = elb.get_all_lb_attributes('test_elb') + self.assertTrue(isinstance(attributes, LbAttributes)) + self._verify_attributes(attributes, attr_tests) + + def test_get_lb_attribute(self): + """Tests getting a single attribute from elb.connection.""" + mock_response, elb, _ = self._setup_mock() + + tests = [ + ('crossZoneLoadBalancing', True, ATTRIBUTE_GET_TRUE_CZL_RESPONSE), + ('crossZoneLoadBalancing', False, ATTRIBUTE_GET_FALSE_CZL_RESPONSE), + ] + + for attr, value, response in tests: + mock_response.read.return_value = response + status = elb.get_lb_attribute('test_elb', attr) + self.assertEqual(status, value) + + def test_modify_lb_attribute(self): + """Tests setting the attributes from elb.connection.""" + mock_response, elb, _ = self._setup_mock() + + tests = [ + ('crossZoneLoadBalancing', True, ATTRIBUTE_SET_CZL_TRUE_REQUEST), + ('crossZoneLoadBalancing', False, ATTRIBUTE_SET_CZL_FALSE_REQUEST), + ] + + for attr, value, args in tests: + mock_response.read.return_value = ATTRIBUTE_SET_RESPONSE + result = elb.modify_lb_attribute('test_elb', attr, value) + self.assertTrue(result) + elb.make_request.assert_called_with(*args) + + def test_lb_get_attributes(self): + """Tests the LbAttributes from the ELB object.""" + mock_response, _, lb = self._setup_mock() + + for response, attr_tests in ATTRIBUTE_TESTS: + mock_response.read.return_value = response + attributes = lb.get_attributes(force=True) + self.assertTrue(isinstance(attributes, LbAttributes)) + self._verify_attributes(attributes, attr_tests) + + def test_lb_is_cross_zone_load_balancing(self): + """Tests checking is_cross_zone_load_balancing.""" + mock_response, _, lb = self._setup_mock() + + tests = [ + # Format: (method, args, result, response) + # Gets a true result. + (lb.is_cross_zone_load_balancing, [], True, + ATTRIBUTE_GET_TRUE_CZL_RESPONSE), + # Returns the previous calls cached value. + (lb.is_cross_zone_load_balancing, [], True, + ATTRIBUTE_GET_FALSE_CZL_RESPONSE), + # Gets a false result. 
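+            # (passing True forces a re-read instead of reusing the
+            # cached attributes, so the new response body takes effect)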
+ (lb.is_cross_zone_load_balancing, [True], False, + ATTRIBUTE_GET_FALSE_CZL_RESPONSE), + ] + + for method, args, result, response in tests: + mock_response.read.return_value = response + self.assertEqual(method(*args), result) + + def test_lb_enable_cross_zone_load_balancing(self): + """Tests enabling cross zone balancing from LoadBalancer.""" + mock_response, elb, lb = self._setup_mock() + + mock_response.read.return_value = ATTRIBUTE_SET_RESPONSE + self.assertTrue(lb.enable_cross_zone_load_balancing()) + elb.make_request.assert_called_with(*ATTRIBUTE_SET_CZL_TRUE_REQUEST) + + def test_lb_disable_cross_zone_load_balancing(self): + """Tests disabling cross zone balancing from LoadBalancer.""" + mock_response, elb, lb = self._setup_mock() + + mock_response.read.return_value = ATTRIBUTE_SET_RESPONSE + self.assertTrue(lb.disable_cross_zone_load_balancing()) + elb.make_request.assert_called_with(*ATTRIBUTE_SET_CZL_FALSE_REQUEST) + + def test_lb_get_connection_settings(self): + """Tests checking connectionSettings attribute""" + mock_response, elb, _ = self._setup_mock() + + attrs = [('idle_timeout', 30), ] + mock_response.read.return_value = ATTRIBUTE_GET_CS_RESPONSE + attributes = elb.get_all_lb_attributes('test_elb') + self.assertTrue(isinstance(attributes, LbAttributes)) + for attr, value in attrs: + self.assertEqual(getattr(attributes.connecting_settings, attr), value) + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/elb/test_listener.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/elb/test_listener.py new file mode 100644 index 0000000000000000000000000000000000000000..a9d29c5b95a82dccb51252f7f4d0fd1d40993b0b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/elb/test_listener.py @@ -0,0 +1,125 @@ +#!/usr/bin/env python +import xml.sax +from tests.unit import unittest + +import boto.resultset +from boto.ec2.elb.loadbalancer import LoadBalancer +from boto.ec2.elb.listener import Listener + + +LISTENERS_RESPONSE = b""" + + + + + + 2013-07-09T19:18:00.520Z + elb-boto-unit-test + + 30 + TCP:8000 + 10 + 5 + 2 + + + + + + HTTP + 80 + HTTP + 8000 + + + + + + HTTP + 8080 + HTTP + 80 + + + + + + TCP + 2525 + TCP + 25 + + + + + + + + + + + us-east-1a + + elb-boto-unit-test-408121642.us-east-1.elb.amazonaws.com + Z3DZXE0Q79N41H + internet-facing + + amazon-elb + amazon-elb-sg + + elb-boto-unit-test-408121642.us-east-1.elb.amazonaws.com + + + + + + + 5763d932-e8cc-11e2-a940-11136cceffb8 + + +""" + + +class TestListenerResponseParsing(unittest.TestCase): + def test_parse_complex(self): + rs = boto.resultset.ResultSet([ + ('member', LoadBalancer) + ]) + h = boto.handler.XmlHandler(rs, None) + xml.sax.parseString(LISTENERS_RESPONSE, h) + listeners = rs[0].listeners + self.assertEqual( + sorted([l.get_complex_tuple() for l in listeners]), + [ + (80, 8000, 'HTTP', 'HTTP'), + (2525, 25, 'TCP', 'TCP'), + (8080, 80, 'HTTP', 'HTTP'), + ] + ) + +class TestListenerGetItem(unittest.TestCase): + def test_getitem_for_http_listener(self): + listener = Listener(load_balancer_port=80, + instance_port=80, + protocol='HTTP', + instance_protocol='HTTP') + self.assertEqual(listener[0], 80) + self.assertEqual(listener[1], 80) + self.assertEqual(listener[2], 'HTTP') + self.assertEqual(listener[3], 'HTTP') + + def test_getitem_for_https_listener(self): + listener = Listener(load_balancer_port=443, + instance_port=80, + protocol='HTTPS', + instance_protocol='HTTP', + ssl_certificate_id='look_at_me_im_an_arn') + self.assertEqual(listener[0], 443) 
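+        # Indexes 0-3 mirror get_complex_tuple(); index 4 (below) is the
+        # SSL certificate ARN, which only HTTPS/SSL listeners carry.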
+ self.assertEqual(listener[1], 80) + self.assertEqual(listener[2], 'HTTPS') + self.assertEqual(listener[3], 'HTTP') + self.assertEqual(listener[4], 'look_at_me_im_an_arn') + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/elb/test_loadbalancer.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/elb/test_loadbalancer.py new file mode 100644 index 0000000000000000000000000000000000000000..a042df4863d8bf003912edd74c410ce464aaafe3 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/elb/test_loadbalancer.py @@ -0,0 +1,143 @@ +#!/usr/bin/env python + +from tests.unit import unittest +from tests.compat import mock + +from boto.ec2.elb import ELBConnection +from boto.ec2.elb import LoadBalancer + +DISABLE_RESPONSE = b""" + + 3be1508e-c444-4fef-89cc-0b1223c4f02fEXAMPLE + + sample-zone + + +""" + + +class TestInstanceStatusResponseParsing(unittest.TestCase): + def test_next_token(self): + elb = ELBConnection(aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key') + mock_response = mock.Mock() + mock_response.read.return_value = DISABLE_RESPONSE + mock_response.status = 200 + elb.make_request = mock.Mock(return_value=mock_response) + disabled = elb.disable_availability_zones('mine', ['sample-zone']) + self.assertEqual(disabled, ['sample-zone']) + + +DESCRIBE_RESPONSE = b""" + + + + + + 2013-07-09T19:18:00.520Z + elb-boto-unit-test + + + + + + + + + + + + AWSConsole-SSLNegotiationPolicy-my-test-loadbalancer + EnableProxyProtocol + + + + + us-east-1a + + elb-boto-unit-test-408121642.us-east-1.elb.amazonaws.com + Z3DZXE0Q79N41H + internet-facing + + amazon-elb + amazon-elb-sg + + elb-boto-unit-test-408121642.us-east-1.elb.amazonaws.com + + + + EnableProxyProtocol + + 80 + + + + + + 1234 + + + 5763d932-e8cc-11e2-a940-11136cceffb8 + + +""" + + +class TestDescribeLoadBalancers(unittest.TestCase): + def test_other_policy(self): + elb = ELBConnection(aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key') + mock_response = mock.Mock() + mock_response.read.return_value = DESCRIBE_RESPONSE + mock_response.status = 200 + elb.make_request = mock.Mock(return_value=mock_response) + load_balancers = elb.get_all_load_balancers() + self.assertEqual(len(load_balancers), 1) + + lb = load_balancers[0] + self.assertEqual(len(lb.policies.other_policies), 2) + self.assertEqual(lb.policies.other_policies[0].policy_name, + 'AWSConsole-SSLNegotiationPolicy-my-test-loadbalancer') + self.assertEqual(lb.policies.other_policies[1].policy_name, + 'EnableProxyProtocol') + + self.assertEqual(len(lb.backends), 1) + self.assertEqual(len(lb.backends[0].policies), 1) + self.assertEqual(lb.backends[0].policies[0].policy_name, + 'EnableProxyProtocol') + self.assertEqual(lb.backends[0].instance_port, 80) + + def test_request_with_marker(self): + elb = ELBConnection(aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key') + mock_response = mock.Mock() + mock_response.read.return_value = DESCRIBE_RESPONSE + mock_response.status = 200 + elb.make_request = mock.Mock(return_value=mock_response) + load_balancers1 = elb.get_all_load_balancers() + self.assertEqual('1234', load_balancers1.marker) + load_balancers2 = elb.get_all_load_balancers(marker=load_balancers1.marker) + self.assertEqual(len(load_balancers2), 1) + + +DETACH_RESPONSE = r""" + + 3be1508e-c444-4fef-89cc-0b1223c4f02fEXAMPLE + +""" + + +class TestDetachSubnets(unittest.TestCase): + def test_detach_subnets(self): + 
elb = ELBConnection(aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key') + lb = LoadBalancer(elb, "mylb") + + mock_response = mock.Mock() + mock_response.read.return_value = DETACH_RESPONSE + mock_response.status = 200 + elb.make_request = mock.Mock(return_value=mock_response) + lb.detach_subnets("s-xxx") + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_address.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_address.py new file mode 100644 index 0000000000000000000000000000000000000000..519918e752dd65b88356a29d53d270249613501d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_address.py @@ -0,0 +1,145 @@ +from tests.compat import mock, unittest + +from boto.ec2.address import Address + + +class AddressTest(unittest.TestCase): + def setUp(self): + self.address = Address() + self.address.connection = mock.Mock() + self.address.public_ip = "192.168.1.1" + + def check_that_attribute_has_been_set(self, name, value, attribute): + self.address.endElement(name, value, None) + self.assertEqual(getattr(self.address, attribute), value) + + def test_endElement_sets_correct_attributes_with_values(self): + for arguments in [("publicIp", "192.168.1.1", "public_ip"), + ("instanceId", 1, "instance_id"), + ("domain", "some domain", "domain"), + ("allocationId", 1, "allocation_id"), + ("associationId", 1, "association_id"), + ("somethingRandom", "somethingRandom", "somethingRandom")]: + self.check_that_attribute_has_been_set(arguments[0], arguments[1], arguments[2]) + + def test_release_calls_connection_release_address_with_correct_args(self): + self.address.release() + self.address.connection.release_address.assert_called_with( + public_ip="192.168.1.1", + dry_run=False + ) + + def test_associate_calls_connection_associate_address_with_correct_args(self): + self.address.associate(1) + self.address.connection.associate_address.assert_called_with( + instance_id=1, + public_ip="192.168.1.1", + allow_reassociation=False, + network_interface_id=None, + private_ip_address=None, + dry_run=False + ) + + def test_disassociate_calls_connection_disassociate_address_with_correct_args(self): + self.address.disassociate() + self.address.connection.disassociate_address.assert_called_with( + public_ip="192.168.1.1", + dry_run=False + ) + + +class AddressWithAllocationTest(unittest.TestCase): + def setUp(self): + self.address = Address() + self.address.connection = mock.Mock() + self.address.public_ip = "192.168.1.1" + self.address.allocation_id = "aid1" + + def check_that_attribute_has_been_set(self, name, value, attribute): + self.address.endElement(name, value, None) + self.assertEqual(getattr(self.address, attribute), value) + + def test_endElement_sets_correct_attributes_with_values(self): + for arguments in [("publicIp", "192.168.1.1", "public_ip"), + ("instanceId", 1, "instance_id"), + ("domain", "some domain", "domain"), + ("allocationId", 1, "allocation_id"), + ("associationId", 1, "association_id"), + ("somethingRandom", "somethingRandom", "somethingRandom")]: + self.check_that_attribute_has_been_set(arguments[0], arguments[1], arguments[2]) + + def test_release_calls_connection_release_address_with_correct_args(self): + self.address.release() + self.address.connection.release_address.assert_called_with( + allocation_id="aid1", + dry_run=False + ) + + def test_associate_calls_connection_associate_address_with_correct_args(self): + self.address.associate(1) + 
self.address.connection.associate_address.assert_called_with( + instance_id=1, + public_ip="192.168.1.1", + allocation_id="aid1", + network_interface_id=None, + private_ip_address=None, + allow_reassociation=False, + dry_run=False + ) + + def test_disassociate_calls_connection_disassociate_address_with_correct_args(self): + self.address.disassociate() + self.address.connection.disassociate_address.assert_called_with( + public_ip="192.168.1.1", + dry_run=False + ) + +class AddressWithNetworkInterfaceTest(unittest.TestCase): + def setUp(self): + self.address = Address() + self.address.connection = mock.Mock() + self.address.public_ip = "192.168.1.1" + self.address.allocation_id = "aid1" + + def check_that_attribute_has_been_set(self, name, value, attribute): + self.address.endElement(name, value, None) + self.assertEqual(getattr(self.address, attribute), value) + + def test_endElement_sets_correct_attributes_with_values(self): + for arguments in [("publicIp", "192.168.1.1", "public_ip"), + ("instanceId", 1, "instance_id"), + ("domain", "some domain", "domain"), + ("allocationId", 1, "allocation_id"), + ("associationId", 1, "association_id"), + ("somethingRandom", "somethingRandom", "somethingRandom")]: + self.check_that_attribute_has_been_set(arguments[0], arguments[1], arguments[2]) + + + def test_release_calls_connection_release_address_with_correct_args(self): + self.address.release() + self.address.connection.release_address.assert_called_with( + allocation_id="aid1", + dry_run=False + ) + + def test_associate_calls_connection_associate_address_with_correct_args(self): + self.address.associate(network_interface_id=1) + self.address.connection.associate_address.assert_called_with( + instance_id=None, + public_ip="192.168.1.1", + network_interface_id=1, + private_ip_address=None, + allocation_id="aid1", + allow_reassociation=False, + dry_run=False + ) + + def test_disassociate_calls_connection_disassociate_address_with_correct_args(self): + self.address.disassociate() + self.address.connection.disassociate_address.assert_called_with( + public_ip="192.168.1.1", + dry_run=False + ) + +if __name__ == "__main__": + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_blockdevicemapping.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_blockdevicemapping.py new file mode 100644 index 0000000000000000000000000000000000000000..83cdf184800cbc311bce3e7a06d33fb87fecb5a4 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_blockdevicemapping.py @@ -0,0 +1,146 @@ +from tests.compat import unittest + +from boto.ec2.connection import EC2Connection +from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping + +from tests.compat import OrderedDict +from tests.unit import AWSMockServiceTestCase + + +class BlockDeviceTypeTests(unittest.TestCase): + def setUp(self): + self.block_device_type = BlockDeviceType() + + def check_that_attribute_has_been_set(self, name, value, attribute): + self.block_device_type.endElement(name, value, None) + self.assertEqual(getattr(self.block_device_type, attribute), value) + + def test_endElement_sets_correct_attributes_with_values(self): + for arguments in [("volumeId", 1, "volume_id"), + ("virtualName", "some name", "ephemeral_name"), + ("snapshotId", 1, "snapshot_id"), + ("volumeSize", 1, "size"), + ("status", "some status", "status"), + ("attachTime", 1, "attach_time"), + ("somethingRandom", "somethingRandom", "somethingRandom")]: + self.check_that_attribute_has_been_set(arguments[0], arguments[1], 
arguments[2]) + + def test_endElement_with_name_NoDevice_value_true(self): + self.block_device_type.endElement("NoDevice", 'true', None) + self.assertEqual(self.block_device_type.no_device, True) + + def test_endElement_with_name_NoDevice_value_other(self): + self.block_device_type.endElement("NoDevice", 'something else', None) + self.assertEqual(self.block_device_type.no_device, False) + + def test_endElement_with_name_deleteOnTermination_value_true(self): + self.block_device_type.endElement("deleteOnTermination", "true", None) + self.assertEqual(self.block_device_type.delete_on_termination, True) + + def test_endElement_with_name_deleteOnTermination_value_other(self): + self.block_device_type.endElement("deleteOnTermination", 'something else', None) + self.assertEqual(self.block_device_type.delete_on_termination, False) + + def test_endElement_with_name_encrypted_value_true(self): + self.block_device_type.endElement("Encrypted", "true", None) + self.assertEqual(self.block_device_type.encrypted, True) + + def test_endElement_with_name_Encrypted_value_other(self): + self.block_device_type.endElement("Encrypted", 'something else', None) + self.assertEqual(self.block_device_type.encrypted, False) + + +class BlockDeviceMappingTests(unittest.TestCase): + def setUp(self): + self.block_device_mapping = BlockDeviceMapping() + + def block_device_type_eq(self, b1, b2): + if isinstance(b1, BlockDeviceType) and isinstance(b2, BlockDeviceType): + return all([b1.connection == b2.connection, + b1.ephemeral_name == b2.ephemeral_name, + b1.no_device == b2.no_device, + b1.volume_id == b2.volume_id, + b1.snapshot_id == b2.snapshot_id, + b1.status == b2.status, + b1.attach_time == b2.attach_time, + b1.delete_on_termination == b2.delete_on_termination, + b1.size == b2.size, + b1.encrypted == b2.encrypted]) + + def test_startElement_with_name_ebs_sets_and_returns_current_value(self): + retval = self.block_device_mapping.startElement("ebs", None, None) + assert self.block_device_type_eq(retval, BlockDeviceType(self.block_device_mapping)) + + def test_startElement_with_name_virtualName_sets_and_returns_current_value(self): + retval = self.block_device_mapping.startElement("virtualName", None, None) + assert self.block_device_type_eq(retval, BlockDeviceType(self.block_device_mapping)) + + def test_endElement_with_name_device_sets_current_name_dev_null(self): + self.block_device_mapping.endElement("device", "/dev/null", None) + self.assertEqual(self.block_device_mapping.current_name, "/dev/null") + + def test_endElement_with_name_device_sets_current_name(self): + self.block_device_mapping.endElement("deviceName", "some device name", None) + self.assertEqual(self.block_device_mapping.current_name, "some device name") + + def test_endElement_with_name_item_sets_current_name_key_to_current_value(self): + self.block_device_mapping.current_name = "some name" + self.block_device_mapping.current_value = "some value" + self.block_device_mapping.endElement("item", "some item", None) + self.assertEqual(self.block_device_mapping["some name"], "some value") + + +class TestLaunchConfiguration(AWSMockServiceTestCase): + connection_class = EC2Connection + + def default_body(self): + # This is a dummy response + return b""" + + + """ + + def test_run_instances_block_device_mapping(self): + # Same as the test in ``unit/ec2/autoscale/test_group.py:TestLaunchConfiguration``, + # but with modified request parameters (due to a mismatch between EC2 & + # Autoscaling). 
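+        # (EC2 expects flat BlockDeviceMapping.N.* request parameters, as
+        # asserted below; the autoscaling API nests the same data under a
+        # member-style list instead.)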
+ self.set_http_response(status_code=200) + dev_sdf = BlockDeviceType(snapshot_id='snap-12345') + dev_sdg = BlockDeviceType(snapshot_id='snap-12346', delete_on_termination=True, encrypted=True) + + class OrderedBlockDeviceMapping(OrderedDict, BlockDeviceMapping): + pass + + bdm = OrderedBlockDeviceMapping() + bdm.update(OrderedDict((('/dev/sdf', dev_sdf), ('/dev/sdg', dev_sdg)))) + + response = self.service_connection.run_instances( + image_id='123456', + instance_type='m1.large', + security_groups=['group1', 'group2'], + block_device_map=bdm + ) + + self.assert_request_parameters({ + 'Action': 'RunInstances', + 'BlockDeviceMapping.1.DeviceName': '/dev/sdf', + 'BlockDeviceMapping.1.Ebs.DeleteOnTermination': 'false', + 'BlockDeviceMapping.1.Ebs.SnapshotId': 'snap-12345', + 'BlockDeviceMapping.2.DeviceName': '/dev/sdg', + 'BlockDeviceMapping.2.Ebs.DeleteOnTermination': 'true', + 'BlockDeviceMapping.2.Ebs.SnapshotId': 'snap-12346', + 'BlockDeviceMapping.2.Ebs.Encrypted': 'true', + 'ImageId': '123456', + 'InstanceType': 'm1.large', + 'MaxCount': 1, + 'MinCount': 1, + 'SecurityGroup.1': 'group1', + 'SecurityGroup.2': 'group2', + }, ignore_params_values=[ + 'Version', 'AWSAccessKeyId', 'SignatureMethod', 'SignatureVersion', + 'Timestamp' + ]) + + +if __name__ == "__main__": + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_connection.py new file mode 100755 index 0000000000000000000000000000000000000000..b51e0e36e39cc2ea16febc85e1b9a19a378b93cc --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_connection.py @@ -0,0 +1,1703 @@ +#!/usr/bin/env python +import httplib + +from datetime import datetime, timedelta +from mock import MagicMock, Mock +from tests.unit import unittest +from tests.unit import AWSMockServiceTestCase + +import boto.ec2 + +from boto.regioninfo import RegionInfo +from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping +from boto.ec2.connection import EC2Connection +from boto.ec2.snapshot import Snapshot +from boto.ec2.reservedinstance import ReservedInstancesConfiguration + + +class TestEC2ConnectionBase(AWSMockServiceTestCase): + connection_class = EC2Connection + + def setUp(self): + super(TestEC2ConnectionBase, self).setUp() + self.ec2 = self.service_connection + + +class TestReservedInstanceOfferings(TestEC2ConnectionBase): + + def default_body(self): + return b""" + + d3253568-edcf-4897-9a3d-fb28e0b3fa38 + + + 2964d1bf71d8 + c1.medium + us-east-1c + 94608000 + 775.0 + 0.0 + product description + default + USD + Heavy Utilization + + + Hourly + 0.095 + + + false + + + 0.045 + 1 + + + + + 2dce26e46889 + c1.medium + us-east-1c + 94608000 + 775.0 + 0.0 + Linux/UNIX + default + USD + Heavy Utilization + + + Hourly + 0.035 + + + false + + + + next_token + + """ + + def test_get_reserved_instance_offerings(self): + self.set_http_response(status_code=200) + response = self.ec2.get_all_reserved_instances_offerings() + self.assertEqual(len(response), 2) + instance = response[0] + self.assertEqual(instance.id, '2964d1bf71d8') + self.assertEqual(instance.instance_type, 'c1.medium') + self.assertEqual(instance.availability_zone, 'us-east-1c') + self.assertEqual(instance.duration, 94608000) + self.assertEqual(instance.fixed_price, '775.0') + self.assertEqual(instance.usage_price, '0.0') + self.assertEqual(instance.description, 'product description') + self.assertEqual(instance.instance_tenancy, 'default') + self.assertEqual(instance.currency_code, 
'USD') + self.assertEqual(instance.offering_type, 'Heavy Utilization') + self.assertEqual(len(instance.recurring_charges), 1) + self.assertEqual(instance.recurring_charges[0].frequency, 'Hourly') + self.assertEqual(instance.recurring_charges[0].amount, '0.095') + self.assertEqual(len(instance.pricing_details), 1) + self.assertEqual(instance.pricing_details[0].price, '0.045') + self.assertEqual(instance.pricing_details[0].count, '1') + + def test_get_reserved_instance_offerings_params(self): + self.set_http_response(status_code=200) + self.ec2.get_all_reserved_instances_offerings( + reserved_instances_offering_ids=['id1', 'id2'], + instance_type='t1.micro', + availability_zone='us-east-1', + product_description='description', + instance_tenancy='dedicated', + offering_type='offering_type', + include_marketplace=False, + min_duration=100, + max_duration=1000, + max_instance_count=1, + next_token='next_token', + max_results=10 + ) + self.assert_request_parameters({ + 'Action': 'DescribeReservedInstancesOfferings', + 'ReservedInstancesOfferingId.1': 'id1', + 'ReservedInstancesOfferingId.2': 'id2', + 'InstanceType': 't1.micro', + 'AvailabilityZone': 'us-east-1', + 'ProductDescription': 'description', + 'InstanceTenancy': 'dedicated', + 'OfferingType': 'offering_type', + 'IncludeMarketplace': 'false', + 'MinDuration': '100', + 'MaxDuration': '1000', + 'MaxInstanceCount': '1', + 'NextToken': 'next_token', + 'MaxResults': '10', }, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', 'Version']) + + +class TestPurchaseReservedInstanceOffering(TestEC2ConnectionBase): + def default_body(self): + return b"""""" + + def test_serialized_api_args(self): + self.set_http_response(status_code=200) + response = self.ec2.purchase_reserved_instance_offering( + 'offering_id', 1, (100.0, 'USD')) + self.assert_request_parameters({ + 'Action': 'PurchaseReservedInstancesOffering', + 'InstanceCount': 1, + 'ReservedInstancesOfferingId': 'offering_id', + 'LimitPrice.Amount': '100.0', + 'LimitPrice.CurrencyCode': 'USD', }, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + + +class TestCreateImage(TestEC2ConnectionBase): + def default_body(self): + return b""" + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + ami-4fa54026 +""" + + def test_minimal(self): + self.set_http_response(status_code=200) + response = self.ec2.create_image( + 'instance_id', 'name') + self.assert_request_parameters({ + 'Action': 'CreateImage', + 'InstanceId': 'instance_id', + 'Name': 'name'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + + def test_block_device_mapping(self): + self.set_http_response(status_code=200) + bdm = BlockDeviceMapping() + bdm['test'] = BlockDeviceType() + response = self.ec2.create_image( + 'instance_id', 'name', block_device_mapping=bdm) + self.assert_request_parameters({ + 'Action': 'CreateImage', + 'InstanceId': 'instance_id', + 'Name': 'name', + 'BlockDeviceMapping.1.DeviceName': 'test', + 'BlockDeviceMapping.1.Ebs.DeleteOnTermination': 'false'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + + +class TestCancelReservedInstancesListing(TestEC2ConnectionBase): + def default_body(self): + return b""" + + request_id + + + listing_id + instance_id + 2012-07-12T16:55:28.000Z + 2012-07-12T16:55:28.000Z + cancelled + CANCELLED + + + Available + 0 + + + Sold + 0 + + + Cancelled + 1 + + + Pending + 0 + + 
+ + + 5 + 166.64 + USD + false + + + 4 + 133.32 + USD + false + + + 3 + 99.99 + USD + false + + + 2 + 66.66 + USD + false + + + 1 + 33.33 + USD + false + + + + XqJIt1342112125076 + + + + """ + + def test_reserved_instances_listing(self): + self.set_http_response(status_code=200) + response = self.ec2.cancel_reserved_instances_listing() + self.assertEqual(len(response), 1) + cancellation = response[0] + self.assertEqual(cancellation.status, 'cancelled') + self.assertEqual(cancellation.status_message, 'CANCELLED') + self.assertEqual(len(cancellation.instance_counts), 4) + first = cancellation.instance_counts[0] + self.assertEqual(first.state, 'Available') + self.assertEqual(first.instance_count, 0) + self.assertEqual(len(cancellation.price_schedules), 5) + schedule = cancellation.price_schedules[0] + self.assertEqual(schedule.term, 5) + self.assertEqual(schedule.price, '166.64') + self.assertEqual(schedule.currency_code, 'USD') + self.assertEqual(schedule.active, False) + + +class TestCreateReservedInstancesListing(TestEC2ConnectionBase): + def default_body(self): + return b""" + + request_id + + + listing_id + instance_id + 2012-07-17T17:11:09.449Z + 2012-07-17T17:11:09.468Z + active + ACTIVE + + + Available + 1 + + + Sold + 0 + + + Cancelled + 0 + + + Pending + 0 + + + + + 11 + 2.5 + USD + true + + + 10 + 2.5 + USD + false + + + 9 + 2.5 + USD + false + + + 8 + 2.0 + USD + false + + + 7 + 2.0 + USD + false + + + 6 + 2.0 + USD + false + + + 5 + 1.5 + USD + false + + + 4 + 1.5 + USD + false + + + 3 + 0.7 + USD + false + + + 2 + 0.7 + USD + false + + + 1 + 0.1 + USD + false + + + + myIdempToken1 + + + + """ + + def test_create_reserved_instances_listing(self): + self.set_http_response(status_code=200) + response = self.ec2.create_reserved_instances_listing( + 'instance_id', 1, [('2.5', 11), ('2.0', 8)], 'client_token') + self.assertEqual(len(response), 1) + cancellation = response[0] + self.assertEqual(cancellation.status, 'active') + self.assertEqual(cancellation.status_message, 'ACTIVE') + self.assertEqual(len(cancellation.instance_counts), 4) + first = cancellation.instance_counts[0] + self.assertEqual(first.state, 'Available') + self.assertEqual(first.instance_count, 1) + self.assertEqual(len(cancellation.price_schedules), 11) + schedule = cancellation.price_schedules[0] + self.assertEqual(schedule.term, 11) + self.assertEqual(schedule.price, '2.5') + self.assertEqual(schedule.currency_code, 'USD') + self.assertEqual(schedule.active, True) + + self.assert_request_parameters({ + 'Action': 'CreateReservedInstancesListing', + 'ReservedInstancesId': 'instance_id', + 'InstanceCount': '1', + 'ClientToken': 'client_token', + 'PriceSchedules.0.Price': '2.5', + 'PriceSchedules.0.Term': '11', + 'PriceSchedules.1.Price': '2.0', + 'PriceSchedules.1.Term': '8', }, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + + +class TestDescribeSpotInstanceRequests(TestEC2ConnectionBase): + def default_body(self): + return b""" + + requestid + + + sir-id + 0.003000 + one-time + active + + fulfilled + 2012-10-19T18:09:26.000Z + Your Spot request is fulfilled. 
+ + mylaunchgroup + + ami-id + mykeypair + + + sg-id + groupname + + + t1.micro + + false + + + i-id + 2012-10-19T18:07:05.000Z + Linux/UNIX + us-east-1d + + + + """ + + def test_describe_spot_instance_requets(self): + self.set_http_response(status_code=200) + response = self.ec2.get_all_spot_instance_requests() + self.assertEqual(len(response), 1) + spotrequest = response[0] + self.assertEqual(spotrequest.id, 'sir-id') + self.assertEqual(spotrequest.price, 0.003) + self.assertEqual(spotrequest.type, 'one-time') + self.assertEqual(spotrequest.state, 'active') + self.assertEqual(spotrequest.fault, None) + self.assertEqual(spotrequest.valid_from, None) + self.assertEqual(spotrequest.valid_until, None) + self.assertEqual(spotrequest.launch_group, 'mylaunchgroup') + self.assertEqual(spotrequest.launched_availability_zone, 'us-east-1d') + self.assertEqual(spotrequest.product_description, 'Linux/UNIX') + self.assertEqual(spotrequest.availability_zone_group, None) + self.assertEqual(spotrequest.create_time, + '2012-10-19T18:07:05.000Z') + self.assertEqual(spotrequest.instance_id, 'i-id') + launch_spec = spotrequest.launch_specification + self.assertEqual(launch_spec.key_name, 'mykeypair') + self.assertEqual(launch_spec.instance_type, 't1.micro') + self.assertEqual(launch_spec.image_id, 'ami-id') + self.assertEqual(launch_spec.placement, None) + self.assertEqual(launch_spec.kernel, None) + self.assertEqual(launch_spec.ramdisk, None) + self.assertEqual(launch_spec.monitored, False) + self.assertEqual(launch_spec.subnet_id, None) + self.assertEqual(launch_spec.block_device_mapping, None) + self.assertEqual(launch_spec.instance_profile, None) + self.assertEqual(launch_spec.ebs_optimized, False) + status = spotrequest.status + self.assertEqual(status.code, 'fulfilled') + self.assertEqual(status.update_time, '2012-10-19T18:09:26.000Z') + self.assertEqual(status.message, 'Your Spot request is fulfilled.') + + +class TestCopySnapshot(TestEC2ConnectionBase): + def default_body(self): + return b""" + + request_id + snap-copied-id + + """ + + def test_copy_snapshot(self): + self.set_http_response(status_code=200) + snapshot_id = self.ec2.copy_snapshot('us-west-2', 'snap-id', + 'description') + self.assertEqual(snapshot_id, 'snap-copied-id') + + self.assert_request_parameters({ + 'Action': 'CopySnapshot', + 'Description': 'description', + 'SourceRegion': 'us-west-2', + 'SourceSnapshotId': 'snap-id'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + + +class TestCopyImage(TestEC2ConnectionBase): + def default_body(self): + return b""" + + request_id + ami-copied-id + + """ + + def test_copy_image(self): + self.set_http_response(status_code=200) + copied_ami = self.ec2.copy_image('us-west-2', 'ami-id', + 'name', 'description', 'client-token') + self.assertEqual(copied_ami.image_id, 'ami-copied-id') + + self.assert_request_parameters({ + 'Action': 'CopyImage', + 'Description': 'description', + 'Name': 'name', + 'SourceRegion': 'us-west-2', + 'SourceImageId': 'ami-id', + 'ClientToken': 'client-token'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + + def test_copy_image_without_name(self): + self.set_http_response(status_code=200) + copied_ami = self.ec2.copy_image('us-west-2', 'ami-id', + description='description', + client_token='client-token') + self.assertEqual(copied_ami.image_id, 'ami-copied-id') + + self.assert_request_parameters({ + 'Action': 'CopyImage', + 'Description': 
'description', + 'SourceRegion': 'us-west-2', + 'SourceImageId': 'ami-id', + 'ClientToken': 'client-token'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + + +class TestAccountAttributes(TestEC2ConnectionBase): + def default_body(self): + return b""" + + 6d042e8a-4bc3-43e8-8265-3cbc54753f14 + + + vpc-max-security-groups-per-interface + + + 5 + + + + + max-instances + + + 50 + + + + + supported-platforms + + + EC2 + + + VPC + + + + + default-vpc + + + none + + + + + + """ + + def test_describe_account_attributes(self): + self.set_http_response(status_code=200) + parsed = self.ec2.describe_account_attributes() + self.assertEqual(len(parsed), 4) + self.assertEqual(parsed[0].attribute_name, + 'vpc-max-security-groups-per-interface') + self.assertEqual(parsed[0].attribute_values, + ['5']) + self.assertEqual(parsed[-1].attribute_name, + 'default-vpc') + self.assertEqual(parsed[-1].attribute_values, + ['none']) + + +class TestDescribeVPCAttribute(TestEC2ConnectionBase): + def default_body(self): + return b""" + + request_id + vpc-id + + false + + + """ + + def test_describe_vpc_attribute(self): + self.set_http_response(status_code=200) + parsed = self.ec2.describe_vpc_attribute('vpc-id', + 'enableDnsHostnames') + self.assertEqual(parsed.vpc_id, 'vpc-id') + self.assertFalse(parsed.enable_dns_hostnames) + self.assert_request_parameters({ + 'Action': 'DescribeVpcAttribute', + 'VpcId': 'vpc-id', + 'Attribute': 'enableDnsHostnames', }, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + + +class TestGetAllNetworkInterfaces(TestEC2ConnectionBase): + def default_body(self): + return b""" + + fc45294c-006b-457b-bab9-012f5b3b0e40 + + + eni-0f62d866 + subnet-c53c87ac + vpc-cc3c87a5 + ap-southeast-1b + + 053230519467 + false + in-use + 02:81:60:cb:27:37 + 10.0.0.146 + true + + + sg-3f4b5653 + default + + + + eni-attach-6537fc0c + i-22197876 + 053230519467 + 5 + attached + 2012-07-01T21:45:27.000Z + true + + + + + 10.0.0.146 + true + + + 10.0.0.148 + false + + + 10.0.0.150 + false + + + + +""" + + def test_get_all_network_interfaces(self): + self.set_http_response(status_code=200) + result = self.ec2.get_all_network_interfaces(network_interface_ids=['eni-0f62d866']) + self.assert_request_parameters({ + 'Action': 'DescribeNetworkInterfaces', + 'NetworkInterfaceId.1': 'eni-0f62d866'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(len(result), 1) + self.assertEqual(result[0].id, 'eni-0f62d866') + + def test_attachment_has_device_index(self): + self.set_http_response(status_code=200) + parsed = self.ec2.get_all_network_interfaces() + + self.assertEqual(5, parsed[0].attachment.device_index) + + +class TestGetAllImages(TestEC2ConnectionBase): + def default_body(self): + return b""" + + e32375e8-4ac3-4099-a8bf-3ec902b9023e + + + ami-abcd1234 + 111111111111/windows2008r2-hvm-i386-20130702 + available + 111111111111 + false + i386 + machine + windows + true + Windows Test + Windows Test Description + + + bp-6ba54002 + + + ebs + /dev/sda1 + + + /dev/sda1 + + snap-abcd1234 + 30 + true + standard + + + + xvdb + ephemeral0 + + + xvdc + ephemeral1 + + + xvdd + ephemeral2 + + + xvde + ephemeral3 + + + hvm + xen + + +""" + + def test_get_all_images(self): + self.set_http_response(status_code=200) + parsed = self.ec2.get_all_images() + self.assertEquals(1, len(parsed)) + self.assertEquals("ami-abcd1234", parsed[0].id) + 
self.assertEquals("111111111111/windows2008r2-hvm-i386-20130702", parsed[0].location) + self.assertEquals("available", parsed[0].state) + self.assertEquals("111111111111", parsed[0].ownerId) + self.assertEquals("111111111111", parsed[0].owner_id) + self.assertEquals(False, parsed[0].is_public) + self.assertEquals("i386", parsed[0].architecture) + self.assertEquals("machine", parsed[0].type) + self.assertEquals(None, parsed[0].kernel_id) + self.assertEquals(None, parsed[0].ramdisk_id) + self.assertEquals(None, parsed[0].owner_alias) + self.assertEquals("windows", parsed[0].platform) + self.assertEquals("Windows Test", parsed[0].name) + self.assertEquals("Windows Test Description", parsed[0].description) + self.assertEquals("ebs", parsed[0].root_device_type) + self.assertEquals("/dev/sda1", parsed[0].root_device_name) + self.assertEquals("hvm", parsed[0].virtualization_type) + self.assertEquals("xen", parsed[0].hypervisor) + self.assertEquals(None, parsed[0].instance_lifecycle) + + # 1 billing product parsed into a list + self.assertEquals(1, len(parsed[0].billing_products)) + self.assertEquals("bp-6ba54002", parsed[0].billing_products[0]) + + # Just verify length, there is already a block_device_mapping test + self.assertEquals(5, len(parsed[0].block_device_mapping)) + + # TODO: No tests for product codes? + + +class TestModifyInterfaceAttribute(TestEC2ConnectionBase): + def default_body(self): + return b""" + + 657a4623-5620-4232-b03b-427e852d71cf + true + +""" + + def test_modify_description(self): + self.set_http_response(status_code=200) + self.ec2.modify_network_interface_attribute('id', 'description', 'foo') + + self.assert_request_parameters({ + 'Action': 'ModifyNetworkInterfaceAttribute', + 'NetworkInterfaceId': 'id', + 'Description.Value': 'foo'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + + def test_modify_source_dest_check_bool(self): + self.set_http_response(status_code=200) + self.ec2.modify_network_interface_attribute('id', 'sourceDestCheck', + True) + + self.assert_request_parameters({ + 'Action': 'ModifyNetworkInterfaceAttribute', + 'NetworkInterfaceId': 'id', + 'SourceDestCheck.Value': 'true'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + + def test_modify_source_dest_check_str(self): + self.set_http_response(status_code=200) + self.ec2.modify_network_interface_attribute('id', 'sourceDestCheck', + 'true') + + self.assert_request_parameters({ + 'Action': 'ModifyNetworkInterfaceAttribute', + 'NetworkInterfaceId': 'id', + 'SourceDestCheck.Value': 'true'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + + def test_modify_source_dest_check_invalid(self): + self.set_http_response(status_code=200) + + with self.assertRaises(ValueError): + self.ec2.modify_network_interface_attribute('id', + 'sourceDestCheck', + 123) + + def test_modify_delete_on_termination_str(self): + self.set_http_response(status_code=200) + self.ec2.modify_network_interface_attribute('id', + 'deleteOnTermination', + True, attachment_id='bar') + + self.assert_request_parameters({ + 'Action': 'ModifyNetworkInterfaceAttribute', + 'NetworkInterfaceId': 'id', + 'Attachment.AttachmentId': 'bar', + 'Attachment.DeleteOnTermination': 'true'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + + def test_modify_delete_on_termination_bool(self): + 
self.set_http_response(status_code=200) + self.ec2.modify_network_interface_attribute('id', + 'deleteOnTermination', + 'false', + attachment_id='bar') + + self.assert_request_parameters({ + 'Action': 'ModifyNetworkInterfaceAttribute', + 'NetworkInterfaceId': 'id', + 'Attachment.AttachmentId': 'bar', + 'Attachment.DeleteOnTermination': 'false'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + + def test_modify_delete_on_termination_invalid(self): + self.set_http_response(status_code=200) + + with self.assertRaises(ValueError): + self.ec2.modify_network_interface_attribute('id', + 'deleteOnTermination', + 123, + attachment_id='bar') + + def test_modify_group_set_list(self): + self.set_http_response(status_code=200) + self.ec2.modify_network_interface_attribute('id', 'groupSet', + ['sg-1', 'sg-2']) + + self.assert_request_parameters({ + 'Action': 'ModifyNetworkInterfaceAttribute', + 'NetworkInterfaceId': 'id', + 'SecurityGroupId.1': 'sg-1', + 'SecurityGroupId.2': 'sg-2'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + + def test_modify_group_set_invalid(self): + self.set_http_response(status_code=200) + + with self.assertRaisesRegexp(TypeError, 'iterable'): + self.ec2.modify_network_interface_attribute('id', 'groupSet', + False) + + def test_modify_attr_invalid(self): + self.set_http_response(status_code=200) + + with self.assertRaisesRegexp(ValueError, 'Unknown attribute'): + self.ec2.modify_network_interface_attribute('id', 'invalid', 0) + + +class TestConnectToRegion(unittest.TestCase): + def setUp(self): + self.https_connection = Mock(spec=httplib.HTTPSConnection) + self.https_connection_factory = ( + Mock(return_value=self.https_connection), ()) + + def test_aws_region(self): + region = boto.ec2.RegionData.keys()[0] + self.ec2 = boto.ec2.connect_to_region( + region, + https_connection_factory=self.https_connection_factory, + aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key' + ) + self.assertEqual(boto.ec2.RegionData[region], self.ec2.host) + + def test_non_aws_region(self): + self.ec2 = boto.ec2.connect_to_region( + 'foo', + https_connection_factory=self.https_connection_factory, + aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key', + region=RegionInfo(name='foo', endpoint='https://foo.com/bar') + ) + self.assertEqual('https://foo.com/bar', self.ec2.host) + + def test_missing_region(self): + self.ec2 = boto.ec2.connect_to_region( + 'foo', + https_connection_factory=self.https_connection_factory, + aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key' + ) + self.assertEqual(None, self.ec2) + + +class TestTrimSnapshots(TestEC2ConnectionBase): + """ + Test snapshot trimming functionality by ensuring that expected calls + are made when given a known set of volume snapshots. + """ + def _get_snapshots(self): + """ + Generate a list of fake snapshots with names and dates. + """ + snaps = [] + + # Generate some dates offset by days, weeks, months. + # This is to validate the various types of snapshot logic handled by + # ``trim_snapshots``. + now = datetime.now() + dates = [ + now, + now - timedelta(days=1), + now - timedelta(days=2), + now - timedelta(days=7), + now - timedelta(days=14), + # We want to simulate 30/60/90-day snapshots, but February is + # short (only 28 days), so we decrease the delta by 2 days apiece. 
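+            # (i.e. 28/58/88-day offsets measured from the first of the
+            # current month, standing in for 30/60/90-day snapshots)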
+ # This prevents the ``delete_snapshot`` code below from being + # called, since they don't fall outside the allowed timeframes + # for the snapshots. + datetime(now.year, now.month, 1) - timedelta(days=28), + datetime(now.year, now.month, 1) - timedelta(days=58), + datetime(now.year, now.month, 1) - timedelta(days=88) + ] + + for date in dates: + # Create a fake snapshot for each date + snap = Snapshot(self.ec2) + snap.tags['Name'] = 'foo' + # Times are expected to be ISO8601 strings + snap.start_time = date.strftime('%Y-%m-%dT%H:%M:%S.000Z') + snaps.append(snap) + + return snaps + + def test_trim_defaults(self): + """ + Test trimming snapshots with the default arguments, which should + keep all monthly backups forever. The result of this test should + be that nothing is deleted. + """ + # Setup mocks + orig = { + 'get_all_snapshots': self.ec2.get_all_snapshots, + 'delete_snapshot': self.ec2.delete_snapshot + } + + snaps = self._get_snapshots() + + self.ec2.get_all_snapshots = MagicMock(return_value=snaps) + self.ec2.delete_snapshot = MagicMock() + + # Call the tested method + self.ec2.trim_snapshots() + + # Assertions + self.assertEqual(True, self.ec2.get_all_snapshots.called) + self.assertEqual(False, self.ec2.delete_snapshot.called) + + # Restore + self.ec2.get_all_snapshots = orig['get_all_snapshots'] + self.ec2.delete_snapshot = orig['delete_snapshot'] + + def test_trim_months(self): + """ + Test trimming monthly snapshots and ensure that older months + get deleted properly. The result of this test should be that + the two oldest snapshots get deleted. + """ + # Setup mocks + orig = { + 'get_all_snapshots': self.ec2.get_all_snapshots, + 'delete_snapshot': self.ec2.delete_snapshot + } + + snaps = self._get_snapshots() + + self.ec2.get_all_snapshots = MagicMock(return_value=snaps) + self.ec2.delete_snapshot = MagicMock() + + # Call the tested method + self.ec2.trim_snapshots(monthly_backups=1) + + # Assertions + self.assertEqual(True, self.ec2.get_all_snapshots.called) + self.assertEqual(2, self.ec2.delete_snapshot.call_count) + + # Restore + self.ec2.get_all_snapshots = orig['get_all_snapshots'] + self.ec2.delete_snapshot = orig['delete_snapshot'] + + +class TestModifyReservedInstances(TestEC2ConnectionBase): + def default_body(self): + return b""" + bef729b6-0731-4489-8881-2258746ae163 + rimod-3aae219d-3d63-47a9-a7e9-e764example +""" + + def test_serialized_api_args(self): + self.set_http_response(status_code=200) + response = self.ec2.modify_reserved_instances( + 'a-token-goes-here', + reserved_instance_ids=[ + '2567o137-8a55-48d6-82fb-7258506bb497', + ], + target_configurations=[ + ReservedInstancesConfiguration( + availability_zone='us-west-2c', + platform='EC2-VPC', + instance_count=3, + instance_type='c3.large' + ), + ] + ) + self.assert_request_parameters({ + 'Action': 'ModifyReservedInstances', + 'ClientToken': 'a-token-goes-here', + 'ReservedInstancesConfigurationSetItemType.0.AvailabilityZone': 'us-west-2c', + 'ReservedInstancesConfigurationSetItemType.0.InstanceCount': 3, + 'ReservedInstancesConfigurationSetItemType.0.Platform': 'EC2-VPC', + 'ReservedInstancesConfigurationSetItemType.0.InstanceType': 'c3.large', + 'ReservedInstancesId.1': '2567o137-8a55-48d6-82fb-7258506bb497' + }, ignore_params_values=[ + 'AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version' + ]) + + self.assertEqual(response, 'rimod-3aae219d-3d63-47a9-a7e9-e764example') + + +class TestDescribeReservedInstancesModifications(TestEC2ConnectionBase): + def default_body(self): + 
return b""" + eb4a6e3c-3689-445c-b536-19e38df35898 + + + rimod-49b9433e-fdc7-464a-a6e5-9dabcexample + + + 2567o137-8a55-48d6-82fb-7258506bb497 + + + + + 9d5cb137-5d65-4479-b4ac-8c337example + + us-east-1b + EC2-VPC + 1 + + + + 2013-09-02T21:20:19.637Z + 2013-09-02T21:38:24.143Z + 2013-09-02T21:00:00.000Z + fulfilled + token-f5b56c05-09b0-4d17-8d8c-c75d8a67b806 + + +""" + + def test_serialized_api_args(self): + self.set_http_response(status_code=200) + response = self.ec2.describe_reserved_instances_modifications( + reserved_instances_modification_ids=[ + '2567o137-8a55-48d6-82fb-7258506bb497' + ], + filters={ + 'status': 'processing', + } + ) + self.assert_request_parameters({ + 'Action': 'DescribeReservedInstancesModifications', + 'Filter.1.Name': 'status', + 'Filter.1.Value.1': 'processing', + 'ReservedInstancesModificationId.1': '2567o137-8a55-48d6-82fb-7258506bb497' + }, ignore_params_values=[ + 'AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version' + ]) + + # Make sure the response was parsed correctly. + self.assertEqual( + response[0].modification_id, + 'rimod-49b9433e-fdc7-464a-a6e5-9dabcexample' + ) + self.assertEqual( + response[0].create_date, + datetime(2013, 9, 2, 21, 20, 19, 637000) + ) + self.assertEqual( + response[0].update_date, + datetime(2013, 9, 2, 21, 38, 24, 143000) + ) + self.assertEqual( + response[0].effective_date, + datetime(2013, 9, 2, 21, 0, 0, 0) + ) + self.assertEqual( + response[0].status, + 'fulfilled' + ) + self.assertEqual( + response[0].status_message, + None + ) + self.assertEqual( + response[0].client_token, + 'token-f5b56c05-09b0-4d17-8d8c-c75d8a67b806' + ) + self.assertEqual( + response[0].reserved_instances[0].id, + '2567o137-8a55-48d6-82fb-7258506bb497' + ) + self.assertEqual( + response[0].modification_results[0].availability_zone, + 'us-east-1b' + ) + self.assertEqual( + response[0].modification_results[0].platform, + 'EC2-VPC' + ) + self.assertEqual( + response[0].modification_results[0].instance_count, + 1 + ) + self.assertEqual(len(response), 1) + + +class TestRegisterImage(TestEC2ConnectionBase): + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + ami-1a2b3c4d + + """ + + def test_vm_type_default(self): + self.set_http_response(status_code=200) + self.ec2.register_image('name', 'description', + image_location='s3://foo') + + self.assert_request_parameters({ + 'Action': 'RegisterImage', + 'ImageLocation': 's3://foo', + 'Name': 'name', + 'Description': 'description', + }, ignore_params_values=[ + 'AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version' + ]) + + def test_vm_type_hvm(self): + self.set_http_response(status_code=200) + self.ec2.register_image('name', 'description', + image_location='s3://foo', + virtualization_type='hvm') + + self.assert_request_parameters({ + 'Action': 'RegisterImage', + 'ImageLocation': 's3://foo', + 'Name': 'name', + 'Description': 'description', + 'VirtualizationType': 'hvm' + }, ignore_params_values=[ + 'AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version' + ]) + + def test_sriov_net_support_simple(self): + self.set_http_response(status_code=200) + self.ec2.register_image('name', 'description', + image_location='s3://foo', + sriov_net_support='simple') + + self.assert_request_parameters({ + 'Action': 'RegisterImage', + 'ImageLocation': 's3://foo', + 'Name': 'name', + 'Description': 'description', + 'SriovNetSupport': 'simple' + }, ignore_params_values=[ + 'AWSAccessKeyId', 'SignatureMethod', + 
'SignatureVersion', 'Timestamp', + 'Version' + ]) + + def test_volume_delete_on_termination_on(self): + self.set_http_response(status_code=200) + self.ec2.register_image('name', 'description', + snapshot_id='snap-12345678', + delete_root_volume_on_termination=True) + + self.assert_request_parameters({ + 'Action': 'RegisterImage', + 'Name': 'name', + 'Description': 'description', + 'BlockDeviceMapping.1.DeviceName': None, + 'BlockDeviceMapping.1.Ebs.DeleteOnTermination': 'true', + 'BlockDeviceMapping.1.Ebs.SnapshotId': 'snap-12345678', + }, ignore_params_values=[ + 'AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version' + ]) + + def test_volume_delete_on_termination_default(self): + self.set_http_response(status_code=200) + self.ec2.register_image('name', 'description', + snapshot_id='snap-12345678') + + self.assert_request_parameters({ + 'Action': 'RegisterImage', + 'Name': 'name', + 'Description': 'description', + 'BlockDeviceMapping.1.DeviceName': None, + 'BlockDeviceMapping.1.Ebs.DeleteOnTermination': 'false', + 'BlockDeviceMapping.1.Ebs.SnapshotId': 'snap-12345678', + }, ignore_params_values=[ + 'AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version' + ]) + + +class TestTerminateInstances(TestEC2ConnectionBase): + def default_body(self): + return b""" + + req-59a9ad52-0434-470c-ad48-4f89ded3a03e + + + i-000043a2 + + 16 + running + + + 16 + running + + + + + """ + + def test_terminate_bad_response(self): + self.set_http_response(status_code=200) + self.ec2.terminate_instances('foo') + + +class TestDescribeInstances(TestEC2ConnectionBase): + + def default_body(self): + return b""" + + + """ + + def test_default_behavior(self): + self.set_http_response(status_code=200) + self.ec2.get_all_instances() + self.assert_request_parameters({ + 'Action': 'DescribeInstances'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', 'Version']) + + self.ec2.get_all_reservations() + self.assert_request_parameters({ + 'Action': 'DescribeInstances'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', 'Version']) + + self.ec2.get_only_instances() + self.assert_request_parameters({ + 'Action': 'DescribeInstances'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', 'Version']) + + def test_max_results(self): + self.set_http_response(status_code=200) + self.ec2.get_all_instances( + max_results=10 + ) + self.assert_request_parameters({ + 'Action': 'DescribeInstances', + 'MaxResults': 10}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', 'Version']) + + def test_next_token(self): + self.set_http_response(status_code=200) + self.ec2.get_all_reservations( + next_token='abcdefgh', + ) + self.assert_request_parameters({ + 'Action': 'DescribeInstances', + 'NextToken': 'abcdefgh'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', 'Version']) + + +class TestDescribeTags(TestEC2ConnectionBase): + + def default_body(self): + return b""" + + + """ + + def test_default_behavior(self): + self.set_http_response(status_code=200) + self.ec2.get_all_tags() + self.assert_request_parameters({ + 'Action': 'DescribeTags'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', 'Version']) + + def test_max_results(self): + self.set_http_response(status_code=200) + self.ec2.get_all_tags( + max_results=10 + ) + 
self.assert_request_parameters({ + 'Action': 'DescribeTags', + 'MaxResults': 10}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', 'Version']) + + +class TestSignatureAlteration(TestEC2ConnectionBase): + def test_unchanged(self): + self.assertEqual( + self.service_connection._required_auth_capability(), + ['hmac-v4'] + ) + + def test_switched(self): + region = RegionInfo( + name='cn-north-1', + endpoint='ec2.cn-north-1.amazonaws.com.cn', + connection_cls=EC2Connection + ) + + conn = self.connection_class( + aws_access_key_id='less', + aws_secret_access_key='more', + region=region + ) + self.assertEqual( + conn._required_auth_capability(), + ['hmac-v4'] + ) + + +class TestAssociateAddress(TestEC2ConnectionBase): + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + true + eipassoc-fc5ca095 + + """ + + def test_associate_address(self): + self.set_http_response(status_code=200) + result = self.ec2.associate_address(instance_id='i-1234', + public_ip='192.0.2.1') + self.assertEqual(True, result) + + def test_associate_address_object(self): + self.set_http_response(status_code=200) + result = self.ec2.associate_address_object(instance_id='i-1234', + public_ip='192.0.2.1') + self.assertEqual('eipassoc-fc5ca095', result.association_id) + + +class TestAssociateAddressFail(TestEC2ConnectionBase): + def default_body(self): + return b""" + + + + InvalidInstanceID.NotFound + The instance ID 'i-4cbc822a' does not exist + + + ea966190-f9aa-478e-9ede-cb5432daacc0 + Failure + + """ + + def test_associate_address(self): + self.set_http_response(status_code=200) + result = self.ec2.associate_address(instance_id='i-1234', + public_ip='192.0.2.1') + self.assertEqual(False, result) + + +class TestDescribeVolumes(TestEC2ConnectionBase): + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + + vol-1a2b3c4d + 80 + + us-east-1a + in-use + YYYY-MM-DDTHH:MM:SS.SSSZ + + + vol-1a2b3c4d + i-1a2b3c4d + /dev/sdh + attached + YYYY-MM-DDTHH:MM:SS.SSSZ + false + + + standard + true + + + vol-5e6f7a8b + 80 + + us-east-1a + in-use + YYYY-MM-DDTHH:MM:SS.SSSZ + + + vol-5e6f7a8b + i-5e6f7a8b + /dev/sdz + attached + YYYY-MM-DDTHH:MM:SS.SSSZ + false + + + standard + false + + + + """ + + def test_get_all_volumes(self): + self.set_http_response(status_code=200) + result = self.ec2.get_all_volumes(volume_ids=['vol-1a2b3c4d', 'vol-5e6f7a8b']) + self.assert_request_parameters({ + 'Action': 'DescribeVolumes', + 'VolumeId.1': 'vol-1a2b3c4d', + 'VolumeId.2': 'vol-5e6f7a8b'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(len(result), 2) + self.assertEqual(result[0].id, 'vol-1a2b3c4d') + self.assertTrue(result[0].encrypted) + self.assertEqual(result[1].id, 'vol-5e6f7a8b') + self.assertFalse(result[1].encrypted) + + +class TestDescribeSnapshots(TestEC2ConnectionBase): + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + + snap-1a2b3c4d + vol-1a2b3c4d + pending + YYYY-MM-DDTHH:MM:SS.SSSZ + 80% + 111122223333 + 15 + Daily Backup + + true + + + + + snap-5e6f7a8b + vol-5e6f7a8b + completed + YYYY-MM-DDTHH:MM:SS.SSSZ + 100% + 111122223333 + 15 + Daily Backup + + false + + + + """ + + def test_get_all_snapshots(self): + self.set_http_response(status_code=200) + result = self.ec2.get_all_snapshots(snapshot_ids=['snap-1a2b3c4d', 'snap-5e6f7a8b']) + self.assert_request_parameters({ + 'Action': 'DescribeSnapshots', + 'SnapshotId.1': 
'snap-1a2b3c4d', + 'SnapshotId.2': 'snap-5e6f7a8b'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(len(result), 2) + self.assertEqual(result[0].id, 'snap-1a2b3c4d') + self.assertTrue(result[0].encrypted) + self.assertEqual(result[1].id, 'snap-5e6f7a8b') + self.assertFalse(result[1].encrypted) + + +class TestCreateVolume(TestEC2ConnectionBase): + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + vol-1a2b3c4d + 80 + + us-east-1a + creating + YYYY-MM-DDTHH:MM:SS.000Z + standard + true + + """ + + def test_create_volume(self): + self.set_http_response(status_code=200) + result = self.ec2.create_volume(80, 'us-east-1e', snapshot='snap-1a2b3c4d', + encrypted=True) + self.assert_request_parameters({ + 'Action': 'CreateVolume', + 'AvailabilityZone': 'us-east-1e', + 'Size': 80, + 'SnapshotId': 'snap-1a2b3c4d', + 'Encrypted': 'true'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(result.id, 'vol-1a2b3c4d') + self.assertTrue(result.encrypted) + + +class TestGetClassicLinkInstances(TestEC2ConnectionBase): + def default_body(self): + return b""" + + f4bf0cc6-5967-4687-9355-90ce48394bd3 + + + i-31489bd8 + vpc-9d24f8f8 + + + sg-9b4343fe + + + + + Name + hello + + + + + + """ + def test_get_classic_link_instances(self): + self.set_http_response(status_code=200) + response = self.ec2.get_all_classic_link_instances() + self.assertEqual(len(response), 1) + instance = response[0] + self.assertEqual(instance.id, 'i-31489bd8') + self.assertEqual(instance.vpc_id, 'vpc-9d24f8f8') + self.assertEqual(len(instance.groups), 1) + self.assertEqual(instance.groups[0].id, 'sg-9b4343fe') + self.assertEqual(instance.tags, {'Name': 'hello'}) + + + def test_get_classic_link_instances_params(self): + self.set_http_response(status_code=200) + self.ec2.get_all_classic_link_instances( + instance_ids=['id1', 'id2'], + filters={'GroupId': 'sg-9b4343fe'}, + dry_run=True, + next_token='next_token', + max_results=10 + ) + self.assert_request_parameters({ + 'Action': 'DescribeClassicLinkInstances', + 'InstanceId.1': 'id1', + 'InstanceId.2': 'id2', + 'Filter.1.Name': 'GroupId', + 'Filter.1.Value.1': 'sg-9b4343fe', + 'DryRun': 'true', + 'NextToken': 'next_token', + 'MaxResults': 10}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', 'Version']) + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_ec2object.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_ec2object.py new file mode 100644 index 0000000000000000000000000000000000000000..14841e91b64070107336bf171b74357429520eb0 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_ec2object.py @@ -0,0 +1,211 @@ +#!/usr/bin/env python + +from tests.unit import unittest +from tests.unit import AWSMockServiceTestCase + +from boto.ec2.connection import EC2Connection +from boto.ec2.ec2object import TaggedEC2Object + + +CREATE_TAGS_RESPONSE = br""" + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + true + +""" + + +DELETE_TAGS_RESPONSE = br""" + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + true + +""" + + +class TestAddTags(AWSMockServiceTestCase): + connection_class = EC2Connection + + def default_body(self): + return CREATE_TAGS_RESPONSE + + def test_add_tag(self): + self.set_http_response(status_code=200) + taggedEC2Object = TaggedEC2Object(self.service_connection) + 
taggedEC2Object.id = "i-abcd1234" + taggedEC2Object.tags["already_present_key"] = "already_present_value" + + taggedEC2Object.add_tag("new_key", "new_value") + + self.assert_request_parameters({ + 'ResourceId.1': 'i-abcd1234', + 'Action': 'CreateTags', + 'Tag.1.Key': 'new_key', + 'Tag.1.Value': 'new_value'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + + self.assertEqual(taggedEC2Object.tags, { + "already_present_key": "already_present_value", + "new_key": "new_value"}) + + def test_add_tags(self): + self.set_http_response(status_code=200) + taggedEC2Object = TaggedEC2Object(self.service_connection) + taggedEC2Object.id = "i-abcd1234" + taggedEC2Object.tags["already_present_key"] = "already_present_value" + + taggedEC2Object.add_tags({"key1": "value1", "key2": "value2"}) + + self.assert_request_parameters({ + 'ResourceId.1': 'i-abcd1234', + 'Action': 'CreateTags', + 'Tag.1.Key': 'key1', + 'Tag.1.Value': 'value1', + 'Tag.2.Key': 'key2', + 'Tag.2.Value': 'value2'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + + self.assertEqual(taggedEC2Object.tags, { + "already_present_key": "already_present_value", + "key1": "value1", + "key2": "value2"}) + + +class TestRemoveTags(AWSMockServiceTestCase): + connection_class = EC2Connection + + def default_body(self): + return DELETE_TAGS_RESPONSE + + def test_remove_tag(self): + self.set_http_response(status_code=200) + taggedEC2Object = TaggedEC2Object(self.service_connection) + taggedEC2Object.id = "i-abcd1234" + taggedEC2Object.tags["key1"] = "value1" + taggedEC2Object.tags["key2"] = "value2" + + taggedEC2Object.remove_tag("key1", "value1") + + self.assert_request_parameters({ + 'ResourceId.1': 'i-abcd1234', + 'Action': 'DeleteTags', + 'Tag.1.Key': 'key1', + 'Tag.1.Value': 'value1'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + + self.assertEqual(taggedEC2Object.tags, {"key2": "value2"}) + + def test_remove_tag_no_value(self): + self.set_http_response(status_code=200) + taggedEC2Object = TaggedEC2Object(self.service_connection) + taggedEC2Object.id = "i-abcd1234" + taggedEC2Object.tags["key1"] = "value1" + taggedEC2Object.tags["key2"] = "value2" + + taggedEC2Object.remove_tag("key1") + + self.assert_request_parameters({ + 'ResourceId.1': 'i-abcd1234', + 'Action': 'DeleteTags', + 'Tag.1.Key': 'key1'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + + self.assertEqual(taggedEC2Object.tags, {"key2": "value2"}) + + def test_remove_tag_empty_value(self): + self.set_http_response(status_code=200) + taggedEC2Object = TaggedEC2Object(self.service_connection) + taggedEC2Object.id = "i-abcd1234" + taggedEC2Object.tags["key1"] = "value1" + taggedEC2Object.tags["key2"] = "value2" + + taggedEC2Object.remove_tag("key1", "") + + self.assert_request_parameters({ + 'ResourceId.1': 'i-abcd1234', + 'Action': 'DeleteTags', + 'Tag.1.Key': 'key1', + 'Tag.1.Value': ''}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + + self.assertEqual(taggedEC2Object.tags, + {"key1": "value1", "key2": "value2"}) + + def test_remove_tags(self): + self.set_http_response(status_code=200) + taggedEC2Object = TaggedEC2Object(self.service_connection) + taggedEC2Object.id = "i-abcd1234" + taggedEC2Object.tags["key1"] = "value1" + taggedEC2Object.tags["key2"] = "value2" + + 
taggedEC2Object.remove_tags({"key1": "value1", "key2": "value2"}) + + self.assert_request_parameters({ + 'ResourceId.1': 'i-abcd1234', + 'Action': 'DeleteTags', + 'Tag.1.Key': 'key1', + 'Tag.1.Value': 'value1', + 'Tag.2.Key': 'key2', + 'Tag.2.Value': 'value2'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + + self.assertEqual(taggedEC2Object.tags, {}) + + def test_remove_tags_wrong_values(self): + self.set_http_response(status_code=200) + taggedEC2Object = TaggedEC2Object(self.service_connection) + taggedEC2Object.id = "i-abcd1234" + taggedEC2Object.tags["key1"] = "value1" + taggedEC2Object.tags["key2"] = "value2" + + taggedEC2Object.remove_tags({"key1": "value1", "key2": "value3"}) + + self.assert_request_parameters({ + 'ResourceId.1': 'i-abcd1234', + 'Action': 'DeleteTags', + 'Tag.1.Key': 'key1', + 'Tag.1.Value': 'value1', + 'Tag.2.Key': 'key2', + 'Tag.2.Value': 'value3'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + + self.assertEqual(taggedEC2Object.tags, {"key2": "value2"}) + + def test_remove_tags_none_values(self): + self.set_http_response(status_code=200) + taggedEC2Object = TaggedEC2Object(self.service_connection) + taggedEC2Object.id = "i-abcd1234" + taggedEC2Object.tags["key1"] = "value1" + taggedEC2Object.tags["key2"] = "value2" + + taggedEC2Object.remove_tags({"key1": "value1", "key2": None}) + + self.assert_request_parameters({ + 'ResourceId.1': 'i-abcd1234', + 'Action': 'DeleteTags', + 'Tag.1.Key': 'key1', + 'Tag.1.Value': 'value1', + 'Tag.2.Key': 'key2'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + + self.assertEqual(taggedEC2Object.tags, {}) + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_instance.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_instance.py new file mode 100644 index 0000000000000000000000000000000000000000..4794d4cc4b2955e1dfa4adda7586e775d54d6628 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_instance.py @@ -0,0 +1,277 @@ +#!/usr/bin/env python + +import base64 +from tests.compat import unittest, mock +from tests.unit import AWSMockServiceTestCase + +from boto.ec2.connection import EC2Connection + +DESCRIBE_INSTANCE_VPC = br""" + + c6132c74-b524-4884-87f5-0f4bde4a9760 + + + r-72ef4a0a + 184906166255 + + + + i-instance + ami-1624987f + + 16 + running + + + + + mykeypair + 0 + + m1.small + 2012-12-14T23:48:37.000Z + + us-east-1d + + default + + aki-88aa75e1 + + disabled + + subnet-0dc60667 + vpc-id + 10.0.0.67 + true + + + sg-id + WebServerSG + + + x86_64 + ebs + /dev/sda1 + + + /dev/sda1 + + vol-id + attached + 2012-12-14T23:48:43.000Z + true + + + + paravirtual + foo + + + Name + + + + xen + + + eni-id + subnet-id + vpc-id + Primary network interface + ownerid + in-use + 10.0.0.67 + true + + + sg-id + WebServerSG + + + + eni-attach-id + 0 + attached + 2012-12-14T23:48:37.000Z + true + + + + 10.0.0.67 + true + + + 10.0.0.54 + false + + + 10.0.0.55 + false + + + + + false + + + + + +""" + +RUN_INSTANCE_RESPONSE = br""" + + ad4b83c2-f606-4c39-90c6-5dcc5be823e1 + r-c5cef7a7 + ownerid + + + sg-id + SSH + + + + + i-ff0f1299 + ami-ed65ba84 + + 0 + pending + + + + + awskeypair + 0 + + t1.micro + 2012-05-30T19:21:18.000Z + + us-east-1a + + default + + aki-b6aa75df + + disabled + + + + sg-99a710f1 + SSH + + + + pending + pending + + i386 + ebs + /dev/sda1 + + 
paravirtual + + xen + + + arn:aws:iam::ownerid:instance-profile/myinstanceprofile + iamid + + + + +""" + + +class TestRunInstanceResponseParsing(unittest.TestCase): + def testIAMInstanceProfileParsedCorrectly(self): + ec2 = EC2Connection(aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key') + mock_response = mock.Mock() + mock_response.read.return_value = RUN_INSTANCE_RESPONSE + mock_response.status = 200 + ec2.make_request = mock.Mock(return_value=mock_response) + reservation = ec2.run_instances(image_id='ami-12345') + self.assertEqual(len(reservation.instances), 1) + instance = reservation.instances[0] + self.assertEqual(instance.image_id, 'ami-ed65ba84') + # iamInstanceProfile has an ID element, so we want to make sure + # that this does not map to instance.id (which should be the + # id of the ec2 instance). + self.assertEqual(instance.id, 'i-ff0f1299') + self.assertDictEqual( + instance.instance_profile, + {'arn': ('arn:aws:iam::ownerid:' + 'instance-profile/myinstanceprofile'), + 'id': 'iamid'}) + + +class TestRunInstances(AWSMockServiceTestCase): + connection_class = EC2Connection + + def default_body(self): + # This is a dummy response + return b""" + + + """ + + def test_run_instances_user_data(self): + self.set_http_response(status_code=200) + + response = self.service_connection.run_instances( + image_id='123456', + instance_type='m1.large', + security_groups=['group1', 'group2'], + user_data='#!/bin/bash' + ) + + self.assert_request_parameters({ + 'Action': 'RunInstances', + 'ImageId': '123456', + 'InstanceType': 'm1.large', + 'UserData': base64.b64encode(b'#!/bin/bash').decode('utf-8'), + 'MaxCount': 1, + 'MinCount': 1, + 'SecurityGroup.1': 'group1', + 'SecurityGroup.2': 'group2', + }, ignore_params_values=[ + 'Version', 'AWSAccessKeyId', 'SignatureMethod', 'SignatureVersion', + 'Timestamp' + ]) + + +class TestDescribeInstances(AWSMockServiceTestCase): + connection_class = EC2Connection + + def default_body(self): + return DESCRIBE_INSTANCE_VPC + + def test_multiple_private_ip_addresses(self): + self.set_http_response(status_code=200) + + api_response = self.service_connection.get_all_reservations() + self.assertEqual(len(api_response), 1) + + instances = api_response[0].instances + self.assertEqual(len(instances), 1) + + instance = instances[0] + self.assertEqual(len(instance.interfaces), 1) + + interface = instance.interfaces[0] + self.assertEqual(len(interface.private_ip_addresses), 3) + + addresses = interface.private_ip_addresses + self.assertEqual(addresses[0].private_ip_address, '10.0.0.67') + self.assertTrue(addresses[0].primary) + + self.assertEqual(addresses[1].private_ip_address, '10.0.0.54') + self.assertFalse(addresses[1].primary) + + self.assertEqual(addresses[2].private_ip_address, '10.0.0.55') + self.assertFalse(addresses[2].primary) + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_instancestatus.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_instancestatus.py new file mode 100644 index 0000000000000000000000000000000000000000..38224d7d63ba1d885ee44b9e8007840f1dad0f0a --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_instancestatus.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python + +from tests.compat import mock, unittest + +from boto.ec2.connection import EC2Connection + +INSTANCE_STATUS_RESPONSE = br""" + + 3be1508e-c444-4fef-89cc-0b1223c4f02fEXAMPLE + page-2 + + +""" + + +class TestInstanceStatusResponseParsing(unittest.TestCase): + 
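+    # Editorial aside, a hypothetical refactoring sketch (not part of
+    # boto): like TestRunInstanceResponseParsing above, the cases in this
+    # class stub EC2Connection.make_request directly instead of going
+    # through AWSMockServiceTestCase. The repeated stubbing boilerplate
+    # they share has this shape:
+    def _stubbed_connection(self, body=INSTANCE_STATUS_RESPONSE):
+        # Build a real connection, then replace make_request with a mock
+        # whose response object yields the canned XML body.
+        conn = EC2Connection(aws_access_key_id='aws_access_key_id',
+                             aws_secret_access_key='aws_secret_access_key')
+        mock_response = mock.Mock()
+        mock_response.read.return_value = body
+        mock_response.status = 200
+        conn.make_request = mock.Mock(return_value=mock_response)
+        return conn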
def test_next_token(self): + ec2 = EC2Connection(aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key') + mock_response = mock.Mock() + mock_response.read.return_value = INSTANCE_STATUS_RESPONSE + mock_response.status = 200 + ec2.make_request = mock.Mock(return_value=mock_response) + all_statuses = ec2.get_all_instance_status() + self.assertNotIn('IncludeAllInstances', ec2.make_request.call_args[0][1]) + self.assertEqual(all_statuses.next_token, 'page-2') + + def test_include_all_instances(self): + ec2 = EC2Connection(aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key') + mock_response = mock.Mock() + mock_response.read.return_value = INSTANCE_STATUS_RESPONSE + mock_response.status = 200 + ec2.make_request = mock.Mock(return_value=mock_response) + all_statuses = ec2.get_all_instance_status(include_all_instances=True) + self.assertIn('IncludeAllInstances', ec2.make_request.call_args[0][1]) + self.assertEqual('true', ec2.make_request.call_args[0][1]['IncludeAllInstances']) + self.assertEqual(all_statuses.next_token, 'page-2') + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_instancetype.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_instancetype.py new file mode 100644 index 0000000000000000000000000000000000000000..94e1e8fc1c63d520d8972f25f521d662ba3939bd --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_instancetype.py @@ -0,0 +1,118 @@ +#!/usr/bin/env python +from tests.unit import unittest +from tests.unit import AWSMockServiceTestCase + +import boto.ec2 + +from boto.ec2.connection import EC2Connection + + +class TestEC2ConnectionBase(AWSMockServiceTestCase): + connection_class = EC2Connection + + def setUp(self): + super(TestEC2ConnectionBase, self).setUp() + self.ec2 = self.service_connection + + +class TestReservedInstanceOfferings(TestEC2ConnectionBase): + + def default_body(self): + return b""" + + + + + m1.small15256 + + + + t1.micro15256 + + + + m1.medium110512 + + + + c1.medium210512 + + + + m1.large210512 + + + + m1.xlarge2101024 + + + + c1.xlarge2102048 + + + + m2.xlarge2102048 + + + + m3.xlarge4152048 + + + + m2.2xlarge2304096 + + + + m3.2xlarge4304096 + + + + cc1.4xlarge8603072 + + + + m2.4xlarge8604096 + + + + hi1.4xlarge81206144 + + + + cc2.8xlarge161206144 + + + + cg1.4xlarge1620012288 + + + + cr1.8xlarge1624016384 + + + + hs1.8xlarge4824000119808 + + + + + """ + + def test_get_instance_types(self): + self.set_http_response(status_code=200) + response = self.ec2.get_all_instance_types() + self.assertEqual(len(response), 18) + instance_type = response[0] + self.assertEqual(instance_type.name, 'm1.small') + self.assertEqual(instance_type.cores, '1') + self.assertEqual(instance_type.disk, '5') + self.assertEqual(instance_type.memory, '256') + instance_type = response[17] + self.assertEqual(instance_type.name, 'hs1.8xlarge') + self.assertEqual(instance_type.cores, '48') + self.assertEqual(instance_type.disk, '24000') + self.assertEqual(instance_type.memory, '119808') + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_networkinterface.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_networkinterface.py new file mode 100644 index 0000000000000000000000000000000000000000..c872da0da7fb4541cd51d1cc9645a357a844ec2f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_networkinterface.py @@ -0,0 +1,273 @@ +#!/usr/bin/env python +# 
Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from tests.compat import mock, unittest + +from boto.exception import BotoClientError +from boto.ec2.networkinterface import NetworkInterfaceCollection +from boto.ec2.networkinterface import NetworkInterfaceSpecification +from boto.ec2.networkinterface import PrivateIPAddress +from boto.ec2.networkinterface import Attachment, NetworkInterface + + +class NetworkInterfaceTests(unittest.TestCase): + def setUp(self): + + self.attachment = Attachment() + self.attachment.id = 'eni-attach-1' + self.attachment.instance_id = 10 + self.attachment.status = "some status" + self.attachment.device_index = 100 + + self.eni_one = NetworkInterface() + self.eni_one.id = 'eni-1' + self.eni_one.status = "one_status" + self.eni_one.attachment = self.attachment + + self.eni_two = NetworkInterface() + self.eni_two.connection = mock.Mock() + self.eni_two.id = 'eni-2' + self.eni_two.status = "two_status" + self.eni_two.attachment = None + + def test_update_with_validate_true_raises_value_error(self): + self.eni_one.connection = mock.Mock() + self.eni_one.connection.get_all_network_interfaces.return_value = [] + with self.assertRaisesRegexp(ValueError, "^eni-1 is not a valid ENI ID$"): + self.eni_one.update(True) + + def test_update_with_result_set_greater_than_0_updates_dict(self): + self.eni_two.connection.get_all_network_interfaces.return_value = [self.eni_one] + self.eni_two.update() + + assert all([self.eni_two.status == "one_status", + self.eni_two.id == 'eni-1', + self.eni_two.attachment == self.attachment]) + + def test_update_returns_status(self): + self.eni_one.connection = mock.Mock() + self.eni_one.connection.get_all_network_interfaces.return_value = [self.eni_two] + retval = self.eni_one.update() + self.assertEqual(retval, "two_status") + + def test_attach_calls_attach_eni(self): + self.eni_one.connection = mock.Mock() + self.eni_one.attach("instance_id", 11) + self.eni_one.connection.attach_network_interface.assert_called_with( + 'eni-1', + "instance_id", + 11, + dry_run=False + ) + + def test_detach_calls_detach_network_interface(self): + self.eni_one.connection = mock.Mock() + self.eni_one.detach() + self.eni_one.connection.detach_network_interface.assert_called_with( + 'eni-attach-1', + False, + dry_run=False + ) + + def test_detach_with_no_attach_data(self): + self.eni_two.connection = mock.Mock() + self.eni_two.detach() + 
self.eni_two.connection.detach_network_interface.assert_called_with( + None, False, dry_run=False) + + def test_detach_with_force_calls_detach_network_interface_with_force(self): + self.eni_one.connection = mock.Mock() + self.eni_one.detach(True) + self.eni_one.connection.detach_network_interface.assert_called_with( + 'eni-attach-1', True, dry_run=False) + + +class TestNetworkInterfaceCollection(unittest.TestCase): + maxDiff = None + + def setUp(self): + self.private_ip_address1 = PrivateIPAddress( + private_ip_address='10.0.0.10', primary=False) + self.private_ip_address2 = PrivateIPAddress( + private_ip_address='10.0.0.11', primary=False) + self.network_interfaces_spec1 = NetworkInterfaceSpecification( + device_index=1, subnet_id='subnet_id', + description='description1', + private_ip_address='10.0.0.54', delete_on_termination=False, + private_ip_addresses=[self.private_ip_address1, + self.private_ip_address2] + ) + + self.private_ip_address3 = PrivateIPAddress( + private_ip_address='10.0.1.10', primary=False) + self.private_ip_address4 = PrivateIPAddress( + private_ip_address='10.0.1.11', primary=False) + self.network_interfaces_spec2 = NetworkInterfaceSpecification( + device_index=2, subnet_id='subnet_id2', + description='description2', + groups=['group_id1', 'group_id2'], + private_ip_address='10.0.1.54', delete_on_termination=False, + private_ip_addresses=[self.private_ip_address3, + self.private_ip_address4] + ) + + self.network_interfaces_spec3 = NetworkInterfaceSpecification( + device_index=0, subnet_id='subnet_id2', + description='description2', + groups=['group_id1', 'group_id2'], + private_ip_address='10.0.1.54', delete_on_termination=False, + private_ip_addresses=[self.private_ip_address3, + self.private_ip_address4], + associate_public_ip_address=True + ) + + def test_param_serialization(self): + collection = NetworkInterfaceCollection(self.network_interfaces_spec1, + self.network_interfaces_spec2) + params = {} + collection.build_list_params(params) + self.assertDictEqual(params, { + 'NetworkInterface.0.DeviceIndex': '1', + 'NetworkInterface.0.DeleteOnTermination': 'false', + 'NetworkInterface.0.Description': 'description1', + 'NetworkInterface.0.PrivateIpAddress': '10.0.0.54', + 'NetworkInterface.0.SubnetId': 'subnet_id', + 'NetworkInterface.0.PrivateIpAddresses.0.Primary': 'false', + 'NetworkInterface.0.PrivateIpAddresses.0.PrivateIpAddress': + '10.0.0.10', + 'NetworkInterface.0.PrivateIpAddresses.1.Primary': 'false', + 'NetworkInterface.0.PrivateIpAddresses.1.PrivateIpAddress': + '10.0.0.11', + 'NetworkInterface.1.DeviceIndex': '2', + 'NetworkInterface.1.Description': 'description2', + 'NetworkInterface.1.DeleteOnTermination': 'false', + 'NetworkInterface.1.PrivateIpAddress': '10.0.1.54', + 'NetworkInterface.1.SubnetId': 'subnet_id2', + 'NetworkInterface.1.SecurityGroupId.0': 'group_id1', + 'NetworkInterface.1.SecurityGroupId.1': 'group_id2', + 'NetworkInterface.1.PrivateIpAddresses.0.Primary': 'false', + 'NetworkInterface.1.PrivateIpAddresses.0.PrivateIpAddress': + '10.0.1.10', + 'NetworkInterface.1.PrivateIpAddresses.1.Primary': 'false', + 'NetworkInterface.1.PrivateIpAddresses.1.PrivateIpAddress': + '10.0.1.11', + }) + + def test_add_prefix_to_serialization(self): + collection = NetworkInterfaceCollection(self.network_interfaces_spec1, + self.network_interfaces_spec2) + params = {} + collection.build_list_params(params, prefix='LaunchSpecification.') + # We already tested the actual serialization previously, so + # we're just checking a few keys to make sure we get the 
proper + # prefix. + self.assertDictEqual(params, { + 'LaunchSpecification.NetworkInterface.0.DeviceIndex': '1', + 'LaunchSpecification.NetworkInterface.0.DeleteOnTermination': + 'false', + 'LaunchSpecification.NetworkInterface.0.Description': + 'description1', + 'LaunchSpecification.NetworkInterface.0.PrivateIpAddress': + '10.0.0.54', + 'LaunchSpecification.NetworkInterface.0.SubnetId': 'subnet_id', + 'LaunchSpecification.NetworkInterface.0.PrivateIpAddresses.0.Primary': + 'false', + 'LaunchSpecification.NetworkInterface.0.PrivateIpAddresses.0.PrivateIpAddress': + '10.0.0.10', + 'LaunchSpecification.NetworkInterface.0.PrivateIpAddresses.1.Primary': 'false', + 'LaunchSpecification.NetworkInterface.0.PrivateIpAddresses.1.PrivateIpAddress': + '10.0.0.11', + 'LaunchSpecification.NetworkInterface.1.DeviceIndex': '2', + 'LaunchSpecification.NetworkInterface.1.Description': + 'description2', + 'LaunchSpecification.NetworkInterface.1.DeleteOnTermination': + 'false', + 'LaunchSpecification.NetworkInterface.1.PrivateIpAddress': + '10.0.1.54', + 'LaunchSpecification.NetworkInterface.1.SubnetId': 'subnet_id2', + 'LaunchSpecification.NetworkInterface.1.SecurityGroupId.0': + 'group_id1', + 'LaunchSpecification.NetworkInterface.1.SecurityGroupId.1': + 'group_id2', + 'LaunchSpecification.NetworkInterface.1.PrivateIpAddresses.0.Primary': + 'false', + 'LaunchSpecification.NetworkInterface.1.PrivateIpAddresses.0.PrivateIpAddress': + '10.0.1.10', + 'LaunchSpecification.NetworkInterface.1.PrivateIpAddresses.1.Primary': + 'false', + 'LaunchSpecification.NetworkInterface.1.PrivateIpAddresses.1.PrivateIpAddress': + '10.0.1.11', + }) + + def test_cant_use_public_ip(self): + collection = NetworkInterfaceCollection(self.network_interfaces_spec3, + self.network_interfaces_spec1) + params = {} + + # First, verify we can't incorrectly create multiple interfaces with + # one having a public IP. + with self.assertRaises(BotoClientError): + collection.build_list_params(params, prefix='LaunchSpecification.') + + # Next, ensure it can't be on device index 1. + self.network_interfaces_spec3.device_index = 1 + collection = NetworkInterfaceCollection(self.network_interfaces_spec3) + params = {} + + with self.assertRaises(BotoClientError): + collection.build_list_params(params, prefix='LaunchSpecification.') + + def test_public_ip(self): + # With public IP.
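+        # Editorial aside, an illustrative re-statement (not boto API): the
+        # serialization under test flattens each spec into dotted query
+        # parameters of the form 'NetworkInterface.<n>.<Field>', optionally
+        # under a caller-supplied prefix. In pure Python the scheme looks
+        # like this (hypothetical helper and field name):
+        def flatten(specs, prefix=''):
+            flat = {}
+            for n, spec in enumerate(specs):
+                for field, value in spec.items():
+                    flat['%sNetworkInterface.%d.%s' % (prefix, n, field)] = value
+            return flat
+        assert flatten([{'DeviceIndex': '0'}], 'LaunchSpecification.') == {
+            'LaunchSpecification.NetworkInterface.0.DeviceIndex': '0'}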
+ collection = NetworkInterfaceCollection(self.network_interfaces_spec3) + params = {} + collection.build_list_params(params, prefix='LaunchSpecification.') + + self.assertDictEqual(params, { + 'LaunchSpecification.NetworkInterface.0.AssociatePublicIpAddress': + 'true', + 'LaunchSpecification.NetworkInterface.0.DeviceIndex': '0', + 'LaunchSpecification.NetworkInterface.0.DeleteOnTermination': + 'false', + 'LaunchSpecification.NetworkInterface.0.Description': + 'description2', + 'LaunchSpecification.NetworkInterface.0.PrivateIpAddress': + '10.0.1.54', + 'LaunchSpecification.NetworkInterface.0.SubnetId': 'subnet_id2', + 'LaunchSpecification.NetworkInterface.0.PrivateIpAddresses.0.Primary': + 'false', + 'LaunchSpecification.NetworkInterface.0.PrivateIpAddresses.0.PrivateIpAddress': + '10.0.1.10', + 'LaunchSpecification.NetworkInterface.0.PrivateIpAddresses.1.Primary': + 'false', + 'LaunchSpecification.NetworkInterface.0.PrivateIpAddresses.1.PrivateIpAddress': + '10.0.1.11', + 'LaunchSpecification.NetworkInterface.0.SecurityGroupId.0': + 'group_id1', + 'LaunchSpecification.NetworkInterface.0.SecurityGroupId.1': + 'group_id2', + }) + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_reservedinstance.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_reservedinstance.py new file mode 100644 index 0000000000000000000000000000000000000000..c1ec688b92e3bdd3a86a64b3e28bb7fff42890b8 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_reservedinstance.py @@ -0,0 +1,44 @@ +from tests.unit import AWSMockServiceTestCase +from boto.ec2.connection import EC2Connection +from boto.ec2.reservedinstance import ReservedInstance + + +class TestReservedInstancesSet(AWSMockServiceTestCase): + connection_class = EC2Connection + + def default_body(self): + return b""" + + + ididididid + t1.micro + 2014-05-03T14:10:10.944Z + 2014-05-03T14:10:11.000Z + 64800000 + 62.5 + 0.0 + 5 + Linux/UNIX + retired + default + USD + Heavy Utilization + + + Hourly + 0.005 + + + +""" + + def test_get_all_reserved_instances(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_all_reserved_instances() + + self.assertEqual(len(response), 1) + self.assertTrue(isinstance(response[0], ReservedInstance)) + self.assertEqual(response[0].id, 'ididididid') + self.assertEqual(response[0].instance_count, 5) + self.assertEqual(response[0].start, '2014-05-03T14:10:10.944Z') + self.assertEqual(response[0].end, '2014-05-03T14:10:11.000Z') diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_securitygroup.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_securitygroup.py new file mode 100644 index 0000000000000000000000000000000000000000..78c634cf9b575a9ef7f49ceda184b954739d8ec8 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_securitygroup.py @@ -0,0 +1,211 @@ +#!/usr/bin/env python + +from tests.compat import unittest +from tests.unit import AWSMockServiceTestCase + +from boto.ec2.connection import EC2Connection +from boto.ec2.securitygroup import SecurityGroup + + +DESCRIBE_SECURITY_GROUP = br""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + + 111122223333 + sg-1a2b3c4d + WebServers + Web Servers + + + + tcp + 80 + 80 + + + + 0.0.0.0/0 + + + + + + + + 111122223333 + sg-2a2b3c4d + RangedPortsBySource + Group A + + + tcp + 6000 + 7000 + + + 111122223333 + sg-3a2b3c4d + Group B + + + + + + + + +""" + +DESCRIBE_INSTANCES = br""" + + c6132c74-b524-4884-87f5-0f4bde4a9760 + + + r-72ef4a0a
+ 184906166255 + + + + i-instance + ami-1624987f + + 16 + running + + + + + mykeypair + 0 + + m1.small + 2012-12-14T23:48:37.000Z + + us-east-1d + + default + + aki-88aa75e1 + + disabled + + subnet-0dc60667 + vpc-id + 10.0.0.67 + true + + + sg-1a2b3c4d + WebServerSG + + + x86_64 + ebs + /dev/sda1 + + + /dev/sda1 + + vol-id + attached + 2012-12-14T23:48:43.000Z + true + + + + paravirtual + foo + + + Name + + + + xen + + + eni-id + subnet-id + vpc-id + Primary network interface + ownerid + in-use + 10.0.0.67 + true + + + sg-id + WebServerSG + + + + eni-attach-id + 0 + attached + 2012-12-14T23:48:37.000Z + true + + + + 10.0.0.67 + true + + + 10.0.0.54 + false + + + 10.0.0.55 + false + + + + + false + + + + + +""" + + +class TestDescribeSecurityGroups(AWSMockServiceTestCase): + connection_class = EC2Connection + + def test_get_instances(self): + self.set_http_response(status_code=200, body=DESCRIBE_SECURITY_GROUP) + groups = self.service_connection.get_all_security_groups() + + self.set_http_response(status_code=200, body=DESCRIBE_INSTANCES) + instances = groups[0].instances() + + self.assertEqual(1, len(instances)) + self.assertEqual(groups[0].id, instances[0].groups[0].id) + + +class SecurityGroupTest(unittest.TestCase): + def test_add_rule(self): + sg = SecurityGroup() + self.assertEqual(len(sg.rules), 0) + + # Regression: ``dry_run`` was being passed (but unhandled) before. + sg.add_rule( + ip_protocol='http', + from_port='80', + to_port='8080', + src_group_name='groupy', + src_group_owner_id='12345', + cidr_ip='10.0.0.1', + src_group_group_id='54321', + dry_run=False + ) + self.assertEqual(len(sg.rules), 1) + + def test_remove_rule_on_empty_group(self): + # Remove a rule from a group with no rules + sg = SecurityGroup() + + with self.assertRaises(ValueError): + sg.remove_rule('ip', 80, 80, None, None, None, None) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_snapshot.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_snapshot.py new file mode 100644 index 0000000000000000000000000000000000000000..56af6bce36c9105a8913b90f1db29048b999b87b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_snapshot.py @@ -0,0 +1,61 @@ +from tests.compat import OrderedDict +from tests.unit import AWSMockServiceTestCase + +from boto.ec2.connection import EC2Connection +from boto.ec2.snapshot import Snapshot + + +class TestDescribeSnapshots(AWSMockServiceTestCase): + + connection_class = EC2Connection + + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + + snap-1a2b3c4d + vol-1a2b3c4d + pending + YYYY-MM-DDTHH:MM:SS.SSSZ + 30% + 111122223333 + 15 + Daily Backup + + + Purpose + demo_db_14_backup + + + false + + + + """ + + def test_describe_snapshots(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_all_snapshots(['snap-1a2b3c4d', 'snap-9f8e7d6c'], + owner=['self', '111122223333'], + restorable_by='999988887777', + filters=OrderedDict((('status', 'pending'), + ('tag-value', '*db_*')))) + self.assert_request_parameters({ + 'Action': 'DescribeSnapshots', + 'SnapshotId.1': 'snap-1a2b3c4d', + 'SnapshotId.2': 'snap-9f8e7d6c', + 'Owner.1': 'self', + 'Owner.2': '111122223333', + 'RestorableBy.1': '999988887777', + 'Filter.1.Name': 'status', + 'Filter.1.Value.1': 'pending', + 'Filter.2.Name': 'tag-value', + 'Filter.2.Value.1': '*db_*'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(len(response), 1) + 
self.assertIsInstance(response[0], Snapshot) + self.assertEqual(response[0].id, 'snap-1a2b3c4d') diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_spotinstance.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_spotinstance.py new file mode 100644 index 0000000000000000000000000000000000000000..b4c81caad1ae09833c428fb141ff9c7608c84243 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_spotinstance.py @@ -0,0 +1,110 @@ +from tests.unit import AWSMockServiceTestCase + +from boto.ec2.connection import EC2Connection + + +class TestCancelSpotInstanceRequests(AWSMockServiceTestCase): + + connection_class = EC2Connection + + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + + sir-1a2b3c4d + cancelled + + + sir-9f8e7d6c + cancelled + + + + """ + + def test_cancel_spot_instance_requests(self): + self.set_http_response(status_code=200) + response = self.service_connection.cancel_spot_instance_requests(['sir-1a2b3c4d', + 'sir-9f8e7d6c']) + self.assert_request_parameters({ + 'Action': 'CancelSpotInstanceRequests', + 'SpotInstanceRequestId.1': 'sir-1a2b3c4d', + 'SpotInstanceRequestId.2': 'sir-9f8e7d6c'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(len(response), 2) + self.assertEqual(response[0].id, 'sir-1a2b3c4d') + self.assertEqual(response[0].state, 'cancelled') + self.assertEqual(response[1].id, 'sir-9f8e7d6c') + self.assertEqual(response[1].state, 'cancelled') + + +class TestGetSpotPriceHistory(AWSMockServiceTestCase): + + connection_class = EC2Connection + + def default_body(self): + return b""" + + b6c6978c-bd13-4ad7-9bc8-6f0ac9d32bcc + + + c3.large + Linux/UNIX + 0.032000 + 2013-12-28T12:17:43.000Z + us-west-2c + + + c3.large + Windows (Amazon VPC) + 0.104000 + 2013-12-28T07:49:40.000Z + us-west-2b + + + q5GwEl5bMGjKq6YmhpDLJ7hEwyWU54jJC2GQ93n61vZV4s1+fzZ674xzvUlTihrl + + """ + + def test_get_spot_price_history(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_spot_price_history( + instance_type='c3.large') + self.assert_request_parameters({ + 'Action': 'DescribeSpotPriceHistory', + 'InstanceType': 'c3.large'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(len(response), 2) + self.assertEqual(response.next_token, + 'q5GwEl5bMGjKq6YmhpDLJ7hEwyWU54jJC2GQ93n61vZV4s1+fzZ674xzvUlTihrl') + self.assertEqual(response.nextToken, + 'q5GwEl5bMGjKq6YmhpDLJ7hEwyWU54jJC2GQ93n61vZV4s1+fzZ674xzvUlTihrl') + self.assertEqual(response[0].instance_type, 'c3.large') + self.assertEqual(response[0].availability_zone, 'us-west-2c') + self.assertEqual(response[1].instance_type, 'c3.large') + self.assertEqual(response[1].availability_zone, 'us-west-2b') + + response = self.service_connection.get_spot_price_history( + filters={'instance-type': 'c3.large'}) + self.assert_request_parameters({ + 'Action': 'DescribeSpotPriceHistory', + 'Filter.1.Name': 'instance-type', + 'Filter.1.Value.1': 'c3.large'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + + response = self.service_connection.get_spot_price_history( + next_token='foobar') + self.assert_request_parameters({ + 'Action': 'DescribeSpotPriceHistory', + 'NextToken': 'foobar'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) diff --git 
a/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_volume.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_volume.py new file mode 100644 index 0000000000000000000000000000000000000000..81d7f5525794c56b803d53c76f6cbe54e481a5bb --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/ec2/test_volume.py @@ -0,0 +1,268 @@ +from tests.compat import mock, unittest + +from boto.ec2.snapshot import Snapshot +from boto.ec2.tag import Tag, TagSet +from boto.ec2.volume import Volume, AttachmentSet, VolumeAttribute + + +class VolumeTests(unittest.TestCase): + def setUp(self): + self.attach_data = AttachmentSet() + self.attach_data.id = 1 + self.attach_data.instance_id = 2 + self.attach_data.status = "some status" + self.attach_data.attach_time = 5 + self.attach_data.device = "/dev/null" + + self.volume_one = Volume() + self.volume_one.id = 1 + self.volume_one.create_time = 5 + self.volume_one.status = "one_status" + self.volume_one.size = "one_size" + self.volume_one.snapshot_id = 1 + self.volume_one.attach_data = self.attach_data + self.volume_one.zone = "one_zone" + + self.volume_two = Volume() + self.volume_two.connection = mock.Mock() + self.volume_two.id = 1 + self.volume_two.create_time = 6 + self.volume_two.status = "two_status" + self.volume_two.size = "two_size" + self.volume_two.snapshot_id = 2 + self.volume_two.attach_data = None + self.volume_two.zone = "two_zone" + + @mock.patch("boto.ec2.volume.TaggedEC2Object.startElement") + def test_startElement_calls_TaggedEC2Object_startElement_with_correct_args(self, startElement): + volume = Volume() + volume.startElement("some name", "some attrs", None) + startElement.assert_called_with( + "some name", + "some attrs", + None + ) + + @mock.patch("boto.ec2.volume.TaggedEC2Object.startElement") + def test_startElement_retval_not_None_returns_correct_thing(self, startElement): + tag_set = mock.Mock(TagSet) + startElement.return_value = tag_set + volume = Volume() + retval = volume.startElement(None, None, None) + self.assertEqual(retval, tag_set) + + @mock.patch("boto.ec2.volume.TaggedEC2Object.startElement") + @mock.patch("boto.resultset.ResultSet") + def test_startElement_with_name_tagSet_calls_ResultSet(self, ResultSet, startElement): + startElement.return_value = None + result_set = mock.Mock(ResultSet([("item", Tag)])) + volume = Volume() + volume.tags = result_set + retval = volume.startElement("tagSet", None, None) + self.assertEqual(retval, volume.tags) + + @mock.patch("boto.ec2.volume.TaggedEC2Object.startElement") + def test_startElement_with_name_attachmentSet_returns_AttachmentSet(self, startElement): + startElement.return_value = None + attach_data = AttachmentSet() + volume = Volume() + volume.attach_data = attach_data + retval = volume.startElement("attachmentSet", None, None) + self.assertEqual(retval, volume.attach_data) + + @mock.patch("boto.ec2.volume.TaggedEC2Object.startElement") + def test_startElement_else_returns_None(self, startElement): + startElement.return_value = None + volume = Volume() + retval = volume.startElement("not tagSet or attachmentSet", None, None) + self.assertEqual(retval, None) + + def check_that_attribute_has_been_set(self, name, value, attribute, obj_value=None): + volume = Volume() + volume.endElement(name, value, None) + expected_value = obj_value if obj_value is not None else value + self.assertEqual(getattr(volume, attribute), expected_value) + + def test_endElement_sets_correct_attributes_with_values(self): + for arguments in [("volumeId", "some value", "id"), + ("createTime", "some 
time", "create_time"), + ("status", "some status", "status"), + ("size", 5, "size"), + ("snapshotId", 1, "snapshot_id"), + ("availabilityZone", "some zone", "zone"), + ("someName", "some value", "someName"), + ("encrypted", "true", "encrypted", True)]: + self.check_that_attribute_has_been_set(*arguments) + + def test_endElement_with_name_status_and_empty_string_value_doesnt_set_status(self): + volume = Volume() + volume.endElement("status", "", None) + self.assertNotEqual(volume.status, "") + + def test_update_with_result_set_greater_than_0_updates_dict(self): + self.volume_two.connection.get_all_volumes.return_value = [self.volume_one] + self.volume_two.update() + + assert all([self.volume_two.create_time == 5, + self.volume_two.status == "one_status", + self.volume_two.size == "one_size", + self.volume_two.snapshot_id == 1, + self.volume_two.attach_data == self.attach_data, + self.volume_two.zone == "one_zone"]) + + def test_update_with_validate_true_raises_value_error(self): + self.volume_one.connection = mock.Mock() + self.volume_one.connection.get_all_volumes.return_value = [] + with self.assertRaisesRegexp(ValueError, "^1 is not a valid Volume ID$"): + self.volume_one.update(True) + + def test_update_returns_status(self): + self.volume_one.connection = mock.Mock() + self.volume_one.connection.get_all_volumes.return_value = [self.volume_two] + retval = self.volume_one.update() + self.assertEqual(retval, "two_status") + + def test_delete_calls_delete_volume(self): + self.volume_one.connection = mock.Mock() + self.volume_one.delete() + self.volume_one.connection.delete_volume.assert_called_with( + 1, + dry_run=False + ) + + def test_attach_calls_attach_volume(self): + self.volume_one.connection = mock.Mock() + self.volume_one.attach("instance_id", "/dev/null") + self.volume_one.connection.attach_volume.assert_called_with( + 1, + "instance_id", + "/dev/null", + dry_run=False + ) + + def test_detach_calls_detach_volume(self): + self.volume_one.connection = mock.Mock() + self.volume_one.detach() + self.volume_one.connection.detach_volume.assert_called_with( + 1, 2, "/dev/null", False, dry_run=False) + + def test_detach_with_no_attach_data(self): + self.volume_two.connection = mock.Mock() + self.volume_two.detach() + self.volume_two.connection.detach_volume.assert_called_with( + 1, None, None, False, dry_run=False) + + def test_detach_with_force_calls_detach_volume_with_force(self): + self.volume_one.connection = mock.Mock() + self.volume_one.detach(True) + self.volume_one.connection.detach_volume.assert_called_with( + 1, 2, "/dev/null", True, dry_run=False) + + def test_create_snapshot_calls_connection_create_snapshot(self): + self.volume_one.connection = mock.Mock() + self.volume_one.create_snapshot() + self.volume_one.connection.create_snapshot.assert_called_with( + 1, + None, + dry_run=False + ) + + def test_create_snapshot_with_description(self): + self.volume_one.connection = mock.Mock() + self.volume_one.create_snapshot("some description") + self.volume_one.connection.create_snapshot.assert_called_with( + 1, + "some description", + dry_run=False + ) + + def test_volume_state_returns_status(self): + retval = self.volume_one.volume_state() + self.assertEqual(retval, "one_status") + + def test_attachment_state_returns_state(self): + retval = self.volume_one.attachment_state() + self.assertEqual(retval, "some status") + + def test_attachment_state_no_attach_data_returns_None(self): + retval = self.volume_two.attachment_state() + self.assertEqual(retval, None) + + def 
test_snapshots_returns_snapshots(self): + snapshot_one = Snapshot() + snapshot_one.volume_id = 1 + snapshot_two = Snapshot() + snapshot_two.volume_id = 2 + + self.volume_one.connection = mock.Mock() + self.volume_one.connection.get_all_snapshots.return_value = [snapshot_one, snapshot_two] + retval = self.volume_one.snapshots() + self.assertEqual(retval, [snapshot_one]) + + def test_snapshots__with_owner_and_restorable_by(self): + self.volume_one.connection = mock.Mock() + self.volume_one.connection.get_all_snapshots.return_value = [] + self.volume_one.snapshots("owner", "restorable_by") + self.volume_one.connection.get_all_snapshots.assert_called_with( + owner="owner", restorable_by="restorable_by", dry_run=False) + + +class AttachmentSetTests(unittest.TestCase): + def check_that_attribute_has_been_set(self, name, value, attribute): + attachment_set = AttachmentSet() + attachment_set.endElement(name, value, None) + self.assertEqual(getattr(attachment_set, attribute), value) + + def test_endElement_with_name_volumeId_sets_id(self): + return self.check_that_attribute_has_been_set("volumeId", "some value", "id") + + def test_endElement_with_name_instanceId_sets_instance_id(self): + return self.check_that_attribute_has_been_set("instanceId", 1, "instance_id") + + def test_endElement_with_name_status_sets_status(self): + return self.check_that_attribute_has_been_set("status", "some value", "status") + + def test_endElement_with_name_attachTime_sets_attach_time(self): + return self.check_that_attribute_has_been_set("attachTime", 5, "attach_time") + + def test_endElement_with_name_device_sets_device(self): + return self.check_that_attribute_has_been_set("device", "/dev/null", "device") + + def test_endElement_with_other_name_sets_other_name_attribute(self): + return self.check_that_attribute_has_been_set("someName", "some value", "someName") + + +class VolumeAttributeTests(unittest.TestCase): + def setUp(self): + self.volume_attribute = VolumeAttribute() + self.volume_attribute._key_name = "key_name" + self.volume_attribute.attrs = {"key_name": False} + + def test_startElement_with_name_autoEnableIO_sets_key_name(self): + self.volume_attribute.startElement("autoEnableIO", None, None) + self.assertEqual(self.volume_attribute._key_name, "autoEnableIO") + + def test_startElement_without_name_autoEnableIO_returns_None(self): + retval = self.volume_attribute.startElement("some name", None, None) + self.assertEqual(retval, None) + + def test_endElement_with_name_value_and_value_true_sets_attrs_key_name_True(self): + self.volume_attribute.endElement("value", "true", None) + self.assertEqual(self.volume_attribute.attrs['key_name'], True) + + def test_endElement_with_name_value_and_value_false_sets_attrs_key_name_False(self): + self.volume_attribute._key_name = "other_key_name" + self.volume_attribute.endElement("value", "false", None) + self.assertEqual(self.volume_attribute.attrs['other_key_name'], False) + + def test_endElement_with_name_volumeId_sets_id(self): + self.volume_attribute.endElement("volumeId", "some_value", None) + self.assertEqual(self.volume_attribute.id, "some_value") + + def test_endElement_with_other_name_sets_other_name_attribute(self): + self.volume_attribute.endElement("someName", "some value", None) + self.assertEqual(self.volume_attribute.someName, "some value") + + +if __name__ == "__main__": + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/ecs/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ecs/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/ecs/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ecs/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..99ad33ef49fec8c69dcf20a453aeca53fd41fcf5 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/ecs/test_connection.py @@ -0,0 +1,70 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from tests.unit import unittest +from boto.ecs import ECSConnection +from tests.unit import AWSMockServiceTestCase + + +class TestECSConnection(AWSMockServiceTestCase): + connection_class = ECSConnection + + def default_body(self): + return b""" + + + True + + B00008OE6I + + + + B00008OE6I + + Canon + Photography + Canon PowerShot S400 4MP Digital Camera w/ 3x Optical Zoom + + + + """ + + def test_item_lookup(self): + self.set_http_response(status_code=200) + item_set = self.service_connection.item_lookup( + ItemId='0316067938', + ResponseGroup='Reviews' + ) + + self.assert_request_parameters( + {'ItemId': '0316067938', + 'Operation': 'ItemLookup', + 'ResponseGroup': 'Reviews', + 'Service': 'AWSECommerceService'}, + ignore_params_values=['Version', 'AWSAccessKeyId', + 'SignatureMethod', 'SignatureVersion', + 'Timestamp']) + + items = list(item_set) + self.assertEqual(len(items), 1) + self.assertTrue(item_set.is_valid) + self.assertEqual(items[0].ASIN, 'B00008OE6I') diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/elasticache/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/elasticache/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/elasticache/test_api_interface.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/elasticache/test_api_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..367c317a1e6798d7c4acdebf1e4246a12560359b --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/elasticache/test_api_interface.py @@ -0,0 +1,20 @@ +from boto.elasticache.layer1 import ElastiCacheConnection +from tests.unit import AWSMockServiceTestCase + + +class TestAPIInterface(AWSMockServiceTestCase): + connection_class = ElastiCacheConnection + + def test_required_launch_params(self): + """ Make sure only the AWS required params are required by boto """ + name = 
'test_cache_cluster' + self.set_http_response(status_code=200, body=b'{}') + self.service_connection.create_cache_cluster(name) + + self.assert_request_parameters({ + 'Action': 'CreateCacheCluster', + 'CacheClusterId': name, + }, ignore_params_values=[ + 'Version', + 'ContentType', + ]) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/emr/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/emr/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..84d3ff8f1f410d6476a93a4cb168acb034bc57e2 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/emr/test_connection.py @@ -0,0 +1,1004 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import boto.utils + +from datetime import datetime +from time import time +from tests.unit import AWSMockServiceTestCase + +from boto.emr.connection import EmrConnection +from boto.emr.emrobject import BootstrapAction, BootstrapActionList, \ + ClusterStateChangeReason, ClusterStatus, ClusterSummaryList, \ + ClusterSummary, ClusterTimeline, InstanceInfo, \ + InstanceList, InstanceGroupInfo, \ + InstanceGroup, InstanceGroupList, JobFlow, \ + JobFlowStepList, Step, StepSummaryList, \ + Cluster, RunJobFlowResponse + +# These tests are just checking the basic structure of +# the Elastic MapReduce code, by picking a few calls +# and verifying we get the expected results with mocked +# responses. The integration tests actually verify the +# API calls interact with the service correctly. 
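+# Editorial aside, a minimal hypothetical example of the pattern every
+# case below follows (not part of boto's suite): supply a canned body via
+# default_body(), trigger exactly one API call, then assert on the query
+# parameters boto generated. The XML body here is a placeholder, since
+# the real response tags do not matter for an empty result list.
+class ExampleMockedCallTest(AWSMockServiceTestCase):
+    connection_class = EmrConnection
+
+    def default_body(self):
+        # Canned bytes returned by the mocked HTTP layer.
+        return b'<ListClustersOutput><Clusters/></ListClustersOutput>'
+
+    def test_mocked_call_pattern(self):
+        self.set_http_response(status_code=200)
+        self.service_connection.list_clusters()
+        self.assert_request_parameters({
+            'Action': 'ListClusters',
+            'Version': '2009-03-31',
+        })
+
+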
+class TestListClusters(AWSMockServiceTestCase): + connection_class = EmrConnection + + def default_body(self): + return b""" + + + + + j-aaaaaaaaaaaa + + + Terminated by user request + USER_REQUEST + + TERMINATED + + 2014-01-24T01:21:21Z + 2014-01-24T01:25:26Z + 2014-01-24T02:19:46Z + + + analytics test + 10 + + + j-aaaaaaaaaaaab + + + Terminated by user request + USER_REQUEST + + TERMINATED + + 2014-01-21T02:53:08Z + 2014-01-21T02:56:40Z + 2014-01-21T03:40:22Z + + + test job + 20 + + + + + aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + + + """ + + def test_list_clusters(self): + self.set_http_response(status_code=200) + response = self.service_connection.list_clusters() + + self.assert_request_parameters({ + 'Action': 'ListClusters', + 'Version': '2009-03-31', + }) + + self.assertTrue(isinstance(response, ClusterSummaryList)) + + self.assertEqual(len(response.clusters), 2) + + self.assertTrue(isinstance(response.clusters[0], ClusterSummary)) + self.assertEqual(response.clusters[0].name, 'analytics test') + self.assertEqual(response.clusters[0].normalizedinstancehours, '10') + + self.assertTrue(isinstance(response.clusters[0].status, ClusterStatus)) + self.assertEqual(response.clusters[0].status.state, 'TERMINATED') + + self.assertTrue(isinstance(response.clusters[0].status.timeline, ClusterTimeline)) + + self.assertEqual(response.clusters[0].status.timeline.creationdatetime, '2014-01-24T01:21:21Z') + self.assertEqual(response.clusters[0].status.timeline.readydatetime, '2014-01-24T01:25:26Z') + self.assertEqual(response.clusters[0].status.timeline.enddatetime, '2014-01-24T02:19:46Z') + + self.assertTrue(isinstance(response.clusters[0].status.statechangereason, ClusterStateChangeReason)) + self.assertEqual(response.clusters[0].status.statechangereason.code, 'USER_REQUEST') + self.assertEqual(response.clusters[0].status.statechangereason.message, 'Terminated by user request') + + def test_list_clusters_created_before(self): + self.set_http_response(status_code=200) + + date = datetime.now() + response = self.service_connection.list_clusters(created_before=date) + + self.assert_request_parameters({ + 'Action': 'ListClusters', + 'CreatedBefore': date.strftime(boto.utils.ISO8601), + 'Version': '2009-03-31' + }) + + def test_list_clusters_created_after(self): + self.set_http_response(status_code=200) + + date = datetime.now() + response = self.service_connection.list_clusters(created_after=date) + + self.assert_request_parameters({ + 'Action': 'ListClusters', + 'CreatedAfter': date.strftime(boto.utils.ISO8601), + 'Version': '2009-03-31' + }) + + def test_list_clusters_states(self): + self.set_http_response(status_code=200) + response = self.service_connection.list_clusters(cluster_states=[ + 'RUNNING', + 'WAITING' + ]) + + self.assert_request_parameters({ + 'Action': 'ListClusters', + 'ClusterStates.member.1': 'RUNNING', + 'ClusterStates.member.2': 'WAITING', + 'Version': '2009-03-31' + }) + + +class TestListInstanceGroups(AWSMockServiceTestCase): + connection_class = EmrConnection + + def default_body(self): + return b""" + + + + + ig-aaaaaaaaaaaaa + m1.large + ON_DEMAND + + + Job flow terminated + CLUSTER_TERMINATED + + TERMINATED + + 2014-01-24T01:21:21Z + 2014-01-24T01:25:08Z + 2014-01-24T02:19:46Z + + + Master instance group + 1 + 0 + MASTER + + + ig-aaaaaaaaaaab + m1.large + ON_DEMAND + + + Job flow terminated + CLUSTER_TERMINATED + + TERMINATED + + 2014-01-24T01:21:21Z + 2014-01-24T01:25:26Z + 2014-01-24T02:19:46Z + + + Core instance group + 2 + 0 + CORE + + + + + 
aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + + + +""" + + def test_list_instance_groups(self): + self.set_http_response(200) + + with self.assertRaises(TypeError): + self.service_connection.list_instance_groups() + + response = self.service_connection.list_instance_groups(cluster_id='j-123') + + self.assert_request_parameters({ + 'Action': 'ListInstanceGroups', + 'ClusterId': 'j-123', + 'Version': '2009-03-31' + }) + + self.assertTrue(isinstance(response, InstanceGroupList)) + self.assertEqual(len(response.instancegroups), 2) + self.assertTrue(isinstance(response.instancegroups[0], InstanceGroupInfo)) + self.assertEqual(response.instancegroups[0].id, 'ig-aaaaaaaaaaaaa') + self.assertEqual(response.instancegroups[0].instancegrouptype, "MASTER") + self.assertEqual(response.instancegroups[0].instancetype, "m1.large") + self.assertEqual(response.instancegroups[0].market, "ON_DEMAND") + self.assertEqual(response.instancegroups[0].name, "Master instance group") + self.assertEqual(response.instancegroups[0].requestedinstancecount, '1') + self.assertEqual(response.instancegroups[0].runninginstancecount, '0') + self.assertTrue(isinstance(response.instancegroups[0].status, ClusterStatus)) + self.assertEqual(response.instancegroups[0].status.state, 'TERMINATED') + # status.statechangereason is not parsed into an object + #self.assertEqual(response.instancegroups[0].status.statechangereason.code, 'CLUSTER_TERMINATED') + +class TestListInstances(AWSMockServiceTestCase): + connection_class = EmrConnection + + def default_body(self): + return b""" + + + + + ci-123456789abc + + + Cluster was terminated. + CLUSTER_TERMINATED + + TERMINATED + + 2014-01-24T01:21:26Z + 2014-01-24T01:25:25Z + 2014-01-24T02:19:46Z + + + ip-10-0-0-60.us-west-1.compute.internal + 54.0.0.1 + ec2-54-0-0-1.us-west-1.compute.amazonaws.com + i-aaaaaaaa + 10.0.0.60 + + + ci-123456789abd + + + Cluster was terminated. + CLUSTER_TERMINATED + + TERMINATED + + 2014-01-24T01:21:26Z + 2014-01-24T01:25:25Z + 2014-01-24T02:19:46Z + + + ip-10-0-0-61.us-west-1.compute.internal + 54.0.0.2 + ec2-54-0-0-2.us-west-1.compute.amazonaws.com + i-aaaaaaab + 10.0.0.61 + + + ci-123456789abe3 + + + Cluster was terminated. 
+ CLUSTER_TERMINATED + + TERMINATED + + 2014-01-24T01:21:33Z + 2014-01-24T01:25:08Z + 2014-01-24T02:19:46Z + + + ip-10-0-0-62.us-west-1.compute.internal + 54.0.0.3 + ec2-54-0-0-3.us-west-1.compute.amazonaws.com + i-aaaaaaac + 10.0.0.62 + + + + + aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + + + """ + + def test_list_instances(self): + self.set_http_response(200) + + with self.assertRaises(TypeError): + self.service_connection.list_instances() + + response = self.service_connection.list_instances(cluster_id='j-123') + self.assertTrue(isinstance(response, InstanceList)) + self.assertEqual(len(response.instances), 3) + self.assertTrue(isinstance(response.instances[0], InstanceInfo)) + self.assertEqual(response.instances[0].ec2instanceid, 'i-aaaaaaaa') + self.assertEqual(response.instances[0].id, 'ci-123456789abc') + self.assertEqual(response.instances[0].privatednsname , 'ip-10-0-0-60.us-west-1.compute.internal') + self.assertEqual(response.instances[0].privateipaddress , '10.0.0.60') + self.assertEqual(response.instances[0].publicdnsname , 'ec2-54-0-0-1.us-west-1.compute.amazonaws.com') + self.assertEqual(response.instances[0].publicipaddress , '54.0.0.1') + + + self.assert_request_parameters({ + 'Action': 'ListInstances', + 'ClusterId': 'j-123', + 'Version': '2009-03-31' + }) + + def test_list_instances_with_group_id(self): + self.set_http_response(200) + response = self.service_connection.list_instances( + cluster_id='j-123', instance_group_id='abc') + + self.assert_request_parameters({ + 'Action': 'ListInstances', + 'ClusterId': 'j-123', + 'InstanceGroupId': 'abc', + 'Version': '2009-03-31' + }) + + def test_list_instances_with_types(self): + self.set_http_response(200) + + response = self.service_connection.list_instances( + cluster_id='j-123', instance_group_types=[ + 'MASTER', + 'TASK' + ]) + + self.assert_request_parameters({ + 'Action': 'ListInstances', + 'ClusterId': 'j-123', + 'InstanceGroupTypeList.member.1': 'MASTER', + 'InstanceGroupTypeList.member.2': 'TASK', + 'Version': '2009-03-31' + }) + + +class TestListSteps(AWSMockServiceTestCase): + connection_class = EmrConnection + + def default_body(self): + return b""" + + + + abc123 + + + + 2014-07-01T00:00:00.000Z + + PENDING + + Step 1 + + /home/hadoop/lib/emr-s3distcp-1.0.jar + + --src + hdfs:///data/test/ + --dest + s3n://test/data + + + + CONTINUE + + + def456 + + + + 2014-07-01T00:00:00.000Z + + COMPLETED + + Step 2 + + my.main.SomeClass + s3n://test/jars/foo.jar + + CONTINUE + + + ghi789 + + + + 2014-07-01T00:00:00.000Z + + FAILED + + Step 3 + + s3n://test/jars/bar.jar + + -arg + value + + + + TERMINATE_CLUSTER + + + + + eff31ee5-0342-11e4-b3c7-9de5a93f6fcb + + +""" + + def test_list_steps(self): + self.set_http_response(200) + + with self.assertRaises(TypeError): + self.service_connection.list_steps() + + response = self.service_connection.list_steps(cluster_id='j-123') + + self.assert_request_parameters({ + 'Action': 'ListSteps', + 'ClusterId': 'j-123', + 'Version': '2009-03-31' + }) + self.assertTrue(isinstance(response, StepSummaryList)) + self.assertEqual(response.steps[0].name, 'Step 1') + + valid_states = [ + 'PENDING', + 'RUNNING', + 'COMPLETED', + 'CANCELLED', + 'FAILED', + 'INTERRUPTED' + ] + + # Check for step states + for step in response.steps: + self.assertIn(step.status.state, valid_states) + + # Check for step config + step = response.steps[0] + self.assertEqual(step.config.jar, + '/home/hadoop/lib/emr-s3distcp-1.0.jar') + self.assertEqual(len(step.config.args), 4) + self.assertEqual(step.config.args[0].value, 
'--src') + self.assertEqual(step.config.args[1].value, 'hdfs:///data/test/') + + step = response.steps[1] + self.assertEqual(step.config.mainclass, 'my.main.SomeClass') + + def test_list_steps_with_states(self): + self.set_http_response(200) + response = self.service_connection.list_steps( + cluster_id='j-123', step_states=[ + 'COMPLETED', + 'FAILED' + ]) + + self.assert_request_parameters({ + 'Action': 'ListSteps', + 'ClusterId': 'j-123', + 'StepStateList.member.1': 'COMPLETED', + 'StepStateList.member.2': 'FAILED', + 'Version': '2009-03-31' + }) + self.assertTrue(isinstance(response, StepSummaryList)) + self.assertEqual(response.steps[0].name, 'Step 1') + +class TestListBootstrapActions(AWSMockServiceTestCase): + connection_class = EmrConnection + + def default_body(self): + return b"""""" + + def test_list_bootstrap_actions(self): + self.set_http_response(200) + + with self.assertRaises(TypeError): + self.service_connection.list_bootstrap_actions() + + response = self.service_connection.list_bootstrap_actions(cluster_id='j-123') + + self.assert_request_parameters({ + 'Action': 'ListBootstrapActions', + 'ClusterId': 'j-123', + 'Version': '2009-03-31' + }) + + +class TestDescribeCluster(AWSMockServiceTestCase): + connection_class = EmrConnection + + def default_body(self): + return b""" + + + + j-aaaaaaaaa + + + us-west-1c + my_secret_key + + 2.4.2 + true + + + Terminated by user request + USER_REQUEST + + TERMINATED + + 2014-01-24T01:21:21Z + 2014-01-24T01:25:26Z + 2014-01-24T02:19:46Z + + + false + test analytics + 2.4.2 + + + hadoop + 1.0.3 + + + false + ec2-184-0-0-1.us-west-1.compute.amazonaws.com + 10 + my-service-role + + + + aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + + + """ + + def test_describe_cluster(self): + self.set_http_response(200) + + with self.assertRaises(TypeError): + self.service_connection.describe_cluster() + + response = self.service_connection.describe_cluster(cluster_id='j-123') + + self.assertTrue(isinstance(response, Cluster)) + self.assertEqual(response.id, 'j-aaaaaaaaa') + self.assertEqual(response.runningamiversion, '2.4.2') + self.assertEqual(response.visibletoallusers, 'true') + self.assertEqual(response.autoterminate, 'false') + self.assertEqual(response.name, 'test analytics') + self.assertEqual(response.requestedamiversion, '2.4.2') + self.assertEqual(response.terminationprotected, 'false') + self.assertEqual(response.ec2instanceattributes.ec2availabilityzone, "us-west-1c") + self.assertEqual(response.ec2instanceattributes.ec2keyname, 'my_secret_key') + self.assertEqual(response.status.state, 'TERMINATED') + self.assertEqual(response.applications[0].name, 'hadoop') + self.assertEqual(response.applications[0].version, '1.0.3') + self.assertEqual(response.masterpublicdnsname, 'ec2-184-0-0-1.us-west-1.compute.amazonaws.com') + self.assertEqual(response.normalizedinstancehours, '10') + self.assertEqual(response.servicerole, 'my-service-role') + + self.assert_request_parameters({ + 'Action': 'DescribeCluster', + 'ClusterId': 'j-123', + 'Version': '2009-03-31' + }) + + +class TestDescribeStep(AWSMockServiceTestCase): + connection_class = EmrConnection + + def default_body(self): + return b"""""" + + def test_describe_step(self): + self.set_http_response(200) + + with self.assertRaises(TypeError): + self.service_connection.describe_step() + + with self.assertRaises(TypeError): + self.service_connection.describe_step(cluster_id='j-123') + + with self.assertRaises(TypeError): + self.service_connection.describe_step(step_id='abc') + + response = 
self.service_connection.describe_step( + cluster_id='j-123', step_id='abc') + + self.assert_request_parameters({ + 'Action': 'DescribeStep', + 'ClusterId': 'j-123', + 'StepId': 'abc', + 'Version': '2009-03-31' + }) + + +class TestAddJobFlowSteps(AWSMockServiceTestCase): + connection_class = EmrConnection + + def default_body(self): + return b""" + + + Foo + Bar + + +""" + + def test_add_jobflow_steps(self): + self.set_http_response(200) + + response = self.service_connection.add_jobflow_steps( + jobflow_id='j-123', steps=[]) + + # Make sure the correct object is returned, as this was + # previously set to incorrectly return an empty instance + # of RunJobFlowResponse. + self.assertTrue(isinstance(response, JobFlowStepList)) + self.assertEqual(response.stepids[0].value, 'Foo') + self.assertEqual(response.stepids[1].value, 'Bar') + + +class TestBuildTagList(AWSMockServiceTestCase): + connection_class = EmrConnection + + def test_key_without_value_encoding(self): + input_dict = { + 'KeyWithNoValue': '', + 'AnotherKeyWithNoValue': None + } + res = self.service_connection._build_tag_list(input_dict) + # Keys are outputted in ascending key order. + expected = { + 'Tags.member.1.Key': 'AnotherKeyWithNoValue', + 'Tags.member.2.Key': 'KeyWithNoValue' + } + self.assertEqual(expected, res) + + def test_key_full_key_value_encoding(self): + input_dict = { + 'FirstKey': 'One', + 'SecondKey': 'Two' + } + res = self.service_connection._build_tag_list(input_dict) + # Keys are outputted in ascending key order. + expected = { + 'Tags.member.1.Key': 'FirstKey', + 'Tags.member.1.Value': 'One', + 'Tags.member.2.Key': 'SecondKey', + 'Tags.member.2.Value': 'Two' + } + self.assertEqual(expected, res) + + +class TestAddTag(AWSMockServiceTestCase): + connection_class = EmrConnection + + def default_body(self): + return b""" + + + 88888888-8888-8888-8888-888888888888 + + + """ + + def test_add_mix_of_tags_with_without_values(self): + input_tags = { + 'FirstKey': 'One', + 'SecondKey': 'Two', + 'ZzzNoValue': '' + } + self.set_http_response(200) + + with self.assertRaises(TypeError): + self.service_connection.add_tags() + + with self.assertRaises(TypeError): + self.service_connection.add_tags('j-123') + + with self.assertRaises(AssertionError): + self.service_connection.add_tags('j-123', []) + + response = self.service_connection.add_tags('j-123', input_tags) + + self.assertTrue(response) + self.assert_request_parameters({ + 'Action': 'AddTags', + 'ResourceId': 'j-123', + 'Tags.member.1.Key': 'FirstKey', + 'Tags.member.1.Value': 'One', + 'Tags.member.2.Key': 'SecondKey', + 'Tags.member.2.Value': 'Two', + 'Tags.member.3.Key': 'ZzzNoValue', + 'Version': '2009-03-31' + }) + + +class TestRemoveTag(AWSMockServiceTestCase): + connection_class = EmrConnection + + def default_body(self): + return b""" + + + 88888888-8888-8888-8888-888888888888 + + + """ + + def test_remove_tags(self): + input_tags = { + 'FirstKey': 'One', + 'SecondKey': 'Two', + 'ZzzNoValue': '' + } + self.set_http_response(200) + + with self.assertRaises(TypeError): + self.service_connection.add_tags() + + with self.assertRaises(TypeError): + self.service_connection.add_tags('j-123') + + with self.assertRaises(AssertionError): + self.service_connection.add_tags('j-123', []) + + response = self.service_connection.remove_tags('j-123', ['FirstKey', 'SecondKey']) + + self.assertTrue(response) + self.assert_request_parameters({ + 'Action': 'RemoveTags', + 'ResourceId': 'j-123', + 'TagKeys.member.1': 'FirstKey', + 'TagKeys.member.2': 'SecondKey', + 'Version': 
'2009-03-31' + }) + +class DescribeJobFlowsTestBase(AWSMockServiceTestCase): + connection_class = EmrConnection + + def default_body(self): + return b""" + + + + + 2.4.2 + + 2014-01-24T01:21:21Z + Terminated by user request + 2014-01-24T01:25:26Z + 2014-01-24T01:25:26Z + TERMINATED + 2014-01-24T02:19:46Z + + + true + + test analytics + j-aaaaaa + + + + 2014-01-24T01:21:21Z + 2014-01-24T01:25:26Z + COMPLETED + 2014-01-24T01:26:08Z + + + + + s3://us-west-1.elasticmapreduce/libs/hive/hive-script + --base-path + s3://us-west-1.elasticmapreduce/libs/hive/ + --install-hive + --hive-versions + 0.11.0.1 + + s3://us-west-1.elasticmapreduce/libs/script-runner/script-runner.jar + + + Setup hive + TERMINATE_JOB_FLOW + + + + + + us-west-1c + + m1.large + my_key + true + + + 2014-01-24T01:21:21Z + 0 + 2014-01-24T01:23:56Z + 2014-01-24T01:25:08Z + ENDED + 2014-01-24T02:19:46Z + 1 + m1.large + Job flow terminated + ON_DEMAND + ig-aaaaaa + MASTER + Master instance group + + + 2014-01-24T01:21:21Z + 0 + 2014-01-24T01:25:26Z + 2014-01-24T01:25:26Z + ENDED + 2014-01-24T02:19:46Z + 2 + m1.large + Job flow terminated + ON_DEMAND + ig-aaaaab + CORE + Core instance group + + + m1.large + i-aaaaaa + 1.0.3 + 12 + ec2-184-0-0-1.us-west-1.compute.amazonaws.com + 3 + false + + + + + + aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + + + """ + +class TestDescribeJobFlows(DescribeJobFlowsTestBase): + + def test_describe_jobflows_response(self): + self.set_http_response(200) + + response = self.service_connection.describe_jobflows() + self.assertTrue(isinstance(response, list)) + + jf = response[0] + self.assertTrue(isinstance(jf, JobFlow)) + self.assertEqual(jf.amiversion, '2.4.2') + self.assertEqual(jf.visibletoallusers, 'true') + self.assertEqual(jf.name, 'test analytics') + self.assertEqual(jf.jobflowid, 'j-aaaaaa') + self.assertEqual(jf.ec2keyname, 'my_key') + self.assertEqual(jf.masterinstancetype, 'm1.large') + self.assertEqual(jf.availabilityzone, 'us-west-1c') + self.assertEqual(jf.keepjobflowalivewhennosteps, 'true') + self.assertEqual(jf.slaveinstancetype, 'm1.large') + self.assertEqual(jf.masterinstanceid, 'i-aaaaaa') + self.assertEqual(jf.hadoopversion, '1.0.3') + self.assertEqual(jf.normalizedinstancehours, '12') + self.assertEqual(jf.masterpublicdnsname, 'ec2-184-0-0-1.us-west-1.compute.amazonaws.com') + self.assertEqual(jf.instancecount, '3') + self.assertEqual(jf.terminationprotected, 'false') + + self.assertTrue(isinstance(jf.steps, list)) + step = jf.steps[0] + self.assertTrue(isinstance(step, Step)) + self.assertEqual(step.jar, 's3://us-west-1.elasticmapreduce/libs/script-runner/script-runner.jar') + self.assertEqual(step.name, 'Setup hive') + self.assertEqual(step.actiononfailure, 'TERMINATE_JOB_FLOW') + + self.assertTrue(isinstance(jf.instancegroups, list)) + ig = jf.instancegroups[0] + self.assertTrue(isinstance(ig, InstanceGroup)) + self.assertEqual(ig.creationdatetime, '2014-01-24T01:21:21Z') + self.assertEqual(ig.state, 'ENDED') + self.assertEqual(ig.instancerequestcount, '1') + self.assertEqual(ig.instancetype, 'm1.large') + self.assertEqual(ig.laststatechangereason, 'Job flow terminated') + self.assertEqual(ig.market, 'ON_DEMAND') + self.assertEqual(ig.instancegroupid, 'ig-aaaaaa') + self.assertEqual(ig.instancerole, 'MASTER') + self.assertEqual(ig.name, 'Master instance group') + + def test_describe_jobflows_no_args(self): + self.set_http_response(200) + + self.service_connection.describe_jobflows() + + self.assert_request_parameters({ + 'Action': 'DescribeJobFlows', + }, 
ignore_params_values=['Version']) + + def test_describe_jobflows_filtered(self): + self.set_http_response(200) + + now = datetime.now() + a_bit_before = datetime.fromtimestamp(time() - 1000) + + self.service_connection.describe_jobflows(states=['WAITING', 'RUNNING'], jobflow_ids=['j-aaaaaa', 'j-aaaaab'], created_after=a_bit_before, created_before=now) + self.assert_request_parameters({ + 'Action': 'DescribeJobFlows', + 'JobFlowIds.member.1': 'j-aaaaaa', + 'JobFlowIds.member.2': 'j-aaaaab', + 'JobFlowStates.member.1': 'WAITING', + 'JobFlowStates.member.2': 'RUNNING', + 'CreatedAfter': a_bit_before.strftime(boto.utils.ISO8601), + 'CreatedBefore': now.strftime(boto.utils.ISO8601), + }, ignore_params_values=['Version']) + +class TestDescribeJobFlow(DescribeJobFlowsTestBase): + def test_describe_jobflow(self): + self.set_http_response(200) + + response = self.service_connection.describe_jobflow('j-aaaaaa') + self.assertTrue(isinstance(response, JobFlow)) + self.assert_request_parameters({ + 'Action': 'DescribeJobFlows', + 'JobFlowIds.member.1': 'j-aaaaaa', + }, ignore_params_values=['Version']) + +class TestRunJobFlow(AWSMockServiceTestCase): + connection_class = EmrConnection + + def default_body(self): + return b""" + + + j-ZKIY4CKQRX72 + + + aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + + +""" + + def test_run_jobflow_service_role(self): + self.set_http_response(200) + + response = self.service_connection.run_jobflow( + 'EmrCluster', service_role='EMR_DefaultRole') + + self.assertTrue(response) + self.assert_request_parameters({ + 'Action': 'RunJobFlow', + 'Version': '2009-03-31', + 'ServiceRole': 'EMR_DefaultRole', + 'Name': 'EmrCluster' }, + ignore_params_values=['ActionOnFailure', 'Instances.InstanceCount', + 'Instances.KeepJobFlowAliveWhenNoSteps', + 'Instances.MasterInstanceType', + 'Instances.SlaveInstanceType']) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/emr/test_emr_responses.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/emr/test_emr_responses.py new file mode 100644 index 0000000000000000000000000000000000000000..dda6b9288ad91ac08df51099bb6d4c6f00bca5d0 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/emr/test_emr_responses.py @@ -0,0 +1,388 @@ +# Copyright (c) 2010 Jeremy Thurgood +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +# NOTE: These tests only cover the very simple cases I needed to test +# for the InstanceGroup fix. 
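+#
+# For reference, the parse path these fixtures exercise (the _parse_xml
+# helper below does exactly this) is boto's generic SAX machinery:
+#
+#     rs = ResultSet([('member', emrobject.JobFlow)])
+#     h = handler.XmlHandler(rs, None)
+#     xml.sax.parseString(JOB_FLOW_EXAMPLE, h)
+#     [jobflow] = rs  # one emrobject.JobFlow per <member> in the fixture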
+ +import xml.sax + +from boto import handler +from boto.emr import emrobject +from boto.resultset import ResultSet +from tests.compat import unittest + + +JOB_FLOW_EXAMPLE = b""" + + + + + + 2009-01-28T21:49:16Z + 2009-01-28T21:49:16Z + STARTING + + + + + + + s3://elasticmapreduce/libs/hue/install-hue + + Install Hue + + + + true + + Hue + + MyJobFlowName + mybucket/subdir/ + + + + 2009-01-28T21:49:16Z + PENDING + + + + MyJarFile + MyMailClass + + arg1 + arg2 + + + + MyStepName + CONTINUE + + + + j-3UN6WX5RRO2AG + + + us-east-1a + + m1.small + m1.small + myec2keyname + 4 + true + + + + + + 9cea3229-ed85-11dd-9877-6fad448a8419 + + +""" + +JOB_FLOW_COMPLETED = b""" + + + + + + 2010-10-21T01:00:25Z + Steps completed + 2010-10-21T01:03:59Z + 2010-10-21T01:03:59Z + COMPLETED + 2010-10-21T01:44:18Z + + + RealJobFlowName + s3n://example.emrtest.scripts/jobflow_logs/ + + + + + s3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar + + s3n://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch + + + + Setup Hadoop Debugging + TERMINATE_JOB_FLOW + + + 2010-10-21T01:00:25Z + 2010-10-21T01:03:59Z + COMPLETED + 2010-10-21T01:04:22Z + + + + + + /home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar + + -mapper + s3://example.emrtest.scripts/81d8-5a9d3df4a86c-InitialMapper.py + -reducer + s3://example.emrtest.scripts/81d8-5a9d3df4a86c-InitialReducer.py + -input + s3://example.emrtest.data/raw/2010/10/20/* + -input + s3://example.emrtest.data/raw/2010/10/19/* + -input + s3://example.emrtest.data/raw/2010/10/18/* + -input + s3://example.emrtest.data/raw/2010/10/17/* + -input + s3://example.emrtest.data/raw/2010/10/16/* + -input + s3://example.emrtest.data/raw/2010/10/15/* + -input + s3://example.emrtest.data/raw/2010/10/14/* + -output + s3://example.emrtest.crunched/ + + + + testjob_Initial + TERMINATE_JOB_FLOW + + + 2010-10-21T01:00:25Z + 2010-10-21T01:04:22Z + COMPLETED + 2010-10-21T01:36:18Z + + + + + + /home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar + + -mapper + s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step1Mapper.py + -reducer + s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step1Reducer.py + -input + s3://example.emrtest.crunched/* + -output + s3://example.emrtest.step1/ + + + + testjob_step1 + TERMINATE_JOB_FLOW + + + 2010-10-21T01:00:25Z + 2010-10-21T01:36:18Z + COMPLETED + 2010-10-21T01:37:51Z + + + + + + /home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar + + -mapper + s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step2Mapper.py + -reducer + s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step2Reducer.py + -input + s3://example.emrtest.crunched/* + -output + s3://example.emrtest.step2/ + + + + testjob_step2 + TERMINATE_JOB_FLOW + + + 2010-10-21T01:00:25Z + 2010-10-21T01:37:51Z + COMPLETED + 2010-10-21T01:39:32Z + + + + + + /home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar + + -mapper + s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step3Mapper.py + -reducer + s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step3Reducer.py + -input + s3://example.emrtest.step1/* + -output + s3://example.emrtest.step3/ + + + + testjob_step3 + TERMINATE_JOB_FLOW + + + 2010-10-21T01:00:25Z + 2010-10-21T01:39:32Z + COMPLETED + 2010-10-21T01:41:22Z + + + + + + /home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar + + -mapper + s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step4Mapper.py + -reducer + s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step4Reducer.py + -input + s3://example.emrtest.step1/* + -output + s3://example.emrtest.step4/ + + + + testjob_step4 + 
TERMINATE_JOB_FLOW + + + 2010-10-21T01:00:25Z + 2010-10-21T01:41:22Z + COMPLETED + 2010-10-21T01:43:03Z + + + + j-3H3Q13JPFLU22 + + m1.large + i-64c21609 + + us-east-1b + + + + 2010-10-21T01:00:25Z + 0 + 2010-10-21T01:02:09Z + 2010-10-21T01:03:03Z + ENDED + 2010-10-21T01:44:18Z + 1 + m1.large + ON_DEMAND + Job flow terminated + MASTER + ig-EVMHOZJ2SCO8 + master + + + 2010-10-21T01:00:25Z + 0 + 2010-10-21T01:03:59Z + 2010-10-21T01:03:59Z + ENDED + 2010-10-21T01:44:18Z + 9 + m1.large + ON_DEMAND + Job flow terminated + CORE + ig-YZHDYVITVHKB + slave + + + 40 + 0.20 + m1.large + ec2-184-72-153-139.compute-1.amazonaws.com + myubersecurekey + 10 + false + + + + + + c31e701d-dcb4-11df-b5d9-337fc7fe4773 + + +""" + + +class TestEMRResponses(unittest.TestCase): + def _parse_xml(self, body, markers): + rs = ResultSet(markers) + h = handler.XmlHandler(rs, None) + xml.sax.parseString(body, h) + return rs + + def _assert_fields(self, response, **fields): + for field, expected in fields.items(): + actual = getattr(response, field) + self.assertEquals(expected, actual, + "Field %s: %r != %r" % (field, expected, actual)) + + def test_JobFlows_example(self): + [jobflow] = self._parse_xml(JOB_FLOW_EXAMPLE, + [('member', emrobject.JobFlow)]) + self._assert_fields(jobflow, + creationdatetime='2009-01-28T21:49:16Z', + startdatetime='2009-01-28T21:49:16Z', + state='STARTING', + instancecount='4', + jobflowid='j-3UN6WX5RRO2AG', + loguri='mybucket/subdir/', + name='MyJobFlowName', + availabilityzone='us-east-1a', + slaveinstancetype='m1.small', + masterinstancetype='m1.small', + ec2keyname='myec2keyname', + keepjobflowalivewhennosteps='true') + + def test_JobFlows_completed(self): + [jobflow] = self._parse_xml(JOB_FLOW_COMPLETED, + [('member', emrobject.JobFlow)]) + self._assert_fields(jobflow, + creationdatetime='2010-10-21T01:00:25Z', + startdatetime='2010-10-21T01:03:59Z', + enddatetime='2010-10-21T01:44:18Z', + state='COMPLETED', + instancecount='10', + jobflowid='j-3H3Q13JPFLU22', + loguri='s3n://example.emrtest.scripts/jobflow_logs/', + name='RealJobFlowName', + availabilityzone='us-east-1b', + slaveinstancetype='m1.large', + masterinstancetype='m1.large', + ec2keyname='myubersecurekey', + keepjobflowalivewhennosteps='false') + self.assertEquals(6, len(jobflow.steps)) + self.assertEquals(2, len(jobflow.instancegroups)) + diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/emr/test_instance_group_args.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/emr/test_instance_group_args.py new file mode 100644 index 0000000000000000000000000000000000000000..cc5c747bb7f28757b5f6de13410f973dd89d02b9 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/emr/test_instance_group_args.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python +# Author: Charlie Schluting +# +# Test to ensure initalization of InstanceGroup object emits appropriate errors +# if bidprice is not specified, but allows float, int, Decimal. + +from decimal import Decimal + +from tests.compat import unittest +from boto.emr.instance_group import InstanceGroup + + +class TestInstanceGroupArgs(unittest.TestCase): + + def test_bidprice_missing_spot(self): + """ + Test InstanceGroup init raises ValueError when market==spot and + bidprice is not specified. + """ + with self.assertRaisesRegexp(ValueError, 'bidprice must be specified'): + InstanceGroup(1, 'MASTER', 'm1.small', + 'SPOT', 'master') + + def test_bidprice_missing_ondemand(self): + """ + Test InstanceGroup init accepts a missing bidprice arg, when market is + ON_DEMAND. 
+ """ + instance_group = InstanceGroup(1, 'MASTER', 'm1.small', + 'ON_DEMAND', 'master') + + def test_bidprice_Decimal(self): + """ + Test InstanceGroup init works with bidprice type = Decimal. + """ + instance_group = InstanceGroup(1, 'MASTER', 'm1.small', + 'SPOT', 'master', bidprice=Decimal(1.10)) + self.assertEquals('1.10', instance_group.bidprice[:4]) + + def test_bidprice_float(self): + """ + Test InstanceGroup init works with bidprice type = float. + """ + instance_group = InstanceGroup(1, 'MASTER', 'm1.small', + 'SPOT', 'master', bidprice=1.1) + self.assertEquals('1.1', instance_group.bidprice) + + def test_bidprice_string(self): + """ + Test InstanceGroup init works with bidprice type = string. + """ + instance_group = InstanceGroup(1, 'MASTER', 'm1.small', + 'SPOT', 'master', bidprice='1.1') + self.assertEquals('1.1', instance_group.bidprice) + +if __name__ == "__main__": + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_concurrent.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_concurrent.py new file mode 100644 index 0000000000000000000000000000000000000000..dd33e1702512b828c970e58bcfe024be6917bbd4 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_concurrent.py @@ -0,0 +1,173 @@ +#!/usr/bin/env python +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+import tempfile
+from boto.compat import Queue
+
+from tests.compat import mock, unittest
+from tests.unit import AWSMockServiceTestCase
+
+from boto.glacier.concurrent import ConcurrentUploader, ConcurrentDownloader
+from boto.glacier.concurrent import UploadWorkerThread
+from boto.glacier.concurrent import _END_SENTINEL
+
+
+class FakeThreadedConcurrentUploader(ConcurrentUploader):
+    def _start_upload_threads(self, results_queue, upload_id,
+                              worker_queue, filename):
+        self.results_queue = results_queue
+        self.worker_queue = worker_queue
+        self.upload_id = upload_id
+
+    def _wait_for_upload_threads(self, hash_chunks, result_queue, total_parts):
+        for i in range(total_parts):
+            hash_chunks[i] = b'foo'
+
+
+class FakeThreadedConcurrentDownloader(ConcurrentDownloader):
+    def _start_download_threads(self, results_queue, worker_queue):
+        self.results_queue = results_queue
+        self.worker_queue = worker_queue
+
+    def _wait_for_download_threads(self, filename, result_queue, total_parts):
+        pass
+
+
+class TestConcurrentUploader(unittest.TestCase):
+
+    def setUp(self):
+        super(TestConcurrentUploader, self).setUp()
+        self.stat_patch = mock.patch('os.stat')
+        self.addCleanup(self.stat_patch.stop)
+        self.stat_mock = self.stat_patch.start()
+        # Give a default value for tests that don't care
+        # what the file size is.
+        self.stat_mock.return_value.st_size = 1024 * 1024 * 8
+
+    def test_calculate_required_part_size(self):
+        self.stat_mock.return_value.st_size = 1024 * 1024 * 8
+        uploader = ConcurrentUploader(mock.Mock(), 'vault_name')
+        total_parts, part_size = uploader._calculate_required_part_size(
+            1024 * 1024 * 8)
+        self.assertEqual(total_parts, 2)
+        self.assertEqual(part_size, 4 * 1024 * 1024)
+
+    def test_calculate_required_part_size_too_small(self):
+        too_small = 1 * 1024 * 1024
+        self.stat_mock.return_value.st_size = 1024 * 1024 * 1024
+        uploader = ConcurrentUploader(mock.Mock(), 'vault_name',
+                                      part_size=too_small)
+        total_parts, part_size = uploader._calculate_required_part_size(
+            1024 * 1024 * 1024)
+        self.assertEqual(total_parts, 256)
+        # Part size is 4MB, not the passed-in 1MB.
+        self.assertEqual(part_size, 4 * 1024 * 1024)
+
+    def test_work_queue_is_correctly_populated(self):
+        uploader = FakeThreadedConcurrentUploader(mock.MagicMock(),
+                                                  'vault_name')
+        uploader.upload('foofile')
+        q = uploader.worker_queue
+        items = [q.get() for i in range(q.qsize())]
+        self.assertEqual(items[0], (0, 4 * 1024 * 1024))
+        self.assertEqual(items[1], (1, 4 * 1024 * 1024))
+        # 2 for the parts, 10 for the end sentinels (10 threads).
+        self.assertEqual(len(items), 12)
+
+    def test_correct_low_level_api_calls(self):
+        api_mock = mock.MagicMock()
+        uploader = FakeThreadedConcurrentUploader(api_mock, 'vault_name')
+        uploader.upload('foofile')
+        # The threads call upload_part, so we're just verifying the
+        # initiate/complete multipart API calls.
+        api_mock.initiate_multipart_upload.assert_called_with(
+            'vault_name', 4 * 1024 * 1024, None)
+        api_mock.complete_multipart_upload.assert_called_with(
+            'vault_name', mock.ANY, mock.ANY, 8 * 1024 * 1024)
+
+    def test_downloader_work_queue_is_correctly_populated(self):
+        job = mock.MagicMock()
+        job.archive_size = 8 * 1024 * 1024
+        downloader = FakeThreadedConcurrentDownloader(job)
+        downloader.download('foofile')
+        q = downloader.worker_queue
+        items = [q.get() for i in range(q.qsize())]
+        self.assertEqual(items[0], (0, 4 * 1024 * 1024))
+        self.assertEqual(items[1], (1, 4 * 1024 * 1024))
+        # 2 for the parts, 10 for the end sentinels (10 threads).
+        self.assertEqual(len(items), 12)
+
+
+class TestUploaderThread(unittest.TestCase):
+    def setUp(self):
+        self.fileobj = tempfile.NamedTemporaryFile()
+        self.filename = self.fileobj.name
+
+    def test_fileobj_closed_when_thread_shuts_down(self):
+        thread = UploadWorkerThread(mock.Mock(), 'vault_name',
+                                    self.filename, 'upload_id',
+                                    Queue(), Queue())
+        fileobj = thread._fileobj
+        self.assertFalse(fileobj.closed)
+        # By setting should_continue to False, it should immediately
+        # exit, and we can still verify cleanup behavior.
+        thread.should_continue = False
+        thread.run()
+        self.assertTrue(fileobj.closed)
+
+    def test_upload_errors_have_exception_messages(self):
+        api = mock.Mock()
+        job_queue = Queue()
+        result_queue = Queue()
+        upload_thread = UploadWorkerThread(
+            api, 'vault_name', self.filename,
+            'upload_id', job_queue, result_queue, num_retries=1,
+            time_between_retries=0)
+        api.upload_part.side_effect = Exception("exception message")
+        job_queue.put((0, 1024))
+        job_queue.put(_END_SENTINEL)
+
+        upload_thread.run()
+        result = result_queue.get(timeout=1)
+        self.assertIn("exception message", str(result))
+
+    def test_num_retries_is_obeyed(self):
+        # Total attempts is 1 + num_retries, so with num_retries=2 the
+        # upload is attempted once and, on failure, retried up to 2 more
+        # times for a total of 3 attempts.
+        api = mock.Mock()
+        job_queue = Queue()
+        result_queue = Queue()
+        upload_thread = UploadWorkerThread(
+            api, 'vault_name', self.filename,
+            'upload_id', job_queue, result_queue, num_retries=2,
+            time_between_retries=0)
+        api.upload_part.side_effect = Exception()
+        job_queue.put((0, 1024))
+        job_queue.put(_END_SENTINEL)
+
+        upload_thread.run()
+        self.assertEqual(api.upload_part.call_count, 3)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_job.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_job.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7b7b1fba08e9af0a125a9177dd7aa644a8feea6
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_job.py
@@ -0,0 +1,81 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
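+#
+# A sketch of the checksum behaviour under test (inferred from the
+# assertions below; names follow boto.glacier.job but the body here is
+# illustrative, not the actual implementation):
+#
+#     response = layer1.get_job_output(vault_name, job_id, byte_range)
+#     if validate_checksum:
+#         if tree_hash_from_str(response.read()) != response['TreeHash']:
+#             raise TreeHashDoesNotMatchError(...)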
+# +from boto.compat import StringIO +from tests.compat import mock, unittest + +from boto.glacier.job import Job +from boto.glacier.layer1 import Layer1 +from boto.glacier.response import GlacierResponse +from boto.glacier.exceptions import TreeHashDoesNotMatchError + + +class TestJob(unittest.TestCase): + def setUp(self): + self.api = mock.Mock(spec=Layer1) + self.vault = mock.Mock() + self.vault.layer1 = self.api + self.job = Job(self.vault) + + def test_get_job_validate_checksum_success(self): + response = GlacierResponse(mock.Mock(), None) + response['TreeHash'] = 'tree_hash' + self.api.get_job_output.return_value = response + with mock.patch('boto.glacier.job.tree_hash_from_str') as t: + t.return_value = 'tree_hash' + self.job.get_output(byte_range=(1, 1024), validate_checksum=True) + + def test_get_job_validation_fails(self): + response = GlacierResponse(mock.Mock(), None) + response['TreeHash'] = 'tree_hash' + self.api.get_job_output.return_value = response + with mock.patch('boto.glacier.job.tree_hash_from_str') as t: + t.return_value = 'BAD_TREE_HASH_VALUE' + with self.assertRaises(TreeHashDoesNotMatchError): + # With validate_checksum set to True, this call fails. + self.job.get_output(byte_range=(1, 1024), validate_checksum=True) + # With validate_checksum set to False, this call succeeds. + self.job.get_output(byte_range=(1, 1024), validate_checksum=False) + + def test_download_to_fileobj(self): + http_response = mock.Mock(read=mock.Mock(return_value='xyz')) + response = GlacierResponse(http_response, None) + response['TreeHash'] = 'tree_hash' + self.api.get_job_output.return_value = response + fileobj = StringIO() + self.job.archive_size = 3 + with mock.patch('boto.glacier.job.tree_hash_from_str') as t: + t.return_value = 'tree_hash' + self.job.download_to_fileobj(fileobj) + fileobj.seek(0) + self.assertEqual(http_response.read.return_value, fileobj.read()) + + def test_calc_num_chunks(self): + self.job.archive_size = 0 + self.assertEqual(self.job._calc_num_chunks(self.job.DefaultPartSize), 0) + self.job.archive_size = 1 + self.assertEqual(self.job._calc_num_chunks(self.job.DefaultPartSize), 1) + self.job.archive_size = self.job.DefaultPartSize + 1 + self.assertEqual(self.job._calc_num_chunks(self.job.DefaultPartSize), 2) + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_layer1.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..4c8f0cf75169f575c05e4179b4f43563bf08dd0f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_layer1.py @@ -0,0 +1,98 @@ +import json +import copy +import tempfile + +from tests.unit import AWSMockServiceTestCase +from boto.glacier.layer1 import Layer1 + + +class GlacierLayer1ConnectionBase(AWSMockServiceTestCase): + connection_class = Layer1 + + def setUp(self): + super(GlacierLayer1ConnectionBase, self).setUp() + self.json_header = [('Content-Type', 'application/json')] + self.vault_name = u'examplevault' + self.vault_arn = 'arn:aws:glacier:us-east-1:012345678901:vaults/' + \ + self.vault_name + self.vault_info = {u'CreationDate': u'2012-03-16T22:22:47.214Z', + u'LastInventoryDate': u'2012-03-21T22:06:51.218Z', + u'NumberOfArchives': 2, + u'SizeInBytes': 12334, + u'VaultARN': self.vault_arn, + u'VaultName': self.vault_name} + + +class GlacierVaultsOperations(GlacierLayer1ConnectionBase): + + def test_create_vault_parameters(self): + self.set_http_response(status_code=201) 
+ self.service_connection.create_vault(self.vault_name) + + def test_list_vaults(self): + content = {u'Marker': None, + u'RequestId': None, + u'VaultList': [self.vault_info]} + self.set_http_response(status_code=200, header=self.json_header, + body=json.dumps(content).encode('utf-8')) + api_response = self.service_connection.list_vaults() + self.assertDictEqual(content, api_response) + + def test_describe_vaults(self): + content = copy.copy(self.vault_info) + content[u'RequestId'] = None + self.set_http_response(status_code=200, header=self.json_header, + body=json.dumps(content).encode('utf-8')) + api_response = self.service_connection.describe_vault(self.vault_name) + self.assertDictEqual(content, api_response) + + def test_delete_vault(self): + self.set_http_response(status_code=204) + self.service_connection.delete_vault(self.vault_name) + + +class GlacierJobOperations(GlacierLayer1ConnectionBase): + + def setUp(self): + super(GlacierJobOperations, self).setUp() + self.job_content = 'abc' * 1024 + + def test_initiate_archive_job(self): + content = {u'Type': u'archive-retrieval', + u'ArchiveId': u'AAABZpJrTyioDC_HsOmHae8EZp_uBSJr6cnGOLKp_XJCl-Q', + u'Description': u'Test Archive', + u'SNSTopic': u'Topic', + u'JobId': None, + u'Location': None, + u'RequestId': None} + self.set_http_response(status_code=202, header=self.json_header, + body=json.dumps(content).encode('utf-8')) + api_response = self.service_connection.initiate_job(self.vault_name, + self.job_content) + self.assertDictEqual(content, api_response) + + def test_get_archive_output(self): + header = [('Content-Type', 'application/octet-stream')] + self.set_http_response(status_code=200, header=header, + body=self.job_content) + response = self.service_connection.get_job_output(self.vault_name, + 'example-job-id') + self.assertEqual(self.job_content, response.read()) + + +class GlacierUploadArchiveResets(GlacierLayer1ConnectionBase): + def test_upload_archive(self): + fake_data = tempfile.NamedTemporaryFile() + fake_data.write(b'foobarbaz') + # First seek to a non zero offset. + fake_data.seek(2) + self.set_http_response(status_code=201) + # Simulate reading the request body when we send the request. + self.service_connection.connection.request.side_effect = \ + lambda *args: fake_data.read() + self.service_connection.upload_archive('vault_name', fake_data, 'linear_hash', + 'tree_hash') + # Verify that we seek back to the original offset after making + # a request. This ensures that if we need to resend the request we're + # back at the correct location within the file. 
+ self.assertEqual(fake_data.tell(), 2) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_layer2.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_layer2.py new file mode 100644 index 0000000000000000000000000000000000000000..84b53aac774330ea5c0e1827b9946a8cd5448e4d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_layer2.py @@ -0,0 +1,338 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from tests.unit import unittest + +from mock import call, Mock, patch, sentinel + +import codecs +from boto.glacier.layer1 import Layer1 +from boto.glacier.layer2 import Layer2 +import boto.glacier.vault +from boto.glacier.vault import Vault +from boto.glacier.vault import Job + +from datetime import datetime, tzinfo, timedelta + +# Some fixture data from the Glacier docs +FIXTURE_VAULT = { + "CreationDate": "2012-02-20T17:01:45.198Z", + "LastInventoryDate": "2012-03-20T17:03:43.221Z", + "NumberOfArchives": 192, + "SizeInBytes": 78088912, + "VaultARN": "arn:aws:glacier:us-east-1:012345678901:vaults/examplevault", + "VaultName": "examplevault" +} + +FIXTURE_VAULTS = { + 'RequestId': 'vuXO7SHTw-luynJ0Zu31AYjR3TcCn7X25r7ykpuulxY2lv8', + 'VaultList': [{'SizeInBytes': 0, 'LastInventoryDate': None, + 'VaultARN': 'arn:aws:glacier:us-east-1:686406519478:vaults/vault0', + 'VaultName': 'vault0', 'NumberOfArchives': 0, + 'CreationDate': '2013-05-17T02:38:39.049Z'}, + {'SizeInBytes': 0, 'LastInventoryDate': None, + 'VaultARN': 'arn:aws:glacier:us-east-1:686406519478:vaults/vault3', + 'VaultName': 'vault3', 'NumberOfArchives': 0, + 'CreationDate': '2013-05-17T02:31:18.659Z'}]} + +FIXTURE_PAGINATED_VAULTS = { + 'Marker': 'arn:aws:glacier:us-east-1:686406519478:vaults/vault2', + 'RequestId': 'vuXO7SHTw-luynJ0Zu31AYjR3TcCn7X25r7ykpuulxY2lv8', + 'VaultList': [{'SizeInBytes': 0, 'LastInventoryDate': None, + 'VaultARN': 'arn:aws:glacier:us-east-1:686406519478:vaults/vault0', + 'VaultName': 'vault0', 'NumberOfArchives': 0, + 'CreationDate': '2013-05-17T02:38:39.049Z'}, + {'SizeInBytes': 0, 'LastInventoryDate': None, + 'VaultARN': 'arn:aws:glacier:us-east-1:686406519478:vaults/vault1', + 'VaultName': 'vault1', 'NumberOfArchives': 0, + 'CreationDate': '2013-05-17T02:31:18.659Z'}]} +FIXTURE_PAGINATED_VAULTS_CONT = { + 'Marker': None, + 'RequestId': 'vuXO7SHTw-luynJ0Zu31AYjR3TcCn7X25r7ykpuulxY2lv8', + 'VaultList': [{'SizeInBytes': 0, 'LastInventoryDate': None, + 'VaultARN': 
'arn:aws:glacier:us-east-1:686406519478:vaults/vault2', + 'VaultName': 'vault2', 'NumberOfArchives': 0, + 'CreationDate': '2013-05-17T02:38:39.049Z'}, + {'SizeInBytes': 0, 'LastInventoryDate': None, + 'VaultARN': 'arn:aws:glacier:us-east-1:686406519478:vaults/vault3', + 'VaultName': 'vault3', 'NumberOfArchives': 0, + 'CreationDate': '2013-05-17T02:31:18.659Z'}]} + +FIXTURE_ARCHIVE_JOB = { + "Action": "ArchiveRetrieval", + "ArchiveId": ("NkbByEejwEggmBz2fTHgJrg0XBoDfjP4q6iu87-TjhqG6eGoOY9Z8i1_AUyUs" + "uhPAdTqLHy8pTl5nfCFJmDl2yEZONi5L26Omw12vcs01MNGntHEQL8MBfGlqr" + "EXAMPLEArchiveId"), + "ArchiveSizeInBytes": 16777216, + "Completed": False, + "CreationDate": "2012-05-15T17:21:39.339Z", + "CompletionDate": "2012-05-15T17:21:43.561Z", + "InventorySizeInBytes": None, + "JobDescription": "My ArchiveRetrieval Job", + "JobId": ("HkF9p6o7yjhFx-K3CGl6fuSm6VzW9T7esGQfco8nUXVYwS0jlb5gq1JZ55yHgt5v" + "P54ZShjoQzQVVh7vEXAMPLEjobID"), + "SHA256TreeHash": ("beb0fe31a1c7ca8c6c04d574ea906e3f97b31fdca7571defb5b44dc" + "a89b5af60"), + "SNSTopic": "arn:aws:sns:us-east-1:012345678901:mytopic", + "StatusCode": "InProgress", + "StatusMessage": "Operation in progress.", + "VaultARN": "arn:aws:glacier:us-east-1:012345678901:vaults/examplevault" +} + +EXAMPLE_PART_LIST_RESULT_PAGE_1 = { + "ArchiveDescription": "archive description 1", + "CreationDate": "2012-03-20T17:03:43.221Z", + "Marker": "MfgsKHVjbQ6EldVl72bn3_n5h2TaGZQUO-Qb3B9j3TITf7WajQ", + "MultipartUploadId": "OW2fM5iVylEpFEMM9_HpKowRapC3vn5sSL39_396UW9zLFUWVrnRHaPjUJddQ5OxSHVXjYtrN47NBZ-khxOjyEXAMPLE", + "PartSizeInBytes": 4194304, + "Parts": [{ + "RangeInBytes": "4194304-8388607", + "SHA256TreeHash": "01d34dabf7be316472c93b1ef80721f5d4" + }], + "VaultARN": "arn:aws:glacier:us-east-1:012345678901:vaults/demo1-vault" +} + +# The documentation doesn't say whether the non-Parts fields are defined in +# future pages, so assume they are not. 
+EXAMPLE_PART_LIST_RESULT_PAGE_2 = { + "ArchiveDescription": None, + "CreationDate": None, + "Marker": None, + "MultipartUploadId": None, + "PartSizeInBytes": None, + "Parts": [{ + "RangeInBytes": "0-4194303", + "SHA256TreeHash": "01d34dabf7be316472c93b1ef80721f5d4" + }], + "VaultARN": None +} + +EXAMPLE_PART_LIST_COMPLETE = { + "ArchiveDescription": "archive description 1", + "CreationDate": "2012-03-20T17:03:43.221Z", + "Marker": None, + "MultipartUploadId": "OW2fM5iVylEpFEMM9_HpKowRapC3vn5sSL39_396UW9zLFUWVrnRHaPjUJddQ5OxSHVXjYtrN47NBZ-khxOjyEXAMPLE", + "PartSizeInBytes": 4194304, + "Parts": [{ + "RangeInBytes": "4194304-8388607", + "SHA256TreeHash": "01d34dabf7be316472c93b1ef80721f5d4" + }, { + "RangeInBytes": "0-4194303", + "SHA256TreeHash": "01d34dabf7be316472c93b1ef80721f5d4" + }], + "VaultARN": "arn:aws:glacier:us-east-1:012345678901:vaults/demo1-vault" +} + + +class GlacierLayer2Base(unittest.TestCase): + def setUp(self): + self.mock_layer1 = Mock(spec=Layer1) + + +class TestGlacierLayer2Connection(GlacierLayer2Base): + def setUp(self): + GlacierLayer2Base.setUp(self) + self.layer2 = Layer2(layer1=self.mock_layer1) + + def test_create_vault(self): + self.mock_layer1.describe_vault.return_value = FIXTURE_VAULT + self.layer2.create_vault("My Vault") + self.mock_layer1.create_vault.assert_called_with("My Vault") + + def test_get_vault(self): + self.mock_layer1.describe_vault.return_value = FIXTURE_VAULT + vault = self.layer2.get_vault("examplevault") + self.assertEqual(vault.layer1, self.mock_layer1) + self.assertEqual(vault.name, "examplevault") + self.assertEqual(vault.size, 78088912) + self.assertEqual(vault.number_of_archives, 192) + + def test_list_vaults(self): + self.mock_layer1.list_vaults.return_value = FIXTURE_VAULTS + vaults = self.layer2.list_vaults() + self.assertEqual(vaults[0].name, "vault0") + self.assertEqual(len(vaults), 2) + + def test_list_vaults_paginated(self): + resps = [FIXTURE_PAGINATED_VAULTS, FIXTURE_PAGINATED_VAULTS_CONT] + def return_paginated_vaults_resp(marker=None, limit=None): + return resps.pop(0) + + self.mock_layer1.list_vaults = Mock(side_effect=return_paginated_vaults_resp) + vaults = self.layer2.list_vaults() + self.assertEqual(vaults[0].name, "vault0") + self.assertEqual(vaults[3].name, "vault3") + self.assertEqual(len(vaults), 4) + + +class TestVault(GlacierLayer2Base): + def setUp(self): + GlacierLayer2Base.setUp(self) + self.vault = Vault(self.mock_layer1, FIXTURE_VAULT) + + # TODO: Tests for the other methods of uploading + + def test_create_archive_writer(self): + self.mock_layer1.initiate_multipart_upload.return_value = { + "UploadId": "UPLOADID"} + writer = self.vault.create_archive_writer(description="stuff") + self.mock_layer1.initiate_multipart_upload.assert_called_with( + "examplevault", self.vault.DefaultPartSize, "stuff") + self.assertEqual(writer.vault, self.vault) + self.assertEqual(writer.upload_id, "UPLOADID") + + def test_delete_vault(self): + self.vault.delete_archive("archive") + self.mock_layer1.delete_archive.assert_called_with("examplevault", + "archive") + + def test_initiate_job(self): + class UTC(tzinfo): + """UTC""" + + def utcoffset(self, dt): + return timedelta(0) + + def tzname(self, dt): + return "Z" + + def dst(self, dt): + return timedelta(0) + + self.mock_layer1.initiate_job.return_value = {'JobId': 'job-id'} + self.vault.retrieve_inventory(start_date=datetime(2014, 0o1, 0o1, tzinfo=UTC()), + end_date=datetime(2014, 0o1, 0o2, tzinfo=UTC()), + limit=100) + self.mock_layer1.initiate_job.assert_called_with( + 
'examplevault', { + 'Type': 'inventory-retrieval', + 'InventoryRetrievalParameters': { + 'StartDate': '2014-01-01T00:00:00Z', + 'EndDate': '2014-01-02T00:00:00Z', + 'Limit': 100 + } + }) + + def test_get_job(self): + self.mock_layer1.describe_job.return_value = FIXTURE_ARCHIVE_JOB + job = self.vault.get_job( + "NkbByEejwEggmBz2fTHgJrg0XBoDfjP4q6iu87-TjhqG6eGoOY9Z8i1_AUyUsuhPA" + "dTqLHy8pTl5nfCFJmDl2yEZONi5L26Omw12vcs01MNGntHEQL8MBfGlqrEXAMPLEA" + "rchiveId") + self.assertEqual(job.action, "ArchiveRetrieval") + + def test_list_jobs(self): + self.mock_layer1.list_jobs.return_value = { + "JobList": [FIXTURE_ARCHIVE_JOB]} + jobs = self.vault.list_jobs(False, "InProgress") + self.mock_layer1.list_jobs.assert_called_with("examplevault", + False, "InProgress") + self.assertEqual(jobs[0].archive_id, + "NkbByEejwEggmBz2fTHgJrg0XBoDfjP4q6iu87-TjhqG6eGoOY9Z" + "8i1_AUyUsuhPAdTqLHy8pTl5nfCFJmDl2yEZONi5L26Omw12vcs0" + "1MNGntHEQL8MBfGlqrEXAMPLEArchiveId") + + def test_list_all_parts_one_page(self): + self.mock_layer1.list_parts.return_value = ( + dict(EXAMPLE_PART_LIST_COMPLETE)) # take a copy + parts_result = self.vault.list_all_parts(sentinel.upload_id) + expected = [call('examplevault', sentinel.upload_id)] + self.assertEquals(expected, self.mock_layer1.list_parts.call_args_list) + self.assertEquals(EXAMPLE_PART_LIST_COMPLETE, parts_result) + + def test_list_all_parts_two_pages(self): + self.mock_layer1.list_parts.side_effect = [ + # take copies + dict(EXAMPLE_PART_LIST_RESULT_PAGE_1), + dict(EXAMPLE_PART_LIST_RESULT_PAGE_2) + ] + parts_result = self.vault.list_all_parts(sentinel.upload_id) + expected = [call('examplevault', sentinel.upload_id), + call('examplevault', sentinel.upload_id, + marker=EXAMPLE_PART_LIST_RESULT_PAGE_1['Marker'])] + self.assertEquals(expected, self.mock_layer1.list_parts.call_args_list) + self.assertEquals(EXAMPLE_PART_LIST_COMPLETE, parts_result) + + @patch('boto.glacier.vault.resume_file_upload') + def test_resume_archive_from_file(self, mock_resume_file_upload): + part_size = 4 + mock_list_parts = Mock() + mock_list_parts.return_value = { + 'PartSizeInBytes': part_size, + 'Parts': [{ + 'RangeInBytes': '0-3', + 'SHA256TreeHash': '12', + }, { + 'RangeInBytes': '4-6', + 'SHA256TreeHash': '34', + }], + } + + self.vault.list_all_parts = mock_list_parts + self.vault.resume_archive_from_file( + sentinel.upload_id, file_obj=sentinel.file_obj) + mock_resume_file_upload.assert_called_once_with( + self.vault, sentinel.upload_id, part_size, sentinel.file_obj, + {0: codecs.decode('12', 'hex_codec'), 1: codecs.decode('34', 'hex_codec')}) + + +class TestJob(GlacierLayer2Base): + def setUp(self): + GlacierLayer2Base.setUp(self) + self.vault = Vault(self.mock_layer1, FIXTURE_VAULT) + self.job = Job(self.vault, FIXTURE_ARCHIVE_JOB) + + def test_get_job_output(self): + self.mock_layer1.get_job_output.return_value = "TEST_OUTPUT" + self.job.get_output((0, 100)) + self.mock_layer1.get_job_output.assert_called_with( + "examplevault", + "HkF9p6o7yjhFx-K3CGl6fuSm6VzW9T7esGQfco8nUXVYwS0jlb5gq1JZ55yHgt5vP" + "54ZShjoQzQVVh7vEXAMPLEjobID", (0, 100)) + + +class TestRangeStringParsing(unittest.TestCase): + def test_simple_range(self): + self.assertEquals( + Vault._range_string_to_part_index('0-3', 4), 0) + + def test_range_one_too_big(self): + # Off-by-one bug in Amazon's Glacier implementation + # See: https://forums.aws.amazon.com/thread.jspa?threadID=106866&tstart=0 + # Workaround is to assume that if a (start, end] range appears to be + # returned then that is what it is. 
+        self.assertEquals(
+            Vault._range_string_to_part_index('0-4', 4), 0)
+
+    def test_range_too_big(self):
+        self.assertRaises(
+            AssertionError, Vault._range_string_to_part_index, '0-5', 4)
+
+    def test_range_start_mismatch(self):
+        self.assertRaises(
+            AssertionError, Vault._range_string_to_part_index, '1-3', 4)
+
+    def test_range_end_mismatch(self):
+        # End mismatch is OK, since the last part might be short
+        self.assertEquals(
+            Vault._range_string_to_part_index('0-2', 4), 0)
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_response.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_response.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f75f64b1c4908292561aba1be8a9044a9650263
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_response.py
@@ -0,0 +1,36 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import unittest
+from tests.unit import AWSMockServiceTestCase
+from boto.glacier.layer1 import Layer1
+from boto.glacier.response import GlacierResponse
+
+class TestResponse(AWSMockServiceTestCase):
+    connection_class = Layer1
+
+    def test_204_body_isnt_passed_to_json(self):
+        response = self.create_response(status_code=204, header=[('Content-Type', 'application/json')])
+        result = GlacierResponse(response, response.getheaders())
+        self.assertEquals(result.status, response.status)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_utils.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..bace2a385d8da7e4e82c197632983fd4ed9c350a
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_utils.py
@@ -0,0 +1,165 @@
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import logging +import os +import tempfile +import time +from hashlib import sha256 +from tests.unit import unittest + +from boto.compat import BytesIO, six, StringIO +from boto.glacier.utils import minimum_part_size, chunk_hashes, tree_hash, \ + bytes_to_hex, compute_hashes_from_fileobj + + +class TestPartSizeCalculations(unittest.TestCase): + def test_small_values_still_use_default_part_size(self): + self.assertEqual(minimum_part_size(1), 4 * 1024 * 1024) + + def test_under_the_maximum_value(self): + # If we're under the maximum, we can use 4MB part sizes. + self.assertEqual(minimum_part_size(8 * 1024 * 1024), + 4 * 1024 * 1024) + + def test_gigabyte_size(self): + # If we're over the maximum default part size, we go up to the next + # power of two until we find a part size that keeps us under 10,000 + # parts. + self.assertEqual(minimum_part_size(8 * 1024 * 1024 * 10000), + 8 * 1024 * 1024) + + def test_terabyte_size(self): + # For a 4 TB file we need at least a 512 MB part size. + self.assertEqual(minimum_part_size(4 * 1024 * 1024 * 1024 * 1024), + 512 * 1024 * 1024) + + def test_file_size_too_large(self): + with self.assertRaises(ValueError): + minimum_part_size((40000 * 1024 * 1024 * 1024) + 1) + + def test_default_part_size_can_be_specified(self): + default_part_size = 2 * 1024 * 1024 + self.assertEqual(minimum_part_size(8 * 1024 * 1024, default_part_size), + default_part_size) + + +class TestChunking(unittest.TestCase): + def test_chunk_hashes_exact(self): + chunks = chunk_hashes(b'a' * (2 * 1024 * 1024)) + self.assertEqual(len(chunks), 2) + self.assertEqual(chunks[0], sha256(b'a' * 1024 * 1024).digest()) + + def test_chunks_with_leftovers(self): + bytestring = b'a' * (2 * 1024 * 1024 + 20) + chunks = chunk_hashes(bytestring) + self.assertEqual(len(chunks), 3) + self.assertEqual(chunks[0], sha256(b'a' * 1024 * 1024).digest()) + self.assertEqual(chunks[1], sha256(b'a' * 1024 * 1024).digest()) + self.assertEqual(chunks[2], sha256(b'a' * 20).digest()) + + def test_less_than_one_chunk(self): + chunks = chunk_hashes(b'aaaa') + self.assertEqual(len(chunks), 1) + self.assertEqual(chunks[0], sha256(b'aaaa').digest()) + + +class TestTreeHash(unittest.TestCase): + # For these tests, a set of reference tree hashes were computed. + # This will at least catch any regressions to the tree hash + # calculations. 
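+    #
+    # For reference, the Glacier tree hash reduces the per-chunk SHA-256
+    # digests pairwise: adjacent digests are concatenated and re-hashed,
+    # and any odd trailing digest is carried up unchanged, until a single
+    # root digest remains. A minimal sketch of that reduction (names here
+    # are illustrative, not boto's implementation; sha256 is imported at
+    # the top of this module):
+    #
+    #     def tree_hash_sketch(chunk_digests):
+    #         level = list(chunk_digests)
+    #         while len(level) > 1:
+    #             paired = [sha256(a + b).digest()
+    #                       for a, b in zip(level[::2], level[1::2])]
+    #             if len(level) % 2:
+    #                 paired.append(level[-1])
+    #             level = paired
+    #         return level[0]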
+ def calculate_tree_hash(self, bytestring): + start = time.time() + calculated = bytes_to_hex(tree_hash(chunk_hashes(bytestring))) + end = time.time() + logging.debug("Tree hash calc time for length %s: %s", + len(bytestring), end - start) + return calculated + + def test_tree_hash_calculations(self): + one_meg_bytestring = b'a' * (1 * 1024 * 1024) + two_meg_bytestring = b'a' * (2 * 1024 * 1024) + four_meg_bytestring = b'a' * (4 * 1024 * 1024) + bigger_bytestring = four_meg_bytestring + b'a' * 20 + + self.assertEqual( + self.calculate_tree_hash(one_meg_bytestring), + b'9bc1b2a288b26af7257a36277ae3816a7d4f16e89c1e7e77d0a5c48bad62b360') + self.assertEqual( + self.calculate_tree_hash(two_meg_bytestring), + b'560c2c9333c719cb00cfdffee3ba293db17f58743cdd1f7e4055373ae6300afa') + self.assertEqual( + self.calculate_tree_hash(four_meg_bytestring), + b'9491cb2ed1d4e7cd53215f4017c23ec4ad21d7050a1e6bb636c4f67e8cddb844') + self.assertEqual( + self.calculate_tree_hash(bigger_bytestring), + b'12f3cbd6101b981cde074039f6f728071da8879d6f632de8afc7cdf00661b08f') + + def test_empty_tree_hash(self): + self.assertEqual( + self.calculate_tree_hash(''), + b'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855') + + +class TestFileHash(unittest.TestCase): + def _gen_data(self): + # Generate some pseudo-random bytes of data. We include the + # hard-coded blob as an example that fails to decode via UTF-8. + return os.urandom(5000) + b'\xc2\x00' + + def test_compute_hash_tempfile(self): + # Compute a hash from a file object. On Python 2 this uses a non- + # binary mode. On Python 3, however, binary mode is required for + # binary files. If not used, you will get UTF-8 code errors. + if six.PY2: + mode = "w+" + else: + mode = "wb+" + + with tempfile.TemporaryFile(mode=mode) as f: + f.write(self._gen_data()) + f.seek(0) + + compute_hashes_from_fileobj(f, chunk_size=512) + + @unittest.skipUnless(six.PY3, 'Python 3 requires reading binary!') + def test_compute_hash_tempfile_py3(self): + # Note the missing 'b' in the mode! + with tempfile.TemporaryFile(mode='w+') as f: + with self.assertRaises(ValueError): + compute_hashes_from_fileobj(f, chunk_size=512) + + # What about file-like objects without a mode? If it has an + # encoding we use it, otherwise attempt UTF-8 encoding to + # bytes for hashing. + f = StringIO('test data' * 500) + compute_hashes_from_fileobj(f, chunk_size=512) + + @unittest.skipUnless(six.PY2, 'Python 3 requires reading binary!') + def test_compute_hash_stringio(self): + # Python 2 binary data in StringIO example + f = StringIO(self._gen_data()) + compute_hashes_from_fileobj(f, chunk_size=512) + + def test_compute_hash_bytesio(self): + # Compute a hash from a file-like BytesIO object. + f = BytesIO(self._gen_data()) + compute_hashes_from_fileobj(f, chunk_size=512) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_vault.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_vault.py new file mode 100644 index 0000000000000000000000000000000000000000..f532e3b989cae163cabfaab5e264c57d92d13d48 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_vault.py @@ -0,0 +1,176 @@ +#!/usr/bin/env python +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.compat import StringIO +from tests.compat import mock, unittest + +ANY = mock.ANY + +from boto.glacier import vault +from boto.glacier.job import Job +from boto.glacier.response import GlacierResponse + + +class TestVault(unittest.TestCase): + def setUp(self): + self.size_patch = mock.patch('os.path.getsize') + self.getsize = self.size_patch.start() + self.api = mock.Mock() + self.vault = vault.Vault(self.api, None) + self.vault.name = 'myvault' + self.mock_open = mock.mock_open() + stringio = StringIO('content') + self.mock_open.return_value.read = stringio.read + + def tearDown(self): + self.size_patch.stop() + + @mock.patch('boto.glacier.vault.compute_hashes_from_fileobj', + return_value=[b'abc', b'123']) + def test_upload_archive_small_file(self, compute_hashes): + self.getsize.return_value = 1 + + self.api.upload_archive.return_value = {'ArchiveId': 'archive_id'} + with mock.patch('boto.glacier.vault.open', self.mock_open, + create=True): + archive_id = self.vault.upload_archive( + 'filename', 'my description') + self.assertEqual(archive_id, 'archive_id') + self.api.upload_archive.assert_called_with( + 'myvault', self.mock_open.return_value, + mock.ANY, mock.ANY, 'my description') + + def test_small_part_size_is_obeyed(self): + self.vault.DefaultPartSize = 2 * 1024 * 1024 + self.vault.create_archive_writer = mock.Mock() + + self.getsize.return_value = 1 + + with mock.patch('boto.glacier.vault.open', self.mock_open, + create=True): + self.vault.create_archive_from_file('myfile') + # The write should be created with the default part size of the + # instance (2 MB). + self.vault.create_archive_writer.assert_called_with( + description=mock.ANY, part_size=self.vault.DefaultPartSize) + + def test_large_part_size_is_obeyed(self): + self.vault.DefaultPartSize = 8 * 1024 * 1024 + self.vault.create_archive_writer = mock.Mock() + self.getsize.return_value = 1 + with mock.patch('boto.glacier.vault.open', self.mock_open, + create=True): + self.vault.create_archive_from_file('myfile') + # The write should be created with the default part size of the + # instance (8 MB). + self.vault.create_archive_writer.assert_called_with( + description=mock.ANY, part_size=self.vault.DefaultPartSize) + + def test_part_size_needs_to_be_adjusted(self): + # If we have a large file (400 GB) + self.getsize.return_value = 400 * 1024 * 1024 * 1024 + self.vault.create_archive_writer = mock.Mock() + # When we try to upload the file. 
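+        # (A 400 GB archive cannot fit within the 10,000-part limit at
+        # the 4 MB default: 400 GB / 10,000 parts is about 41 MB per
+        # part, and the next power-of-two part size above that is 64 MB.)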
+ with mock.patch('boto.glacier.vault.open', self.mock_open, + create=True): + self.vault.create_archive_from_file('myfile') + # We should automatically bump up the part size used to + # 64 MB. + expected_part_size = 64 * 1024 * 1024 + self.vault.create_archive_writer.assert_called_with( + description=mock.ANY, part_size=expected_part_size) + + def test_retrieve_inventory(self): + class FakeResponse(object): + status = 202 + + def getheader(self, key, default=None): + if key == 'x-amz-job-id': + return 'HkF9p6' + elif key == 'Content-Type': + return 'application/json' + + return 'something' + + def read(self, amt=None): + return b"""{ + "Action": "ArchiveRetrieval", + "ArchiveId": "NkbByEejwEggmBz2fTHgJrg0XBoDfjP4q6iu87-EXAMPLEArchiveId", + "ArchiveSizeInBytes": 16777216, + "ArchiveSHA256TreeHash": "beb0fe31a1c7ca8c6c04d574ea906e3f97", + "Completed": false, + "CreationDate": "2012-05-15T17:21:39.339Z", + "CompletionDate": "2012-05-15T17:21:43.561Z", + "InventorySizeInBytes": null, + "JobDescription": "My ArchiveRetrieval Job", + "JobId": "HkF9p6", + "RetrievalByteRange": "0-16777215", + "SHA256TreeHash": "beb0fe31a1c7ca8c6c04d574ea906e3f97b31fd", + "SNSTopic": "arn:aws:sns:us-east-1:012345678901:mytopic", + "StatusCode": "InProgress", + "StatusMessage": "Operation in progress.", + "VaultARN": "arn:aws:glacier:us-east-1:012345678901:vaults/examplevault" +}""" + + raw_resp = FakeResponse() + init_resp = GlacierResponse(raw_resp, [('x-amz-job-id', 'JobId')]) + raw_resp_2 = FakeResponse() + desc_resp = GlacierResponse(raw_resp_2, []) + + with mock.patch.object(self.vault.layer1, 'initiate_job', + return_value=init_resp): + with mock.patch.object(self.vault.layer1, 'describe_job', + return_value=desc_resp): + # The old/back-compat variant of the call. + self.assertEqual(self.vault.retrieve_inventory(), 'HkF9p6') + + # The variant the returns a full ``Job`` object. + job = self.vault.retrieve_inventory_job() + self.assertTrue(isinstance(job, Job)) + self.assertEqual(job.id, 'HkF9p6') + + +class TestConcurrentUploads(unittest.TestCase): + + def test_concurrent_upload_file(self): + v = vault.Vault(None, None) + with mock.patch('boto.glacier.vault.ConcurrentUploader') as c: + c.return_value.upload.return_value = 'archive_id' + archive_id = v.concurrent_create_archive_from_file( + 'filename', 'my description') + c.return_value.upload.assert_called_with('filename', + 'my description') + self.assertEqual(archive_id, 'archive_id') + + def test_concurrent_upload_forwards_kwargs(self): + v = vault.Vault(None, None) + with mock.patch('boto.glacier.vault.ConcurrentUploader') as c: + c.return_value.upload.return_value = 'archive_id' + archive_id = v.concurrent_create_archive_from_file( + 'filename', 'my description', num_threads=10, + part_size=1024 * 1024 * 1024 * 8) + c.assert_called_with(None, None, num_threads=10, + part_size=1024 * 1024 * 1024 * 8) + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_writer.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_writer.py new file mode 100644 index 0000000000000000000000000000000000000000..b2875f3c82ae8d0739bf6b104ce430ac830a9680 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/glacier/test_writer.py @@ -0,0 +1,229 @@ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from hashlib import sha256 +import itertools +from boto.compat import StringIO + +from tests.unit import unittest +from mock import ( + call, + Mock, + sentinel, +) +from nose.tools import assert_equal + +from boto.glacier.layer1 import Layer1 +from boto.glacier.vault import Vault +from boto.glacier.writer import Writer, resume_file_upload +from boto.glacier.utils import bytes_to_hex, chunk_hashes, tree_hash + + +def create_mock_vault(): + vault = Mock(spec=Vault) + vault.layer1 = Mock(spec=Layer1) + vault.layer1.complete_multipart_upload.return_value = dict( + ArchiveId=sentinel.archive_id) + vault.name = sentinel.vault_name + return vault + + +def partify(data, part_size): + for i in itertools.count(0): + start = i * part_size + part = data[start:start + part_size] + if part: + yield part + else: + return + + +def calculate_mock_vault_calls(data, part_size, chunk_size): + upload_part_calls = [] + data_tree_hashes = [] + for i, data_part in enumerate(partify(data, part_size)): + start = i * part_size + end = start + len(data_part) + data_part_tree_hash_blob = tree_hash( + chunk_hashes(data_part, chunk_size)) + data_part_tree_hash = bytes_to_hex(data_part_tree_hash_blob) + data_part_linear_hash = sha256(data_part).hexdigest() + upload_part_calls.append( + call.layer1.upload_part( + sentinel.vault_name, sentinel.upload_id, + data_part_linear_hash, data_part_tree_hash, + (start, end - 1), data_part)) + data_tree_hashes.append(data_part_tree_hash_blob) + + return upload_part_calls, data_tree_hashes + + +def check_mock_vault_calls(vault, upload_part_calls, data_tree_hashes, + data_len): + vault.layer1.upload_part.assert_has_calls( + upload_part_calls, any_order=True) + assert_equal( + len(upload_part_calls), vault.layer1.upload_part.call_count) + + data_tree_hash = bytes_to_hex(tree_hash(data_tree_hashes)) + vault.layer1.complete_multipart_upload.assert_called_once_with( + sentinel.vault_name, sentinel.upload_id, data_tree_hash, data_len) + + +class TestWriter(unittest.TestCase): + def setUp(self): + super(TestWriter, self).setUp() + self.vault = create_mock_vault() + self.chunk_size = 2 # power of 2 + self.part_size = 4 # power of 2 + upload_id = sentinel.upload_id + self.writer = Writer( + self.vault, upload_id, self.part_size, self.chunk_size) + + def check_write(self, write_list): + for write_data in write_list: + self.writer.write(write_data) + self.writer.close() + + data = b''.join(write_list) + upload_part_calls, 
data_tree_hashes = calculate_mock_vault_calls( + data, self.part_size, self.chunk_size) + check_mock_vault_calls( + self.vault, upload_part_calls, data_tree_hashes, len(data)) + + def test_single_byte_write(self): + self.check_write([b'1']) + + def test_one_part_write(self): + self.check_write([b'1234']) + + def test_split_write_1(self): + self.check_write([b'1', b'234']) + + def test_split_write_2(self): + self.check_write([b'12', b'34']) + + def test_split_write_3(self): + self.check_write([b'123', b'4']) + + def test_one_part_plus_one_write(self): + self.check_write([b'12345']) + + def test_returns_archive_id(self): + self.writer.write(b'1') + self.writer.close() + self.assertEquals(sentinel.archive_id, self.writer.get_archive_id()) + + def test_current_tree_hash(self): + self.writer.write(b'1234') + self.writer.write(b'567') + hash_1 = self.writer.current_tree_hash + self.assertEqual(hash_1, + b'\x0e\xb0\x11Z\x1d\x1f\n\x10|\xf76\xa6\xf5' + + b'\x83\xd1\xd5"bU\x0c\x95\xa8<\xf5\x81\xef\x0e\x0f\x95\n\xb7k' + ) + + # This hash will be different, since the content has changed. + self.writer.write(b'22i3uy') + hash_2 = self.writer.current_tree_hash + self.assertEqual(hash_2, + b'\x7f\xf4\x97\x82U]\x81R\x05#^\xe8\x1c\xd19' + + b'\xe8\x1f\x9e\xe0\x1aO\xaad\xe5\x06"\xa5\xc0\xa8AdL' + ) + self.writer.close() + + # Check the final tree hash, post-close. + final_hash = self.writer.current_tree_hash + self.assertEqual(final_hash, + b';\x1a\xb8!=\xf0\x14#\x83\x11\xd5\x0b\x0f' + + b'\xc7D\xe4\x8e\xd1W\x99z\x14\x06\xb9D\xd0\xf0*\x93\xa2\x8e\xf9' + ) + # Then assert we don't get a different one on a subsequent call. + self.assertEqual(final_hash, self.writer.current_tree_hash) + + def test_current_uploaded_size(self): + self.writer.write(b'1234') + self.writer.write(b'567') + size_1 = self.writer.current_uploaded_size + self.assertEqual(size_1, 4) + + # This hash will be different, since the content has changed. + self.writer.write(b'22i3uy') + size_2 = self.writer.current_uploaded_size + self.assertEqual(size_2, 12) + self.writer.close() + + # Get the final size, post-close. + final_size = self.writer.current_uploaded_size + self.assertEqual(final_size, 13) + # Then assert we don't get a different one on a subsequent call. 
+ self.assertEqual(final_size, self.writer.current_uploaded_size) + + def test_upload_id(self): + self.assertEquals(sentinel.upload_id, self.writer.upload_id) + + +class TestResume(unittest.TestCase): + def setUp(self): + super(TestResume, self).setUp() + self.vault = create_mock_vault() + self.chunk_size = 2 # power of 2 + self.part_size = 4 # power of 2 + + def check_no_resume(self, data, resume_set=set()): + fobj = StringIO(data.decode('utf-8')) + part_hash_map = {} + for part_index in resume_set: + start = self.part_size * part_index + end = start + self.part_size + part_data = data[start:end] + part_hash_map[part_index] = tree_hash( + chunk_hashes(part_data, self.chunk_size)) + + resume_file_upload( + self.vault, sentinel.upload_id, self.part_size, fobj, + part_hash_map, self.chunk_size) + + upload_part_calls, data_tree_hashes = calculate_mock_vault_calls( + data, self.part_size, self.chunk_size) + resume_upload_part_calls = [ + call for part_index, call in enumerate(upload_part_calls) + if part_index not in resume_set] + check_mock_vault_calls( + self.vault, resume_upload_part_calls, data_tree_hashes, len(data)) + + def test_one_part_no_resume(self): + self.check_no_resume(b'1234') + + def test_two_parts_no_resume(self): + self.check_no_resume(b'12345678') + + def test_one_part_resume(self): + self.check_no_resume(b'1234', resume_set=set([0])) + + def test_two_parts_one_resume(self): + self.check_no_resume(b'12345678', resume_set=set([1])) + + def test_returns_archive_id(self): + archive_id = resume_file_upload( + self.vault, sentinel.upload_id, self.part_size, StringIO('1'), {}, + self.chunk_size) + self.assertEquals(sentinel.archive_id, archive_id) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/iam/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/iam/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/iam/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/iam/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..5c760811c4cc3fcb7c9804aa3b3f8b64c310832a --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/iam/test_connection.py @@ -0,0 +1,481 @@ +#!/usr/bin/env python +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
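+#
+# These tests follow the AWSMockServiceTestCase pattern used throughout
+# this suite: set_http_response() queues a canned XML reply for the
+# mocked connection, the IAMConnection method under test is invoked, and
+# assert_request_parameters() then verifies the outgoing request, with
+# noisy values such as 'Version' excluded via ignore_params_values.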
+# + +from base64 import b64decode +from boto.compat import json +from boto.iam.connection import IAMConnection +from tests.unit import AWSMockServiceTestCase + + +class TestCreateSamlProvider(AWSMockServiceTestCase): + connection_class = IAMConnection + + def default_body(self): + return b""" + + + arn + + + 29f47818-99f5-11e1-a4c3-27EXAMPLE804 + + + """ + + def test_create_saml_provider(self): + self.set_http_response(status_code=200) + response = self.service_connection.create_saml_provider('document', 'name') + + self.assert_request_parameters( + {'Action': 'CreateSAMLProvider', + 'SAMLMetadataDocument': 'document', + 'Name': 'name'}, + ignore_params_values=['Version']) + + self.assertEqual(response['create_saml_provider_response'] + ['create_saml_provider_result'] + ['saml_provider_arn'], 'arn') + + +class TestListSamlProviders(AWSMockServiceTestCase): + connection_class = IAMConnection + + def default_body(self): + return b""" + + + + + arn:aws:iam::123456789012:instance-profile/application_abc/component_xyz/Database + 2032-05-09T16:27:11Z + 2012-05-09T16:27:03Z + + + arn:aws:iam::123456789012:instance-profile/application_abc/component_xyz/Webserver + 2015-03-11T13:11:02Z + 2012-05-09T16:27:11Z + + + + + fd74fa8d-99f3-11e1-a4c3-27EXAMPLE804 + + + """ + + def test_list_saml_providers(self): + self.set_http_response(status_code=200) + response = self.service_connection.list_saml_providers() + + self.assert_request_parameters( + {'Action': 'ListSAMLProviders'}, + ignore_params_values=['Version']) + self.assertEqual(response.saml_provider_list, [ + {'arn': 'arn:aws:iam::123456789012:instance-profile/application_abc/component_xyz/Database', + 'valid_until': '2032-05-09T16:27:11Z', + 'create_date': '2012-05-09T16:27:03Z'}, + {'arn': 'arn:aws:iam::123456789012:instance-profile/application_abc/component_xyz/Webserver', + 'valid_until': '2015-03-11T13:11:02Z', + 'create_date': '2012-05-09T16:27:11Z'}]) + + +class TestGetSamlProvider(AWSMockServiceTestCase): + connection_class = IAMConnection + + def default_body(self): + return b""" + + + 2012-05-09T16:27:11Z + 2015-12-31T211:59:59Z + Pd9fexDssTkRgGNqs...DxptfEs== + + + 29f47818-99f5-11e1-a4c3-27EXAMPLE804 + + + """ + + def test_get_saml_provider(self): + self.set_http_response(status_code=200) + self.service_connection.get_saml_provider('arn') + + self.assert_request_parameters( + { + 'Action': 'GetSAMLProvider', + 'SAMLProviderArn': 'arn' + }, + ignore_params_values=['Version']) + + +class TestUpdateSamlProvider(AWSMockServiceTestCase): + connection_class = IAMConnection + + def default_body(self): + return b""" + + + arn:aws:iam::123456789012:saml-metadata/MyUniversity + + + 29f47818-99f5-11e1-a4c3-27EXAMPLE804 + + + """ + + def test_update_saml_provider(self): + self.set_http_response(status_code=200) + self.service_connection.update_saml_provider('arn', 'doc') + + self.assert_request_parameters( + { + 'Action': 'UpdateSAMLProvider', + 'SAMLMetadataDocument': 'doc', + 'SAMLProviderArn': 'arn' + }, + ignore_params_values=['Version']) + + +class TestDeleteSamlProvider(AWSMockServiceTestCase): + connection_class = IAMConnection + + def default_body(self): + return "" + + def test_delete_saml_provider(self): + self.set_http_response(status_code=200) + self.service_connection.delete_saml_provider('arn') + + self.assert_request_parameters( + { + 'Action': 'DeleteSAMLProvider', + 'SAMLProviderArn': 'arn' + }, + ignore_params_values=['Version']) + + +class TestCreateRole(AWSMockServiceTestCase): + connection_class = IAMConnection + + def 
default_body(self): + return b""" + + + + /application_abc/component_xyz/ + arn:aws:iam::123456789012:role/application_abc/component_xyz/S3Access + S3Access + {"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"Service":["ec2.amazonaws.com"]},"Action":["sts:AssumeRole"]}]} + 2012-05-08T23:34:01.495Z + AROADBQP57FF2AEXAMPLE + + + + 4a93ceee-9966-11e1-b624-b1aEXAMPLE7c + + + """ + + def test_create_role_default(self): + self.set_http_response(status_code=200) + self.service_connection.create_role('a_name') + + self.assert_request_parameters( + {'Action': 'CreateRole', + 'RoleName': 'a_name'}, + ignore_params_values=['Version', 'AssumeRolePolicyDocument']) + self.assertDictEqual(json.loads(self.actual_request.params["AssumeRolePolicyDocument"]), {"Statement": [{"Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": {"Service": ["ec2.amazonaws.com"]}}]}) + + def test_create_role_default_cn_north(self): + self.set_http_response(status_code=200) + self.service_connection.host = 'iam.cn-north-1.amazonaws.com.cn' + self.service_connection.create_role('a_name') + + self.assert_request_parameters( + {'Action': 'CreateRole', + 'RoleName': 'a_name'}, + ignore_params_values=['Version', 'AssumeRolePolicyDocument']) + self.assertDictEqual(json.loads(self.actual_request.params["AssumeRolePolicyDocument"]), {"Statement": [{"Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": {"Service": ["ec2.amazonaws.com.cn"]}}]}) + + def test_create_role_string_policy(self): + self.set_http_response(status_code=200) + self.service_connection.create_role( + 'a_name', + # Historical usage. + assume_role_policy_document='{"hello": "policy"}' + ) + + self.assert_request_parameters( + {'Action': 'CreateRole', + 'AssumeRolePolicyDocument': '{"hello": "policy"}', + 'RoleName': 'a_name'}, + ignore_params_values=['Version']) + + def test_create_role_data_policy(self): + self.set_http_response(status_code=200) + self.service_connection.create_role( + 'a_name', + # With plain data, we should dump it for them. 
+ assume_role_policy_document={"hello": "policy"} + ) + + self.assert_request_parameters( + {'Action': 'CreateRole', + 'AssumeRolePolicyDocument': '{"hello": "policy"}', + 'RoleName': 'a_name'}, + ignore_params_values=['Version']) + + +class TestGetSigninURL(AWSMockServiceTestCase): + connection_class = IAMConnection + + def default_body(self): + return b""" + + + false + + foocorporation + anotherunused + + + + c5a076e9-f1b0-11df-8fbe-45274EXAMPLE + + + """ + + def test_get_signin_url_default(self): + self.set_http_response(status_code=200) + url = self.service_connection.get_signin_url() + self.assertEqual( + url, + 'https://foocorporation.signin.aws.amazon.com/console/ec2' + ) + + def test_get_signin_url_s3(self): + self.set_http_response(status_code=200) + url = self.service_connection.get_signin_url(service='s3') + self.assertEqual( + url, + 'https://foocorporation.signin.aws.amazon.com/console/s3' + ) + + def test_get_signin_url_cn_north(self): + self.set_http_response(status_code=200) + self.service_connection.host = 'iam.cn-north-1.amazonaws.com.cn' + url = self.service_connection.get_signin_url() + self.assertEqual( + url, + 'https://foocorporation.signin.amazonaws.cn/console/ec2' + ) + + +class TestGetSigninURLNoAliases(AWSMockServiceTestCase): + connection_class = IAMConnection + + def default_body(self): + return b""" + + + false + + + + c5a076e9-f1b0-11df-8fbe-45274EXAMPLE + + + """ + + def test_get_signin_url_no_aliases(self): + self.set_http_response(status_code=200) + + with self.assertRaises(Exception): + self.service_connection.get_signin_url() + + +class TestGenerateCredentialReport(AWSMockServiceTestCase): + connection_class = IAMConnection + + def default_body(self): + return b""" + + + COMPLETE + + + b62e22a3-0da1-11e4-ba55-0990EXAMPLE + + + """ + + def test_generate_credential_report(self): + self.set_http_response(status_code=200) + response = self.service_connection.generate_credential_report() + self.assertEquals(response['generate_credential_report_response'] + ['generate_credential_report_result'] + ['state'], 'COMPLETE') + + +class TestGetCredentialReport(AWSMockServiceTestCase): + connection_class = IAMConnection + + def default_body(self): + return b""" + + + 99e60e9a-0db5-11e4-94d4-b764EXAMPLE + + + RXhhbXBsZQ== + text/csv + 2014-07-17T11:09:11Z + + + """ + + def test_get_credential_report(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_credential_report() + b64decode(response['get_credential_report_response'] + ['get_credential_report_result'] + ['content']) + +class TestCreateVirtualMFADevice(AWSMockServiceTestCase): + connection_class = IAMConnection + + def default_body(self): + return b""" + + + + arn:aws:iam::123456789012:mfa/ExampleName + 2K5K5XTLA7GGE75TQLYEXAMPLEEXAMPLEEXAMPLECHDFW4KJYZ6 + UFQ75LL7COCYKM + 89504E470D0A1A0AASDFAHSDFKJKLJFKALSDFJASDF + + + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + + """ + + def test_create_virtual_mfa_device(self): + self.set_http_response(status_code=200) + response = self.service_connection.create_virtual_mfa_device('/', 'ExampleName') + self.assert_request_parameters( + {'Path': '/', + 'VirtualMFADeviceName': 'ExampleName', + 'Action': 'CreateVirtualMFADevice'}, + ignore_params_values=['Version']) + self.assertEquals(response['create_virtual_mfa_device_response'] + ['create_virtual_mfa_device_result'] + ['virtual_mfa_device'] + ['serial_number'], 'arn:aws:iam::123456789012:mfa/ExampleName') + +class TestGetAccountPasswordPolicy(AWSMockServiceTestCase): + connection_class = 
IAMConnection + + def default_body(self): + return b""" + + + + true + true + true + false + 12 + true + 90 + false + true + 12 + + + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + + """ + + def test_get_account_password_policy(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_account_password_policy() + + self.assert_request_parameters( + { + 'Action': 'GetAccountPasswordPolicy', + }, + ignore_params_values=['Version']) + self.assertEquals(response['get_account_password_policy_response'] + ['get_account_password_policy_result']['password_policy'] + ['minimum_password_length'], '12') + + +class TestUpdateAccountPasswordPolicy(AWSMockServiceTestCase): + connection_class = IAMConnection + + def default_body(self): + return b""" + + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + + """ + + def test_update_account_password_policy(self): + self.set_http_response(status_code=200) + response = self.service_connection.update_account_password_policy(minimum_password_length=88) + + self.assert_request_parameters( + { + 'Action': 'UpdateAccountPasswordPolicy', + 'MinimumPasswordLength': 88 + }, + ignore_params_values=['Version']) + + +class TestDeleteAccountPasswordPolicy(AWSMockServiceTestCase): + connection_class = IAMConnection + + def default_body(self): + return b""" + + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + + """ + + def test_delete_account_password_policy(self): + self.set_http_response(status_code=200) + response = self.service_connection.delete_account_password_policy() + + self.assert_request_parameters( + { + 'Action': 'DeleteAccountPasswordPolicy' + }, + ignore_params_values=['Version']) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/kinesis/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/kinesis/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/kinesis/test_kinesis.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/kinesis/test_kinesis.py new file mode 100644 index 0000000000000000000000000000000000000000..6ad8adf9ba71f524756cf8c5132408395f6fa181 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/kinesis/test_kinesis.py @@ -0,0 +1,74 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
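+#
+# Kinesis transports record data as base64 text inside the JSON request
+# body; the expected strings asserted below can be reproduced directly
+# (shown here as an illustrative Python 3 session):
+#
+#     >>> import base64
+#     >>> base64.b64encode(b'\x00\x01\x02\x03\x04\x05')
+#     b'AAECAwQF'
+#     >>> base64.b64encode(b'data')
+#     b'ZGF0YQ=='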
+# + +from boto.compat import json +from boto.kinesis.layer1 import KinesisConnection +from tests.unit import AWSMockServiceTestCase + + +class TestKinesis(AWSMockServiceTestCase): + connection_class = KinesisConnection + + def default_body(self): + return b'{}' + + def test_put_record_binary(self): + self.set_http_response(status_code=200) + self.service_connection.put_record('stream-name', + b'\x00\x01\x02\x03\x04\x05', 'partition-key') + + body = json.loads(self.actual_request.body) + self.assertEqual(body['Data'], 'AAECAwQF') + + target = self.actual_request.headers['X-Amz-Target'] + self.assertTrue('PutRecord' in target) + + def test_put_record_string(self): + self.set_http_response(status_code=200) + self.service_connection.put_record('stream-name', + 'data', 'partition-key') + + body = json.loads(self.actual_request.body) + self.assertEqual(body['Data'], 'ZGF0YQ==') + + target = self.actual_request.headers['X-Amz-Target'] + self.assertTrue('PutRecord' in target) + + def test_put_records(self): + self.set_http_response(status_code=200) + record_binary = { + 'Data': b'\x00\x01\x02\x03\x04\x05', + 'PartitionKey': 'partition-key' + } + record_str = { + 'Data': 'data', + 'PartitionKey': 'partition-key' + } + self.service_connection.put_records(stream_name='stream-name', + records=[record_binary, record_str]) + + body = json.loads(self.actual_request.body) + self.assertEqual(body['Records'][0]['Data'], 'AAECAwQF') + self.assertEqual(body['Records'][1]['Data'], 'ZGF0YQ==') + + target = self.actual_request.headers['X-Amz-Target'] + self.assertTrue('PutRecord' in target) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/kms/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/kms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..70cc23febffdfb7a2de035d163e75a400a9c82ee --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/kms/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/kms/test_kms.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/kms/test_kms.py new file mode 100644 index 0000000000000000000000000000000000000000..c46e831a2924837f9c13201d2908f03f72d4a831 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/kms/test_kms.py @@ -0,0 +1,63 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. 
All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.compat import json
+from boto.kms.layer1 import KMSConnection
+from tests.unit import AWSMockServiceTestCase
+
+
+class TestKMS(AWSMockServiceTestCase):
+    connection_class = KMSConnection
+
+    def default_body(self):
+        return b'{}'
+
+    def test_binary_input(self):
+        """
+        This test ensures that binary is base64 encoded when it is sent to
+        the service.
+        """
+        self.set_http_response(status_code=200)
+        data = b'\x00\x01\x02\x03\x04\x05'
+        self.service_connection.encrypt(key_id='foo', plaintext=data)
+        body = json.loads(self.actual_request.body)
+        self.assertEqual(body['Plaintext'], 'AAECAwQF')
+
+    def test_non_binary_input_for_blobs_fails(self):
+        """
+        This test ensures that only binary is used for blob type parameters.
+        """
+        self.set_http_response(status_code=200)
+        data = u'\u00e9'
+        with self.assertRaises(TypeError):
+            self.service_connection.encrypt(key_id='foo', plaintext=data)
+
+    def test_binary_output(self):
+        """
+        This test ensures that the output is base64 decoded before
+        it is returned to the user.
+ """ + content = {'Plaintext': 'AAECAwQF'} + self.set_http_response(status_code=200, + body=json.dumps(content).encode('utf-8')) + response = self.service_connection.decrypt(b'some arbitrary value') + self.assertEqual(response['Plaintext'], b'\x00\x01\x02\x03\x04\x05') diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/logs/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/logs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/logs/test_layer1.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/logs/test_layer1.py new file mode 100644 index 0000000000000000000000000000000000000000..7aae5b09d8aedab1977a41e91f135e5ec064e596 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/logs/test_layer1.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python + +from boto.logs.layer1 import CloudWatchLogsConnection +from tests.unit import AWSMockServiceTestCase + + +class TestDescribeLogs(AWSMockServiceTestCase): + connection_class = CloudWatchLogsConnection + + def default_body(self): + return b'{"logGroups": []}' + + def test_describe(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.describe_log_groups() + + self.assertEqual(0, len(api_response['logGroups'])) + + self.assert_request_parameters({}) + + target = self.actual_request.headers['X-Amz-Target'] + self.assertTrue('DescribeLogGroups' in target) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/machinelearning/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/machinelearning/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a3575e2ed1f4fb166a786d2a051f6d43d682ece2 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/machinelearning/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/machinelearning/test_machinelearning.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/machinelearning/test_machinelearning.py new file mode 100644 index 0000000000000000000000000000000000000000..6a4d97c09c2f3a5d6e02eb8b231e9aee330a4e56 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/machinelearning/test_machinelearning.py @@ -0,0 +1,45 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.compat import json +from boto.machinelearning.layer1 import MachineLearningConnection +from tests.unit import AWSMockServiceTestCase + + +class TestMachineLearning(AWSMockServiceTestCase): + connection_class = MachineLearningConnection + + def test_predict(self): + ml_endpoint = 'mymlmodel.amazonaws.com' + self.set_http_response(status_code=200, body=b'') + self.service_connection.predict( + ml_model_id='foo', record={'Foo': 'bar'}, + predict_endpoint=ml_endpoint) + self.assertEqual(self.actual_request.host, ml_endpoint) + + def test_predict_with_scheme_in_endpoint(self): + ml_endpoint = 'mymlmodel.amazonaws.com' + self.set_http_response(status_code=200, body=b'') + self.service_connection.predict( + ml_model_id='foo', record={'Foo': 'bar'}, + predict_endpoint='https://' + ml_endpoint) + self.assertEqual(self.actual_request.host, ml_endpoint) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/manage/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/manage/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/manage/test_ssh.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/manage/test_ssh.py new file mode 100644 index 0000000000000000000000000000000000000000..b1db654e48b0bfdc8984d2bde738a61dd82d153a --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/manage/test_ssh.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +try: + import paramiko + from boto.manage.cmdshell import SSHClient +except ImportError: + paramiko = None + SSHClient = None + +from tests.compat import mock, unittest + + +class TestSSHTimeout(unittest.TestCase): + @unittest.skipIf(not paramiko, 'Paramiko missing') + def test_timeout(self): + client_tmp = paramiko.SSHClient + + def client_mock(): + client = client_tmp() + client.connect = mock.Mock(name='connect') + return client + + paramiko.SSHClient = client_mock + paramiko.RSAKey.from_private_key_file = mock.Mock() + + server = mock.Mock() + test = SSHClient(server) + + self.assertEqual(test._ssh_client.connect.call_args[1]['timeout'], None) + + test2 = SSHClient(server, timeout=30) + + self.assertEqual(test2._ssh_client.connect.call_args[1]['timeout'], 30) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/mturk/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/mturk/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/mturk/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/mturk/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..05f6be259ec1279348476b87da12bed32d815c7d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/mturk/test_connection.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- + +from tests.unit import AWSMockServiceTestCase + +from boto.mturk.connection import MTurkConnection + + +GET_FILE_UPLOAD_URL = b""" + + + True + + http://s3.amazonaws.com/myawsbucket/puppy.jpg +""" + + +class TestMTurkConnection(AWSMockServiceTestCase): + connection_class = MTurkConnection + + def setUp(self): + super(TestMTurkConnection, self).setUp() + + def test_get_file_upload_url_success(self): + self.set_http_response(status_code=200, body=GET_FILE_UPLOAD_URL) + rset = self.service_connection.get_file_upload_url('aid', 'qid') + self.assertEquals(len(rset), 1) + self.assertEquals(rset[0].FileUploadURL, + 'http://s3.amazonaws.com/myawsbucket/puppy.jpg') diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/mws/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/mws/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/mws/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/mws/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..fade578d3b333b83529562b11626f751fa8a8404 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/mws/test_connection.py @@ -0,0 +1,205 @@ +#!/usr/bin/env python +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.mws.connection import MWSConnection, api_call_map, destructure_object +from boto.mws.response import (ResponseElement, GetFeedSubmissionListResult, + ResponseFactory) +from boto.exception import BotoServerError + +from tests.compat import unittest + +from tests.unit import AWSMockServiceTestCase + +from mock import MagicMock + + +class TestMWSConnection(AWSMockServiceTestCase): + connection_class = MWSConnection + mws = True + + def default_body(self): + return b""" + + + 2YgYW55IGNhcm5hbCBwbGVhc3VyZS4= + true + + 2291326430 + _POST_PRODUCT_DATA_ + 2009-02-20T02:10:35+00:00 + _SUBMITTED_ + + + + 1105b931-6f1c-4480-8e97-f3b467840a9e + +""" + + def default_body_error(self): + return b""" + + + + Sender + string + string + + + + + + string +""" + + def test_destructure_object(self): + # Test that parsing of user input to Amazon input works. + response = ResponseElement() + response.C = 'four' + response.D = 'five' + inputs = [ + ('A', 'B'), ['B', 'A'], set(['C']), + False, 'String', {'A': 'one', 'B': 'two'}, + response, + {'A': 'one', 'B': 'two', + 'C': [{'D': 'four', 'E': 'five'}, + {'F': 'six', 'G': 'seven'}]}, + ] + outputs = [ + {'Prefix.1': 'A', 'Prefix.2': 'B'}, + {'Prefix.1': 'B', 'Prefix.2': 'A'}, + {'Prefix.1': 'C'}, + {'Prefix': 'false'}, {'Prefix': 'String'}, + {'Prefix.A': 'one', 'Prefix.B': 'two'}, + {'Prefix.C': 'four', 'Prefix.D': 'five'}, + {'Prefix.A': 'one', 'Prefix.B': 'two', + 'Prefix.C.member.1.D': 'four', + 'Prefix.C.member.1.E': 'five', + 'Prefix.C.member.2.F': 'six', + 'Prefix.C.member.2.G': 'seven'} + ] + for user, amazon in zip(inputs, outputs): + result = {} + members = user is inputs[-1] + destructure_object(user, result, prefix='Prefix', members=members) + self.assertEqual(result, amazon) + + def test_decorator_order(self): + for action, func in api_call_map.items(): + func = getattr(self.service_connection, func) + decs = [func.__name__] + while func: + i = 0 + if not hasattr(func, '__closure__'): + func = getattr(func, '__wrapped__', None) + continue + while i < len(func.__closure__): + value = func.__closure__[i].cell_contents + if hasattr(value, '__call__'): + if 'requires' == value.__name__: + self.assertTrue(not decs or decs[-1] == 'requires') + decs.append(value.__name__) + i += 1 + func = getattr(func, '__wrapped__', None) + + def test_built_api_call_map(self): + # Ensure that the map is populated. + # It starts empty, but the decorators should add to it as they're + # applied. 
As of 2013/10/21, there were 52 calls (with more likely
+        # to be added), so let's simply ensure there are enough there.
+        self.assertTrue(len(api_call_map.keys()) > 50)
+
+    def test_method_for(self):
+        # First, ensure that the map is in "right enough" state.
+        self.assertTrue('GetFeedSubmissionList' in api_call_map)
+
+        # Make sure we can find the correct method.
+        func = self.service_connection.method_for('GetFeedSubmissionList')
+        # Ensure the right name was found.
+        self.assertTrue(callable(func))
+        ideal = self.service_connection.get_feed_submission_list
+        self.assertEqual(func, ideal)
+
+        # Check a non-existent action.
+        func = self.service_connection.method_for('NotHereNorThere')
+        self.assertEqual(func, None)
+
+    def test_response_factory(self):
+        connection = self.service_connection
+        body = self.default_body()
+        action = 'GetFeedSubmissionList'
+        parser = connection._response_factory(action, connection=connection)
+        response = connection._parse_response(parser, 'text/xml', body)
+        self.assertEqual(response._action, action)
+        self.assertEqual(response.__class__.__name__, action + 'Response')
+        self.assertEqual(response._result.__class__,
+                         GetFeedSubmissionListResult)
+
+        class MyResult(GetFeedSubmissionListResult):
+            _hello = '_world'
+
+        scope = {'GetFeedSubmissionListResult': MyResult}
+        connection._setup_factories([scope])
+
+        parser = connection._response_factory(action, connection=connection)
+        response = connection._parse_response(parser, 'text/xml', body)
+        self.assertEqual(response._action, action)
+        self.assertEqual(response.__class__.__name__, action + 'Response')
+        self.assertEqual(response._result.__class__, MyResult)
+        self.assertEqual(response._result._hello, '_world')
+        self.assertEqual(response._result.HasNext, 'true')
+
+    def test_get_service_status(self):
+        with self.assertRaises(AttributeError) as err:
+            self.service_connection.get_service_status()
+
+        self.assertTrue('products' in str(err.exception))
+        self.assertTrue('inventory' in str(err.exception))
+        self.assertTrue('feeds' in str(err.exception))
+
+    def test_post_request(self):
+
+        self.service_connection._mexe = MagicMock(
+            side_effect=
+            BotoServerError(500, 'Your request has been throttled', body=self.default_body_error()))
+
+        with self.assertRaises(BotoServerError) as err:
+            self.service_connection.get_lowest_offer_listings_for_asin(
+                MarketplaceId='12345',
+                ASINList='ASIN12345',
+                condition='Any',
+                SellerId='1234',
+                excludeme='True')
+
+        self.assertTrue('throttled' in str(err.exception.reason))
+        self.assertEqual(int(err.exception.status), 500)
+
+    def test_sandboxify(self):
+        # Create one-off connection class that has self._sandboxed = True
+        conn = MWSConnection(https_connection_factory=self.https_connection_factory,
+                             aws_access_key_id='aws_access_key_id',
+                             aws_secret_access_key='aws_secret_access_key',
+                             sandbox=True)
+        self.assertEqual(conn._sandboxify('a/bogus/path'), 'a/bogus_Sandbox/path')
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/mws/test_response.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/mws/test_response.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2bb8e292a49a4fa2a33de0062488165f655bef2
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/mws/test_response.py
@@ -0,0 +1,229 @@
+#!/usr/bin/env python
+from boto.mws.connection import MWSConnection
+from boto.mws.response import (ResponseFactory, ResponseElement, Element,
+                               MemberList, ElementList, SimpleList)
+
+
+from tests.unit import
AWSMockServiceTestCase +from boto.compat import filter, map +from tests.compat import unittest + + +class TestMWSResponse(AWSMockServiceTestCase): + connection_class = MWSConnection + mws = True + + def test_parsing_nested_elements(self): + class Test9one(ResponseElement): + Nest = Element() + Zoom = Element() + + class Test9Result(ResponseElement): + Item = Element(Test9one) + + text = b""" + + Bar + + Zap + Zoo + + Bam + + """ + obj = self.check_issue(Test9Result, text) + Item = obj._result.Item + useful = lambda x: not x[0].startswith('_') + nest = dict(filter(useful, Item.Nest.__dict__.items())) + self.assertEqual(nest, dict(Zip='Zap', Zam='Zoo')) + useful = lambda x: not x[0].startswith('_') and not x[0] == 'Nest' + item = dict(filter(useful, Item.__dict__.items())) + self.assertEqual(item, dict(Foo='Bar', Bif='Bam', Zoom=None)) + + def test_parsing_member_list_specification(self): + class Test8extra(ResponseElement): + Foo = SimpleList() + + class Test8Result(ResponseElement): + Item = MemberList(SimpleList) + Extra = MemberList(Test8extra) + + text = b""" + + 0 + 1 + 2 + 3 + + + 45 + + 67 + + """ + obj = self.check_issue(Test8Result, text) + self.assertSequenceEqual( + list(map(int, obj._result.Item)), + list(range(4)), + ) + self.assertSequenceEqual( + list(map(lambda x: list(map(int, x.Foo)), obj._result.Extra)), + [[4, 5], [], [6, 7]], + ) + + def test_parsing_nested_lists(self): + class Test7Result(ResponseElement): + Item = MemberList(Nest=MemberList(), + List=ElementList(Simple=SimpleList())) + + text = b""" + + + One + + 2 + 4 + 6 + + + + Two + + 1 + 3 + 5 + + + 4 + 5 + 6 + + + 7 + 8 + 9 + + + + Six + + Foo + 1 + 2 + 3 + + + Bar + + + + """ + obj = self.check_issue(Test7Result, text) + item = obj._result.Item + self.assertEqual(len(item), 3) + nests = [z.Nest for z in filter(lambda x: x.Nest, item)] + self.assertSequenceEqual( + [[y.Data for y in nest] for nest in nests], + [[u'2', u'4', u'6'], [u'1', u'3', u'5']], + ) + self.assertSequenceEqual( + [element.Simple for element in item[1].List], + [[u'4', u'5', u'6'], [u'7', u'8', u'9']], + ) + self.assertSequenceEqual( + item[-1].List[0].Simple, + ['1', '2', '3'], + ) + self.assertEqual(item[-1].List[1].Simple, []) + self.assertSequenceEqual( + [e.Value for e in obj._result.Item], + ['One', 'Two', 'Six'], + ) + + def test_parsing_member_list(self): + class Test6Result(ResponseElement): + Item = MemberList() + + text = b""" + + One + Two + Four + + Six + + """ + obj = self.check_issue(Test6Result, text) + self.assertSequenceEqual( + [e.Value for e in obj._result.Item], + ['One', 'Two', 'Six'], + ) + self.assertTrue(obj._result.Item[1].Error == 'Four') + with self.assertRaises(AttributeError) as e: + obj._result.Item[2].Error + + def test_parsing_empty_member_list(self): + class Test5Result(ResponseElement): + Item = MemberList(Nest=MemberList()) + + text = b""" + + """ + obj = self.check_issue(Test5Result, text) + self.assertSequenceEqual(obj._result.Item, []) + + def test_parsing_missing_member_list(self): + class Test4Result(ResponseElement): + Item = MemberList(NestedItem=MemberList()) + + text = b""" + """ + obj = self.check_issue(Test4Result, text) + self.assertSequenceEqual(obj._result.Item, []) + + def test_parsing_element_lists(self): + class Test1Result(ResponseElement): + Item = ElementList() + + text = b""" + Bar + Bif + Baz + Zoo + """ + obj = self.check_issue(Test1Result, text) + self.assertTrue(len(obj._result.Item) == 3) + elements = lambda x: getattr(x, 'Foo', getattr(x, 'Zip', '?')) + elements = 
list(map(elements, obj._result.Item)) + self.assertSequenceEqual(elements, ['Bar', 'Bif', 'Baz']) + + def test_parsing_missing_lists(self): + class Test2Result(ResponseElement): + Item = ElementList() + + text = b""" + """ + obj = self.check_issue(Test2Result, text) + self.assertEqual(obj._result.Item, []) + + def test_parsing_simple_lists(self): + class Test3Result(ResponseElement): + Item = SimpleList() + + text = b""" + Bar + Bif + Baz + """ + obj = self.check_issue(Test3Result, text) + self.assertSequenceEqual(obj._result.Item, ['Bar', 'Bif', 'Baz']) + + def check_issue(self, klass, text): + action = klass.__name__[:-len('Result')] + factory = ResponseFactory(scopes=[{klass.__name__: klass}]) + parser = factory(action, connection=self.service_connection) + return self.service_connection._parse_response(parser, 'text/xml', text) + + +if __name__ == "__main__": + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/provider/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/provider/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/provider/test_provider.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/provider/test_provider.py new file mode 100644 index 0000000000000000000000000000000000000000..89092253e44a297d5a735c0a5e4538a866368952 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/provider/test_provider.py @@ -0,0 +1,441 @@ +#!/usr/bin/env python +from datetime import datetime, timedelta + +from tests.compat import mock, unittest +import os + +from boto import provider +from boto.compat import expanduser + + +INSTANCE_CONFIG = { + 'allowall': { + u'AccessKeyId': u'iam_access_key', + u'Code': u'Success', + u'Expiration': u'2012-09-01T03:57:34Z', + u'LastUpdated': u'2012-08-31T21:43:40Z', + u'SecretAccessKey': u'iam_secret_key', + u'Token': u'iam_token', + u'Type': u'AWS-HMAC' + } +} + + +class TestProvider(unittest.TestCase): + def setUp(self): + self.environ = {} + self.config = {} + self.shared_config = {} + + self.metadata_patch = mock.patch('boto.utils.get_instance_metadata') + self.config_patch = mock.patch('boto.provider.config.get', + self.get_config) + self.has_config_patch = mock.patch('boto.provider.config.has_option', + self.has_config) + self.config_object_patch = mock.patch.object( + provider.Config, 'get', self.get_shared_config) + self.has_config_object_patch = mock.patch.object( + provider.Config, 'has_option', self.has_shared_config) + self.environ_patch = mock.patch('os.environ', self.environ) + + self.get_instance_metadata = self.metadata_patch.start() + self.get_instance_metadata.return_value = None + self.config_patch.start() + self.has_config_patch.start() + self.config_object_patch.start() + self.has_config_object_patch.start() + self.environ_patch.start() + + + def tearDown(self): + self.metadata_patch.stop() + self.config_patch.stop() + self.has_config_patch.stop() + self.config_object_patch.stop() + self.has_config_object_patch.stop() + self.environ_patch.stop() + + def has_config(self, section_name, key): + try: + self.config[section_name][key] + return True + except KeyError: + return False + + def get_config(self, section_name, key): + try: + return self.config[section_name][key] + except KeyError: + return None + + def has_shared_config(self, section_name, key): + try: + self.shared_config[section_name][key] + return True + except KeyError: + return False + + def get_shared_config(self, 
section_name, key): + try: + return self.shared_config[section_name][key] + except KeyError: + return None + + def test_passed_in_values_are_used(self): + p = provider.Provider('aws', 'access_key', 'secret_key', 'security_token') + self.assertEqual(p.access_key, 'access_key') + self.assertEqual(p.secret_key, 'secret_key') + self.assertEqual(p.security_token, 'security_token') + + def test_environment_variables_are_used(self): + self.environ['AWS_ACCESS_KEY_ID'] = 'env_access_key' + self.environ['AWS_SECRET_ACCESS_KEY'] = 'env_secret_key' + p = provider.Provider('aws') + self.assertEqual(p.access_key, 'env_access_key') + self.assertEqual(p.secret_key, 'env_secret_key') + self.assertIsNone(p.security_token) + + def test_environment_variable_aws_security_token(self): + self.environ['AWS_ACCESS_KEY_ID'] = 'env_access_key' + self.environ['AWS_SECRET_ACCESS_KEY'] = 'env_secret_key' + self.environ['AWS_SECURITY_TOKEN'] = 'env_security_token' + p = provider.Provider('aws') + self.assertEqual(p.access_key, 'env_access_key') + self.assertEqual(p.secret_key, 'env_secret_key') + self.assertEqual(p.security_token, 'env_security_token') + + def test_no_credentials_provided(self): + p = provider.Provider( + 'aws', + provider.NO_CREDENTIALS_PROVIDED, + provider.NO_CREDENTIALS_PROVIDED, + provider.NO_CREDENTIALS_PROVIDED + ) + self.assertEqual(p.access_key, provider.NO_CREDENTIALS_PROVIDED) + self.assertEqual(p.secret_key, provider.NO_CREDENTIALS_PROVIDED) + self.assertEqual(p.security_token, provider.NO_CREDENTIALS_PROVIDED) + + def test_config_profile_values_are_used(self): + self.config = { + 'profile dev': { + 'aws_access_key_id': 'dev_access_key', + 'aws_secret_access_key': 'dev_secret_key', + }, 'profile prod': { + 'aws_access_key_id': 'prod_access_key', + 'aws_secret_access_key': 'prod_secret_key', + }, 'profile prod_withtoken': { + 'aws_access_key_id': 'prod_access_key', + 'aws_secret_access_key': 'prod_secret_key', + 'aws_security_token': 'prod_token', + }, 'Credentials': { + 'aws_access_key_id': 'default_access_key', + 'aws_secret_access_key': 'default_secret_key' + } + } + p = provider.Provider('aws', profile_name='prod') + self.assertEqual(p.access_key, 'prod_access_key') + self.assertEqual(p.secret_key, 'prod_secret_key') + p = provider.Provider('aws', profile_name='prod_withtoken') + self.assertEqual(p.access_key, 'prod_access_key') + self.assertEqual(p.secret_key, 'prod_secret_key') + self.assertEqual(p.security_token, 'prod_token') + q = provider.Provider('aws', profile_name='dev') + self.assertEqual(q.access_key, 'dev_access_key') + self.assertEqual(q.secret_key, 'dev_secret_key') + + def test_config_missing_profile(self): + # None of these default profiles should be loaded! 
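+        # A Provider built with an explicit profile_name must not fall back
+        # to the [default] section of the shared credentials file or to the
+        # boto config [Credentials] section; an unknown profile should raise
+        # ProfileNotFoundError rather than silently using the values below.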
+ self.shared_config = { + 'default': { + 'aws_access_key_id': 'shared_access_key', + 'aws_secret_access_key': 'shared_secret_key', + } + } + self.config = { + 'Credentials': { + 'aws_access_key_id': 'default_access_key', + 'aws_secret_access_key': 'default_secret_key' + } + } + with self.assertRaises(provider.ProfileNotFoundError): + provider.Provider('aws', profile_name='doesntexist') + + def test_config_values_are_used(self): + self.config = { + 'Credentials': { + 'aws_access_key_id': 'cfg_access_key', + 'aws_secret_access_key': 'cfg_secret_key', + } + } + p = provider.Provider('aws') + self.assertEqual(p.access_key, 'cfg_access_key') + self.assertEqual(p.secret_key, 'cfg_secret_key') + self.assertIsNone(p.security_token) + + def test_config_value_security_token_is_used(self): + self.config = { + 'Credentials': { + 'aws_access_key_id': 'cfg_access_key', + 'aws_secret_access_key': 'cfg_secret_key', + 'aws_security_token': 'cfg_security_token', + } + } + p = provider.Provider('aws') + self.assertEqual(p.access_key, 'cfg_access_key') + self.assertEqual(p.secret_key, 'cfg_secret_key') + self.assertEqual(p.security_token, 'cfg_security_token') + + def test_keyring_is_used(self): + self.config = { + 'Credentials': { + 'aws_access_key_id': 'cfg_access_key', + 'keyring': 'test', + } + } + import sys + try: + import keyring + imported = True + except ImportError: + sys.modules['keyring'] = keyring = type(mock)('keyring', '') + imported = False + + try: + with mock.patch('keyring.get_password', create=True): + keyring.get_password.side_effect = ( + lambda kr, login: kr+login+'pw') + p = provider.Provider('aws') + self.assertEqual(p.access_key, 'cfg_access_key') + self.assertEqual(p.secret_key, 'testcfg_access_keypw') + self.assertIsNone(p.security_token) + finally: + if not imported: + del sys.modules['keyring'] + + def test_passed_in_values_beat_env_vars(self): + self.environ['AWS_ACCESS_KEY_ID'] = 'env_access_key' + self.environ['AWS_SECRET_ACCESS_KEY'] = 'env_secret_key' + self.environ['AWS_SECURITY_TOKEN'] = 'env_security_token' + p = provider.Provider('aws', 'access_key', 'secret_key') + self.assertEqual(p.access_key, 'access_key') + self.assertEqual(p.secret_key, 'secret_key') + self.assertEqual(p.security_token, None) + + def test_env_vars_beat_shared_creds_values(self): + self.environ['AWS_ACCESS_KEY_ID'] = 'env_access_key' + self.environ['AWS_SECRET_ACCESS_KEY'] = 'env_secret_key' + self.shared_config = { + 'default': { + 'aws_access_key_id': 'shared_access_key', + 'aws_secret_access_key': 'shared_secret_key', + } + } + p = provider.Provider('aws') + self.assertEqual(p.access_key, 'env_access_key') + self.assertEqual(p.secret_key, 'env_secret_key') + self.assertIsNone(p.security_token) + + def test_shared_creds_beat_config_values(self): + self.shared_config = { + 'default': { + 'aws_access_key_id': 'shared_access_key', + 'aws_secret_access_key': 'shared_secret_key', + } + } + self.config = { + 'Credentials': { + 'aws_access_key_id': 'cfg_access_key', + 'aws_secret_access_key': 'cfg_secret_key', + } + } + p = provider.Provider('aws') + self.assertEqual(p.access_key, 'shared_access_key') + self.assertEqual(p.secret_key, 'shared_secret_key') + self.assertIsNone(p.security_token) + + def test_shared_creds_profile_beats_defaults(self): + self.shared_config = { + 'default': { + 'aws_access_key_id': 'shared_access_key', + 'aws_secret_access_key': 'shared_secret_key', + }, + 'foo': { + 'aws_access_key_id': 'foo_access_key', + 'aws_secret_access_key': 'foo_secret_key', + } + } + p = 
provider.Provider('aws', profile_name='foo') + self.assertEqual(p.access_key, 'foo_access_key') + self.assertEqual(p.secret_key, 'foo_secret_key') + self.assertIsNone(p.security_token) + + def test_env_profile_loads_profile(self): + self.environ['AWS_PROFILE'] = 'foo' + self.shared_config = { + 'default': { + 'aws_access_key_id': 'shared_access_key', + 'aws_secret_access_key': 'shared_secret_key', + }, + 'foo': { + 'aws_access_key_id': 'shared_access_key_foo', + 'aws_secret_access_key': 'shared_secret_key_foo', + } + } + self.config = { + 'profile foo': { + 'aws_access_key_id': 'cfg_access_key_foo', + 'aws_secret_access_key': 'cfg_secret_key_foo', + }, + 'Credentials': { + 'aws_access_key_id': 'cfg_access_key', + 'aws_secret_access_key': 'cfg_secret_key', + } + } + p = provider.Provider('aws') + self.assertEqual(p.access_key, 'shared_access_key_foo') + self.assertEqual(p.secret_key, 'shared_secret_key_foo') + self.assertIsNone(p.security_token) + + self.shared_config = {} + p = provider.Provider('aws') + self.assertEqual(p.access_key, 'cfg_access_key_foo') + self.assertEqual(p.secret_key, 'cfg_secret_key_foo') + self.assertIsNone(p.security_token) + + def test_env_vars_security_token_beats_config_values(self): + self.environ['AWS_ACCESS_KEY_ID'] = 'env_access_key' + self.environ['AWS_SECRET_ACCESS_KEY'] = 'env_secret_key' + self.environ['AWS_SECURITY_TOKEN'] = 'env_security_token' + self.shared_config = { + 'default': { + 'aws_access_key_id': 'shared_access_key', + 'aws_secret_access_key': 'shared_secret_key', + 'aws_security_token': 'shared_security_token', + } + } + self.config = { + 'Credentials': { + 'aws_access_key_id': 'cfg_access_key', + 'aws_secret_access_key': 'cfg_secret_key', + 'aws_security_token': 'cfg_security_token', + } + } + p = provider.Provider('aws') + self.assertEqual(p.access_key, 'env_access_key') + self.assertEqual(p.secret_key, 'env_secret_key') + self.assertEqual(p.security_token, 'env_security_token') + + self.environ.clear() + p = provider.Provider('aws') + self.assertEqual(p.security_token, 'shared_security_token') + + self.shared_config.clear() + p = provider.Provider('aws') + self.assertEqual(p.security_token, 'cfg_security_token') + + def test_metadata_server_credentials(self): + self.get_instance_metadata.return_value = INSTANCE_CONFIG + p = provider.Provider('aws') + self.assertEqual(p.access_key, 'iam_access_key') + self.assertEqual(p.secret_key, 'iam_secret_key') + self.assertEqual(p.security_token, 'iam_token') + self.assertEqual( + self.get_instance_metadata.call_args[1]['data'], + 'meta-data/iam/security-credentials/') + + def test_refresh_credentials(self): + now = datetime.utcnow() + first_expiration = (now + timedelta(seconds=10)).strftime( + "%Y-%m-%dT%H:%M:%SZ") + credentials = { + u'AccessKeyId': u'first_access_key', + u'Code': u'Success', + u'Expiration': first_expiration, + u'LastUpdated': u'2012-08-31T21:43:40Z', + u'SecretAccessKey': u'first_secret_key', + u'Token': u'first_token', + u'Type': u'AWS-HMAC' + } + instance_config = {'allowall': credentials} + self.get_instance_metadata.return_value = instance_config + p = provider.Provider('aws') + self.assertEqual(p.access_key, 'first_access_key') + self.assertEqual(p.secret_key, 'first_secret_key') + self.assertEqual(p.security_token, 'first_token') + self.assertIsNotNone(p._credential_expiry_time) + + # Now set the expiration to something in the past. 
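+        # Refresh is lazy: the provider records _credential_expiry_time when
+        # it reads the metadata service, and the credential attributes
+        # re-poll the metadata service on access once that time has passed.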
+ expired = now - timedelta(seconds=20) + p._credential_expiry_time = expired + credentials['AccessKeyId'] = 'second_access_key' + credentials['SecretAccessKey'] = 'second_secret_key' + credentials['Token'] = 'second_token' + self.get_instance_metadata.return_value = instance_config + + # Now upon attribute access, the credentials should be updated. + self.assertEqual(p.access_key, 'second_access_key') + self.assertEqual(p.secret_key, 'second_secret_key') + self.assertEqual(p.security_token, 'second_token') + + @mock.patch('boto.provider.config.getint') + @mock.patch('boto.provider.config.getfloat') + def test_metadata_config_params(self, config_float, config_int): + config_int.return_value = 10 + config_float.return_value = 4.0 + self.get_instance_metadata.return_value = INSTANCE_CONFIG + p = provider.Provider('aws') + self.assertEqual(p.access_key, 'iam_access_key') + self.assertEqual(p.secret_key, 'iam_secret_key') + self.assertEqual(p.security_token, 'iam_token') + self.get_instance_metadata.assert_called_with( + timeout=4.0, num_retries=10, + data='meta-data/iam/security-credentials/') + + def test_provider_google(self): + self.environ['GS_ACCESS_KEY_ID'] = 'env_access_key' + self.environ['GS_SECRET_ACCESS_KEY'] = 'env_secret_key' + self.shared_config = { + 'default': { + 'gs_access_key_id': 'shared_access_key', + 'gs_secret_access_key': 'shared_secret_key', + } + } + self.config = { + 'Credentials': { + 'gs_access_key_id': 'cfg_access_key', + 'gs_secret_access_key': 'cfg_secret_key', + } + } + p = provider.Provider('google') + self.assertEqual(p.access_key, 'env_access_key') + self.assertEqual(p.secret_key, 'env_secret_key') + + self.environ.clear() + p = provider.Provider('google') + self.assertEqual(p.access_key, 'shared_access_key') + self.assertEqual(p.secret_key, 'shared_secret_key') + + self.shared_config.clear() + p = provider.Provider('google') + self.assertEqual(p.access_key, 'cfg_access_key') + self.assertEqual(p.secret_key, 'cfg_secret_key') + + @mock.patch('os.path.isfile', return_value=True) + @mock.patch.object(provider.Config, 'load_from_path') + def test_shared_config_loading(self, load_from_path, exists): + provider.Provider('aws') + path = os.path.join(expanduser('~'), '.aws', 'credentials') + exists.assert_called_once_with(path) + load_from_path.assert_called_once_with(path) + + exists.reset_mock() + load_from_path.reset_mock() + + provider.Provider('google') + path = os.path.join(expanduser('~'), '.google', 'credentials') + exists.assert_called_once_with(path) + load_from_path.assert_called_once_with(path) + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/rds/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/rds/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/rds/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/rds/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..fbc65b0949379c7b63ebf16cfd4d96581194ee93 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/rds/test_connection.py @@ -0,0 +1,787 @@ +#!/usr/bin/env python +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from tests.unit import unittest +from tests.unit import AWSMockServiceTestCase + +from boto.ec2.securitygroup import SecurityGroup +from boto.rds import RDSConnection +from boto.rds.vpcsecuritygroupmembership import VPCSecurityGroupMembership +from boto.rds.parametergroup import ParameterGroup +from boto.rds.logfile import LogFile, LogFileObject + +import xml.sax.saxutils as saxutils + +class TestRDSConnection(AWSMockServiceTestCase): + connection_class = RDSConnection + + def setUp(self): + super(TestRDSConnection, self).setUp() + + def default_body(self): + return """ + + + + + 2000 + 1 + false + backing-up + mydbinstance2 + 10:30-11:00 + wed:06:30-wed:07:00 + + default:mysql-5-5 + in-sync + + us-west-2b + + mysql + + general-public-license + + + in-sync + default.mysql5.5 + + + + 3306 +
    mydbinstance2.c0hjqouvn9mf.us-west-2.rds.amazonaws.com
    +
    + 5.5.27 + + + active + default + + + + + sg-1 + active + + + mydb2 + true + 2012-10-03T22:01:51.047Z + 200 + db.m1.large + awsuser + + + + true + replicating + read replication + + + + 990524496922 + Complete + My modified DBSubnetGroup + mydbsubnetgroup + + + Active + subnet-7c5b4115 + + us-east-1c + + + + Active + subnet-7b5b4112 + + us-east-1b + + + + Active + subnet-3ea6bd57 + + us-east-1d + + + + +
    +
    +
    +
    + """ + + def test_get_all_db_instances(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_all_dbinstances('instance_id') + self.assertEqual(len(response), 1) + self.assert_request_parameters({ + 'Action': 'DescribeDBInstances', + 'DBInstanceIdentifier': 'instance_id', + }, ignore_params_values=['Version']) + db = response[0] + self.assertEqual(db.id, 'mydbinstance2') + self.assertEqual(db.create_time, '2012-10-03T22:01:51.047Z') + self.assertEqual(db.engine, 'mysql') + self.assertEqual(db.status, 'backing-up') + self.assertEqual(db.allocated_storage, 200) + self.assertEqual( + db.endpoint, + (u'mydbinstance2.c0hjqouvn9mf.us-west-2.rds.amazonaws.com', 3306)) + self.assertEqual(db.instance_class, 'db.m1.large') + self.assertEqual(db.master_username, 'awsuser') + self.assertEqual(db.availability_zone, 'us-west-2b') + self.assertEqual(db.backup_retention_period, 1) + self.assertEqual(db.preferred_backup_window, '10:30-11:00') + self.assertEqual(db.preferred_maintenance_window, + 'wed:06:30-wed:07:00') + self.assertEqual(db.latest_restorable_time, None) + self.assertEqual(db.multi_az, False) + self.assertEqual(db.iops, 2000) + self.assertEqual(db.pending_modified_values, {}) + + self.assertEqual(db.parameter_group.name, + 'default.mysql5.5') + self.assertEqual(db.parameter_group.description, None) + self.assertEqual(db.parameter_group.engine, None) + + self.assertEqual(db.security_group.owner_id, None) + self.assertEqual(db.security_group.name, 'default') + self.assertEqual(db.security_group.description, None) + self.assertEqual(db.security_group.ec2_groups, []) + self.assertEqual(db.security_group.ip_ranges, []) + self.assertEqual(len(db.status_infos), 1) + self.assertEqual(db.status_infos[0].message, '') + self.assertEqual(db.status_infos[0].normal, True) + self.assertEqual(db.status_infos[0].status, 'replicating') + self.assertEqual(db.status_infos[0].status_type, 'read replication') + self.assertEqual(db.vpc_security_groups[0].status, 'active') + self.assertEqual(db.vpc_security_groups[0].vpc_group, 'sg-1') + self.assertEqual(db.license_model, 'general-public-license') + self.assertEqual(db.engine_version, '5.5.27') + self.assertEqual(db.auto_minor_version_upgrade, True) + self.assertEqual(db.subnet_group.name, 'mydbsubnetgroup') + + +class TestRDSCCreateDBInstance(AWSMockServiceTestCase): + connection_class = RDSConnection + + def setUp(self): + super(TestRDSCCreateDBInstance, self).setUp() + + def default_body(self): + return """ + + + + + mysql + + **** + + 0 + false + general-public-license + + 990524496922 + Complete + description + subnet_grp1 + + + Active + subnet-7c5b4115 + + us-east-1c + + + + Active + subnet-7b5b4112 + + us-east-1b + + + + Active + subnet-3ea6bd57 + + us-east-1d + + + + + creating + 5.1.50 + simcoprod01 + + + in-sync + default.mysql5.1 + + + + + active + default + + + 00:00-00:30 + true + sat:07:30-sat:08:00 + 10 + db.m1.large + master + + + + 2e5d4270-8501-11e0-bd9b-a7b1ece36d51 + + + """ + + def test_create_db_instance_param_group_name(self): + self.set_http_response(status_code=200) + db = self.service_connection.create_dbinstance( + 'SimCoProd01', + 10, + 'db.m1.large', + 'master', + 'Password01', + param_group='default.mysql5.1', + db_subnet_group_name='dbSubnetgroup01', + backup_retention_period=0) + + self.assert_request_parameters({ + 'Action': 'CreateDBInstance', + 'AllocatedStorage': 10, + 'AutoMinorVersionUpgrade': 'true', + 'BackupRetentionPeriod': 0, + 'DBInstanceClass': 'db.m1.large', + 
'DBInstanceIdentifier': 'SimCoProd01', + 'DBParameterGroupName': 'default.mysql5.1', + 'DBSubnetGroupName': 'dbSubnetgroup01', + 'Engine': 'MySQL5.1', + 'MasterUsername': 'master', + 'MasterUserPassword': 'Password01', + 'Port': 3306 + }, ignore_params_values=['Version']) + + self.assertEqual(db.id, 'simcoprod01') + self.assertEqual(db.engine, 'mysql') + self.assertEqual(db.status, 'creating') + self.assertEqual(db.allocated_storage, 10) + self.assertEqual(db.instance_class, 'db.m1.large') + self.assertEqual(db.master_username, 'master') + self.assertEqual(db.multi_az, False) + self.assertEqual(db.pending_modified_values, + {'MasterUserPassword': '****'}) + + self.assertEqual(db.parameter_group.name, + 'default.mysql5.1') + self.assertEqual(db.parameter_group.description, None) + self.assertEqual(db.parameter_group.engine, None) + self.assertEqual(db.backup_retention_period, 0) + + def test_create_db_instance_param_group_instance(self): + self.set_http_response(status_code=200) + param_group = ParameterGroup() + param_group.name = 'default.mysql5.1' + db = self.service_connection.create_dbinstance( + 'SimCoProd01', + 10, + 'db.m1.large', + 'master', + 'Password01', + param_group=param_group, + db_subnet_group_name='dbSubnetgroup01') + + self.assert_request_parameters({ + 'Action': 'CreateDBInstance', + 'AllocatedStorage': 10, + 'AutoMinorVersionUpgrade': 'true', + 'DBInstanceClass': 'db.m1.large', + 'DBInstanceIdentifier': 'SimCoProd01', + 'DBParameterGroupName': 'default.mysql5.1', + 'DBSubnetGroupName': 'dbSubnetgroup01', + 'Engine': 'MySQL5.1', + 'MasterUsername': 'master', + 'MasterUserPassword': 'Password01', + 'Port': 3306, + }, ignore_params_values=['Version']) + + self.assertEqual(db.id, 'simcoprod01') + self.assertEqual(db.engine, 'mysql') + self.assertEqual(db.status, 'creating') + self.assertEqual(db.allocated_storage, 10) + self.assertEqual(db.instance_class, 'db.m1.large') + self.assertEqual(db.master_username, 'master') + self.assertEqual(db.multi_az, False) + self.assertEqual(db.pending_modified_values, + {'MasterUserPassword': '****'}) + self.assertEqual(db.parameter_group.name, + 'default.mysql5.1') + self.assertEqual(db.parameter_group.description, None) + self.assertEqual(db.parameter_group.engine, None) + + +class TestRDSConnectionRestoreDBInstanceFromPointInTime(AWSMockServiceTestCase): + connection_class = RDSConnection + + def setUp(self): + super(TestRDSConnectionRestoreDBInstanceFromPointInTime, self).setUp() + + def default_body(self): + return """ + + + + + mysql + + 1 + false + general-public-license + creating + 5.1.50 + restored-db + + + in-sync + default.mysql5.1 + + + + + active + default + + + 00:00-00:30 + true + sat:07:30-sat:08:00 + 10 + db.m1.large + master + + + + 1ef546bc-850b-11e0-90aa-eb648410240d + + + """ + + def test_restore_dbinstance_from_point_in_time(self): + self.set_http_response(status_code=200) + db = self.service_connection.restore_dbinstance_from_point_in_time( + 'simcoprod01', + 'restored-db', + True) + + self.assert_request_parameters({ + 'Action': 'RestoreDBInstanceToPointInTime', + 'SourceDBInstanceIdentifier': 'simcoprod01', + 'TargetDBInstanceIdentifier': 'restored-db', + 'UseLatestRestorableTime': 'true', + }, ignore_params_values=['Version']) + + self.assertEqual(db.id, 'restored-db') + self.assertEqual(db.engine, 'mysql') + self.assertEqual(db.status, 'creating') + self.assertEqual(db.allocated_storage, 10) + self.assertEqual(db.instance_class, 'db.m1.large') + self.assertEqual(db.master_username, 'master') + 
self.assertEqual(db.multi_az, False) + + self.assertEqual(db.parameter_group.name, + 'default.mysql5.1') + self.assertEqual(db.parameter_group.description, None) + self.assertEqual(db.parameter_group.engine, None) + + def test_restore_dbinstance_from_point_in_time__db_subnet_group_name(self): + self.set_http_response(status_code=200) + db = self.service_connection.restore_dbinstance_from_point_in_time( + 'simcoprod01', + 'restored-db', + True, + db_subnet_group_name='dbsubnetgroup') + + self.assert_request_parameters({ + 'Action': 'RestoreDBInstanceToPointInTime', + 'SourceDBInstanceIdentifier': 'simcoprod01', + 'TargetDBInstanceIdentifier': 'restored-db', + 'UseLatestRestorableTime': 'true', + 'DBSubnetGroupName': 'dbsubnetgroup', + }, ignore_params_values=['Version']) + + def test_create_db_instance_vpc_sg_str(self): + self.set_http_response(status_code=200) + vpc_security_groups = [ + VPCSecurityGroupMembership(self.service_connection, 'active', 'sg-1'), + VPCSecurityGroupMembership(self.service_connection, None, 'sg-2')] + + db = self.service_connection.create_dbinstance( + 'SimCoProd01', + 10, + 'db.m1.large', + 'master', + 'Password01', + param_group='default.mysql5.1', + db_subnet_group_name='dbSubnetgroup01', + vpc_security_groups=vpc_security_groups) + + self.assert_request_parameters({ + 'Action': 'CreateDBInstance', + 'AllocatedStorage': 10, + 'AutoMinorVersionUpgrade': 'true', + 'DBInstanceClass': 'db.m1.large', + 'DBInstanceIdentifier': 'SimCoProd01', + 'DBParameterGroupName': 'default.mysql5.1', + 'DBSubnetGroupName': 'dbSubnetgroup01', + 'Engine': 'MySQL5.1', + 'MasterUsername': 'master', + 'MasterUserPassword': 'Password01', + 'Port': 3306, + 'VpcSecurityGroupIds.member.1': 'sg-1', + 'VpcSecurityGroupIds.member.2': 'sg-2' + }, ignore_params_values=['Version']) + + def test_create_db_instance_vpc_sg_obj(self): + self.set_http_response(status_code=200) + + sg1 = SecurityGroup(name='sg-1') + sg2 = SecurityGroup(name='sg-2') + + vpc_security_groups = [ + VPCSecurityGroupMembership(self.service_connection, 'active', sg1.name), + VPCSecurityGroupMembership(self.service_connection, None, sg2.name)] + + db = self.service_connection.create_dbinstance( + 'SimCoProd01', + 10, + 'db.m1.large', + 'master', + 'Password01', + param_group='default.mysql5.1', + db_subnet_group_name='dbSubnetgroup01', + vpc_security_groups=vpc_security_groups) + + self.assert_request_parameters({ + 'Action': 'CreateDBInstance', + 'AllocatedStorage': 10, + 'AutoMinorVersionUpgrade': 'true', + 'DBInstanceClass': 'db.m1.large', + 'DBInstanceIdentifier': 'SimCoProd01', + 'DBParameterGroupName': 'default.mysql5.1', + 'DBSubnetGroupName': 'dbSubnetgroup01', + 'Engine': 'MySQL5.1', + 'MasterUsername': 'master', + 'MasterUserPassword': 'Password01', + 'Port': 3306, + 'VpcSecurityGroupIds.member.1': 'sg-1', + 'VpcSecurityGroupIds.member.2': 'sg-2' + }, ignore_params_values=['Version']) + + +class TestRDSOptionGroups(AWSMockServiceTestCase): + connection_class = RDSConnection + + def setUp(self): + super(TestRDSOptionGroups, self).setUp() + + def default_body(self): + return """ + + + + + 11.2 + myoptiongroup + oracle-se1 + Test option group + + + + 11.2 + default:oracle-se1-11-2 + oracle-se1 + Default Option Group. 
+ + + + + + e4b234d9-84d5-11e1-87a6-71059839a52b + + + """ + + def test_describe_option_groups(self): + self.set_http_response(status_code=200) + response = self.service_connection.describe_option_groups() + self.assertEqual(len(response), 2) + options = response[0] + self.assertEqual(options.name, 'myoptiongroup') + self.assertEqual(options.description, 'Test option group') + self.assertEqual(options.engine_name, 'oracle-se1') + self.assertEqual(options.major_engine_version, '11.2') + options = response[1] + self.assertEqual(options.name, 'default:oracle-se1-11-2') + self.assertEqual(options.description, 'Default Option Group.') + self.assertEqual(options.engine_name, 'oracle-se1') + self.assertEqual(options.major_engine_version, '11.2') + +class TestRDSLogFile(AWSMockServiceTestCase): + connection_class = RDSConnection + + def setUp(self): + super(TestRDSLogFile, self).setUp() + + def default_body(self): + return """ + + + + + 1364403600000 + error/mysql-error-running.log + 0 + + + 1364338800000 + error/mysql-error-running.log.0 + 0 + + + 1364342400000 + error/mysql-error-running.log.1 + 0 + + + 1364346000000 + error/mysql-error-running.log.2 + 0 + + + 1364349600000 + error/mysql-error-running.log.3 + 0 + + + 1364405700000 + error/mysql-error.log + 0 + + + + + d70fb3b3-9704-11e2-a0db-871552e0ef19 + + + """ + + def test_get_all_logs_simple(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_all_logs('db1') + + self.assert_request_parameters({ + 'Action': 'DescribeDBLogFiles', + 'DBInstanceIdentifier': 'db1', + }, ignore_params_values=['Version']) + + self.assertEqual(len(response), 6) + self.assertTrue(isinstance(response[0], LogFile)) + self.assertEqual(response[0].log_filename, 'error/mysql-error-running.log') + self.assertEqual(response[0].last_written, '1364403600000') + self.assertEqual(response[0].size, '0') + + def test_get_all_logs_filtered(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_all_logs('db_instance_1', max_records=100, marker='error/mysql-error.log', file_size=2000000, filename_contains='error', file_last_written=12345678) + + self.assert_request_parameters({ + 'Action': 'DescribeDBLogFiles', + 'DBInstanceIdentifier': 'db_instance_1', + 'MaxRecords': 100, + 'Marker': 'error/mysql-error.log', + 'FileSize': 2000000, + 'FilenameContains': 'error', + 'FileLastWritten': 12345678, + }, ignore_params_values=['Version']) + + self.assertEqual(len(response), 6) + self.assertTrue(isinstance(response[0], LogFile)) + self.assertEqual(response[0].log_filename, 'error/mysql-error-running.log') + self.assertEqual(response[0].last_written, '1364403600000') + self.assertEqual(response[0].size, '0') + + +class TestRDSLogFileDownload(AWSMockServiceTestCase): + connection_class = RDSConnection + logfile_sample = """ +??2014-01-26 23:59:00.01 spid54 Microsoft SQL Server 2012 - 11.0.2100.60 (X64) + + Feb 10 2012 19:39:15 + + Copyright (c) Microsoft Corporation + + Web Edition (64-bit) on Windows NT 6.1 <X64> (Build 7601: Service Pack 1) (Hypervisor) + + + +2014-01-26 23:59:00.01 spid54 (c) Microsoft Corporation. + +2014-01-26 23:59:00.01 spid54 All rights reserved. + +2014-01-26 23:59:00.01 spid54 Server process ID is 2976. + +2014-01-26 23:59:00.01 spid54 System Manufacturer: 'Xen', System Model: 'HVM domU'. + +2014-01-26 23:59:00.01 spid54 Authentication mode is MIXED. + +2014-01-26 23:59:00.01 spid54 Logging SQL Server messages in file 'D:\RDSDBDATA\Log\ERROR'. 
+ +2014-01-26 23:59:00.01 spid54 The service account is 'WORKGROUP\AMAZONA-NUQUUMV$'. This is an informational message; no user action is required. + +2014-01-26 23:59:00.01 spid54 The error log has been reinitialized. See the previous log for older entries. + +2014-01-27 00:00:56.42 spid25s This instance of SQL Server has been using a process ID of 2976 since 10/21/2013 2:16:50 AM (local) 10/21/2013 2:16:50 AM (UTC). This is an informational message only; no user action is required. + +2014-01-27 09:35:15.43 spid71 I/O is frozen on database model. No user action is required. However, if I/O is not resumed promptly, you could cancel the backup. + +2014-01-27 09:35:15.44 spid72 I/O is frozen on database msdb. No user action is required. However, if I/O is not resumed promptly, you could cancel the backup. + +2014-01-27 09:35:15.44 spid74 I/O is frozen on database rdsadmin. No user action is required. However, if I/O is not resumed promptly, you could cancel the backup. + +2014-01-27 09:35:15.44 spid73 I/O is frozen on database master. No user action is required. However, if I/O is not resumed promptly, you could cancel the backup. + +2014-01-27 09:35:25.57 spid73 I/O was resumed on database master. No user action is required. + +2014-01-27 09:35:25.57 spid74 I/O was resumed on database rdsadmin. No user action is required. + +2014-01-27 09:35:25.57 spid71 I/O was resumed on database model. No user action is required. + +2014-01-27 09:35:25.57 spid72 I/O was resumed on database msdb. No user action is required. + """ + + def setUp(self): + super(TestRDSLogFileDownload, self).setUp() + + def default_body(self): + return """ + + + 0:4485 + %s + false + + + 27143615-87ae-11e3-acc9-fb64b157268e + + + """ % self.logfile_sample + + def test_single_download(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_log_file('db1', 'foo.log') + + self.assertTrue(isinstance(response, LogFileObject)) + self.assertEqual(response.marker, '0:4485') + self.assertEqual(response.dbinstance_id, 'db1') + self.assertEqual(response.log_filename, 'foo.log') + + self.assertEqual(response.data, saxutils.unescape(self.logfile_sample)) + + self.assert_request_parameters({ + 'Action': 'DownloadDBLogFilePortion', + 'DBInstanceIdentifier': 'db1', + 'LogFileName': 'foo.log', + }, ignore_params_values=['Version']) + + def test_multi_args(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_log_file('db1', 'foo.log', marker='0:4485', number_of_lines=10) + + self.assertTrue(isinstance(response, LogFileObject)) + + self.assert_request_parameters({ + 'Action': 'DownloadDBLogFilePortion', + 'DBInstanceIdentifier': 'db1', + 'Marker': '0:4485', + 'NumberOfLines': 10, + 'LogFileName': 'foo.log', + }, ignore_params_values=['Version']) + + +class TestRDSOptionGroupOptions(AWSMockServiceTestCase): + connection_class = RDSConnection + + def setUp(self): + super(TestRDSOptionGroupOptions, self).setUp() + + def default_body(self): + return """ + + + + + 11.2 + true + + Oracle Enterprise Manager + 1158 + OEM + oracle-se1 + 0.2.v3 + false + false + + + + + d9c8f6a1-84c7-11e1-a264-0b23c28bc344 + + + """ + + def test_describe_option_group_options(self): + self.set_http_response(status_code=200) + response = self.service_connection.describe_option_group_options() + self.assertEqual(len(response), 1) + options = response[0] + self.assertEqual(options.name, 'OEM') + self.assertEqual(options.description, 'Oracle Enterprise Manager') + self.assertEqual(options.engine_name, 
'oracle-se1') + self.assertEqual(options.major_engine_version, '11.2') + self.assertEqual(options.min_minor_engine_version, '0.2.v3') + self.assertEqual(options.port_required, True) + self.assertEqual(options.default_port, 1158) + self.assertEqual(options.permanent, False) + self.assertEqual(options.persistent, False) + self.assertEqual(options.depends_on, []) + + +if __name__ == '__main__': + unittest.main() + diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/rds/test_snapshot.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/rds/test_snapshot.py new file mode 100644 index 0000000000000000000000000000000000000000..c3c9d8a67282483ca4c145ede59fc32c350a3738 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/rds/test_snapshot.py @@ -0,0 +1,296 @@ +from tests.unit import unittest +from tests.unit import AWSMockServiceTestCase + +from boto.rds import RDSConnection +from boto.rds.dbsnapshot import DBSnapshot +from boto.rds import DBInstance + + +class TestDescribeDBSnapshots(AWSMockServiceTestCase): + connection_class = RDSConnection + + def default_body(self): + return """ + + + + + 3306 + 2011-05-23T06:29:03.483Z + mysql + available + us-east-1a + general-public-license + 2011-05-23T06:06:43.110Z + 10 + simcoprod01 + 5.1.50 + mydbsnapshot + manual + master + myoptiongroupname + 1000 + 100 + eu-west-1 + myvpc + + + 3306 + 2011-03-11T07:20:24.082Z + mysql + available + us-east-1a + general-public-license + 2010-08-04T23:27:36.420Z + 50 + mydbinstance + 5.1.49 + mysnapshot1 + manual + sa + myoptiongroupname + 1000 + + + 3306 + 2012-04-02T00:01:24.082Z + mysql + available + us-east-1d + general-public-license + 2010-07-16T00:06:59.107Z + 60 + simcoprod01 + 5.1.47 + rds:simcoprod01-2012-04-02-00-01 + automated + master + myoptiongroupname + 1000 + + + + + c4191173-8506-11e0-90aa-eb648410240d + + + """ + + def test_describe_dbinstances_by_instance(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_all_dbsnapshots(instance_id='simcoprod01') + self.assert_request_parameters({ + 'Action': 'DescribeDBSnapshots', + 'DBInstanceIdentifier': 'simcoprod01' + }, ignore_params_values=['Version']) + self.assertEqual(len(response), 3) + self.assertIsInstance(response[0], DBSnapshot) + self.assertEqual(response[0].id, 'mydbsnapshot') + self.assertEqual(response[0].status, 'available') + self.assertEqual(response[0].instance_id, 'simcoprod01') + self.assertEqual(response[0].engine_version, '5.1.50') + self.assertEqual(response[0].license_model, 'general-public-license') + self.assertEqual(response[0].iops, 1000) + self.assertEqual(response[0].option_group_name, 'myoptiongroupname') + self.assertEqual(response[0].percent_progress, 100) + self.assertEqual(response[0].snapshot_type, 'manual') + self.assertEqual(response[0].source_region, 'eu-west-1') + self.assertEqual(response[0].vpc_id, 'myvpc') + + + +class TestCreateDBSnapshot(AWSMockServiceTestCase): + connection_class = RDSConnection + + def default_body(self): + return """ + + + + 3306 + mysql + creating + us-east-1a + general-public-license + 2011-05-23T06:06:43.110Z + 10 + simcoprod01 + 5.1.50 + mydbsnapshot + manual + master + + + + c4181d1d-8505-11e0-90aa-eb648410240d + + + """ + + def test_create_dbinstance(self): + self.set_http_response(status_code=200) + response = self.service_connection.create_dbsnapshot('mydbsnapshot', 'simcoprod01') + self.assert_request_parameters({ + 'Action': 'CreateDBSnapshot', + 'DBSnapshotIdentifier': 'mydbsnapshot', + 'DBInstanceIdentifier': 'simcoprod01' + }, 
ignore_params_values=['Version']) + self.assertIsInstance(response, DBSnapshot) + self.assertEqual(response.id, 'mydbsnapshot') + self.assertEqual(response.instance_id, 'simcoprod01') + self.assertEqual(response.status, 'creating') + + +class TestCopyDBSnapshot(AWSMockServiceTestCase): + connection_class = RDSConnection + + def default_body(self): + return """ + + + + 3306 + mysql + available + us-east-1a + general-public-license + 2011-05-23T06:06:43.110Z + 10 + simcoprod01 + 5.1.50 + mycopieddbsnapshot + manual + master + + + + c4181d1d-8505-11e0-90aa-eb648410240d + + + """ + + def test_copy_dbinstance(self): + self.set_http_response(status_code=200) + response = self.service_connection.copy_dbsnapshot('myautomaticdbsnapshot', 'mycopieddbsnapshot') + self.assert_request_parameters({ + 'Action': 'CopyDBSnapshot', + 'SourceDBSnapshotIdentifier': 'myautomaticdbsnapshot', + 'TargetDBSnapshotIdentifier': 'mycopieddbsnapshot' + }, ignore_params_values=['Version']) + self.assertIsInstance(response, DBSnapshot) + self.assertEqual(response.id, 'mycopieddbsnapshot') + self.assertEqual(response.status, 'available') + + +class TestDeleteDBSnapshot(AWSMockServiceTestCase): + connection_class = RDSConnection + + def default_body(self): + return """ + + + + 3306 + 2011-03-11T07:20:24.082Z + mysql + deleted + us-east-1d + general-public-license + 2010-07-16T00:06:59.107Z + 60 + simcoprod01 + 5.1.47 + mysnapshot2 + manual + master + + + + 627a43a1-8507-11e0-bd9b-a7b1ece36d51 + + + """ + + def test_delete_dbinstance(self): + self.set_http_response(status_code=200) + response = self.service_connection.delete_dbsnapshot('mysnapshot2') + self.assert_request_parameters({ + 'Action': 'DeleteDBSnapshot', + 'DBSnapshotIdentifier': 'mysnapshot2' + }, ignore_params_values=['Version']) + self.assertIsInstance(response, DBSnapshot) + self.assertEqual(response.id, 'mysnapshot2') + self.assertEqual(response.status, 'deleted') + + +class TestRestoreDBInstanceFromDBSnapshot(AWSMockServiceTestCase): + connection_class = RDSConnection + + def default_body(self): + return """ + + + + + mysql + + 1 + false + general-public-license + creating + 5.1.50 + myrestoreddbinstance + + + in-sync + default.mysql5.1 + + + + + active + default + + + 00:00-00:30 + true + sat:07:30-sat:08:00 + 10 + db.m1.large + master + + + + 7ca622e8-8508-11e0-bd9b-a7b1ece36d51 + + + """ + + def test_restore_dbinstance_from_dbsnapshot(self): + self.set_http_response(status_code=200) + response = self.service_connection.restore_dbinstance_from_dbsnapshot('mydbsnapshot', + 'myrestoreddbinstance', + 'db.m1.large', + '3306', + 'us-east-1a', + 'false', + 'true') + self.assert_request_parameters({ + 'Action': 'RestoreDBInstanceFromDBSnapshot', + 'DBSnapshotIdentifier': 'mydbsnapshot', + 'DBInstanceIdentifier': 'myrestoreddbinstance', + 'DBInstanceClass': 'db.m1.large', + 'Port': '3306', + 'AvailabilityZone': 'us-east-1a', + 'MultiAZ': 'false', + 'AutoMinorVersionUpgrade': 'true' + }, ignore_params_values=['Version']) + self.assertIsInstance(response, DBInstance) + self.assertEqual(response.id, 'myrestoreddbinstance') + self.assertEqual(response.status, 'creating') + self.assertEqual(response.instance_class, 'db.m1.large') + self.assertEqual(response.multi_az, False) + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/rds2/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/rds2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 
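A note on the rds2 tests that follow: boto.rds2.layer1 returns the decoded
JSON document itself, so, unlike the boto.rds XML-object layer tested above,
callers unwrap the nested Response/Result dictionaries by hand. A minimal
usage sketch of that access pattern (the credentials are placeholders, and
the key layout follows the canned body used in the test below):

    # Sketch only: navigating a boto.rds2 layer1 response.
    from boto.rds2.layer1 import RDSConnection

    conn = RDSConnection(aws_access_key_id='...',
                         aws_secret_access_key='...')
    response = conn.describe_db_instances('mydbinstance2')

    # Unwrap the Response -> Result -> DBInstances nesting explicitly.
    result = response['DescribeDBInstancesResponse']['DescribeDBInstancesResult']
    for wrapper in result['DBInstances']:
        db = wrapper['DBInstance']
        print("%s is %s" % (db['DBInstanceIdentifier'], db['DBInstanceStatus']))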
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/rds2/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/rds2/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..8550294559ade16132332d130cb5de8eae7940c2 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/rds2/test_connection.py @@ -0,0 +1,209 @@ +#!/usr/bin/env python +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from tests.unit import unittest +from tests.unit import AWSMockServiceTestCase + +from boto.ec2.securitygroup import SecurityGroup +from boto.rds2.layer1 import RDSConnection + + +class TestRDS2Connection(AWSMockServiceTestCase): + connection_class = RDSConnection + + def setUp(self): + super(TestRDS2Connection, self).setUp() + + def default_body(self): + return """{ + "DescribeDBInstancesResponse": { + "DescribeDBInstancesResult": { + "DBInstances": [{ + "DBInstance": { + "Iops": 2000, + "BackupRetentionPeriod": 1, + "MultiAZ": false, + "DBInstanceStatus": "backing-up", + "DBInstanceIdentifier": "mydbinstance2", + "PreferredBackupWindow": "10:30-11:00", + "PreferredMaintenanceWindow": "wed:06:30-wed:07:00", + "OptionGroupMembership": { + "OptionGroupName": "default:mysql-5-5", + "Status": "in-sync" + }, + "AvailabilityZone": "us-west-2b", + "ReadReplicaDBInstanceIdentifiers": null, + "Engine": "mysql", + "PendingModifiedValues": null, + "LicenseModel": "general-public-license", + "DBParameterGroups": [{ + "DBParameterGroup": { + "ParameterApplyStatus": "in-sync", + "DBParameterGroupName": "default.mysql5.5" + } + }], + "Endpoint": { + "Port": 3306, + "Address": "mydbinstance2.c0hjqouvn9mf.us-west-2.rds.amazonaws.com" + }, + "EngineVersion": "5.5.27", + "DBSecurityGroups": [{ + "DBSecurityGroup": { + "Status": "active", + "DBSecurityGroupName": "default" + } + }], + "VpcSecurityGroups": [{ + "VpcSecurityGroupMembership": { + "VpcSecurityGroupId": "sg-1", + "Status": "active" + } + }], + "DBName": "mydb2", + "AutoMinorVersionUpgrade": true, + "InstanceCreateTime": "2012-10-03T22:01:51.047Z", + "AllocatedStorage": 200, + "DBInstanceClass": "db.m1.large", + "MasterUsername": "awsuser", + "StatusInfos": [{ + "DBInstanceStatusInfo": { + "Message": null, + "Normal": true, + "Status": "replicating", + "StatusType": "read replication" + } + }], + "DBSubnetGroup": { + "VpcId": "990524496922", + "SubnetGroupStatus": "Complete", + "DBSubnetGroupDescription": "My modified DBSubnetGroup", + 
"DBSubnetGroupName": "mydbsubnetgroup", + "Subnets": [{ + "Subnet": { + "SubnetStatus": "Active", + "SubnetIdentifier": "subnet-7c5b4115", + "SubnetAvailabilityZone": { + "Name": "us-east-1c" + } + }, + "Subnet": { + "SubnetStatus": "Active", + "SubnetIdentifier": "subnet-7b5b4112", + "SubnetAvailabilityZone": { + "Name": "us-east-1b" + } + }, + "Subnet": { + "SubnetStatus": "Active", + "SubnetIdentifier": "subnet-3ea6bd57", + "SubnetAvailabilityZone": { + "Name": "us-east-1d" + } + } + }] + } + } + }] + } + } + }""" + + def test_describe_db_instances(self): + self.set_http_response(status_code=200) + response = self.service_connection.describe_db_instances('instance_id') + self.assertEqual(len(response), 1) + self.assert_request_parameters({ + 'Action': 'DescribeDBInstances', + 'ContentType': 'JSON', + 'DBInstanceIdentifier': 'instance_id', + }, ignore_params_values=['Version']) + db = response['DescribeDBInstancesResponse']\ + ['DescribeDBInstancesResult']['DBInstances'][0]\ + ['DBInstance'] + self.assertEqual(db['DBInstanceIdentifier'], 'mydbinstance2') + self.assertEqual(db['InstanceCreateTime'], '2012-10-03T22:01:51.047Z') + self.assertEqual(db['Engine'], 'mysql') + self.assertEqual(db['DBInstanceStatus'], 'backing-up') + self.assertEqual(db['AllocatedStorage'], 200) + self.assertEqual(db['Endpoint']['Port'], 3306) + self.assertEqual(db['DBInstanceClass'], 'db.m1.large') + self.assertEqual(db['MasterUsername'], 'awsuser') + self.assertEqual(db['AvailabilityZone'], 'us-west-2b') + self.assertEqual(db['BackupRetentionPeriod'], 1) + self.assertEqual(db['PreferredBackupWindow'], '10:30-11:00') + self.assertEqual(db['PreferredMaintenanceWindow'], + 'wed:06:30-wed:07:00') + self.assertEqual(db['MultiAZ'], False) + self.assertEqual(db['Iops'], 2000) + self.assertEqual(db['PendingModifiedValues'], None) + self.assertEqual( + db['DBParameterGroups'][0]['DBParameterGroup']\ + ['DBParameterGroupName'], + 'default.mysql5.5' + ) + self.assertEqual( + db['DBSecurityGroups'][0]['DBSecurityGroup']['DBSecurityGroupName'], + 'default' + ) + self.assertEqual( + db['DBSecurityGroups'][0]['DBSecurityGroup']['Status'], + 'active' + ) + self.assertEqual(len(db['StatusInfos']), 1) + self.assertEqual( + db['StatusInfos'][0]['DBInstanceStatusInfo']['Message'], + None + ) + self.assertEqual( + db['StatusInfos'][0]['DBInstanceStatusInfo']['Normal'], + True + ) + self.assertEqual( + db['StatusInfos'][0]['DBInstanceStatusInfo']['Status'], + 'replicating' + ) + self.assertEqual( + db['StatusInfos'][0]['DBInstanceStatusInfo']['StatusType'], + 'read replication' + ) + self.assertEqual( + db['VpcSecurityGroups'][0]['VpcSecurityGroupMembership']['Status'], + 'active' + ) + self.assertEqual( + db['VpcSecurityGroups'][0]['VpcSecurityGroupMembership']\ + ['VpcSecurityGroupId'], + 'sg-1' + ) + self.assertEqual(db['LicenseModel'], 'general-public-license') + self.assertEqual(db['EngineVersion'], '5.5.27') + self.assertEqual(db['AutoMinorVersionUpgrade'], True) + self.assertEqual( + db['DBSubnetGroup']['DBSubnetGroupName'], + 'mydbsubnetgroup' + ) + + +if __name__ == '__main__': + unittest.main() + diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/route53/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/route53/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/route53/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/route53/test_connection.py new file mode 100644 index 
0000000000000000000000000000000000000000..d1a8015256f5e5133e2f92a2a73627a2b6b84565
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/route53/test_connection.py
@@ -0,0 +1,802 @@
+#!/usr/bin/env python
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from tests.compat import mock
+import re
+import xml.dom.minidom
+from boto.exception import BotoServerError
+from boto.route53.connection import Route53Connection
+from boto.route53.exception import DNSServerError
+from boto.route53.healthcheck import HealthCheck
+from boto.route53.record import ResourceRecordSets, Record
+from boto.route53.zone import Zone
+
+from nose.plugins.attrib import attr
+from tests.unit import AWSMockServiceTestCase
+from boto.compat import six
+urllib = six.moves.urllib
+
+
+@attr(route53=True)
+class TestRoute53Connection(AWSMockServiceTestCase):
+    connection_class = Route53Connection
+
+    def setUp(self):
+        super(TestRoute53Connection, self).setUp()
+        self.calls = {
+            'count': 0,
+        }
+
+    def default_body(self):
+        return b"""
+  It failed.
+
+"""
+
+    def test_typical_400(self):
+        self.set_http_response(status_code=400, header=[
+            ['Code', 'AccessDenied'],
+        ])
+
+        with self.assertRaises(DNSServerError) as err:
+            self.service_connection.get_all_hosted_zones()
+
+        self.assertTrue('It failed.' in str(err.exception))
+
+    def test_retryable_400_prior_request_not_complete(self):
+        # Test ability to retry on ``PriorRequestNotComplete``.
+        self.set_http_response(status_code=400, header=[
+            ['Code', 'PriorRequestNotComplete'],
+        ])
+        self.do_retry_handler()
+
+    def test_retryable_400_throttling(self):
+        # Test ability to retry on ``Throttling``.
+        self.set_http_response(status_code=400, header=[
+            ['Code', 'Throttling'],
+        ])
+        self.do_retry_handler()
+
+    @mock.patch('time.sleep')
+    def do_retry_handler(self, sleep_mock):
+
+        def incr_retry_handler(func):
+            def _wrapper(*args, **kwargs):
+                self.calls['count'] += 1
+                return func(*args, **kwargs)
+            return _wrapper
+
+        # Patch.
+        orig_retry = self.service_connection._retry_handler
+        self.service_connection._retry_handler = incr_retry_handler(
+            orig_retry
+        )
+        self.assertEqual(self.calls['count'], 0)
+
+        # Retries get exhausted: the wrapped handler runs once for the
+        # initial request and once per retry, seven calls in total here.
+        with self.assertRaises(BotoServerError):
+            self.service_connection.get_all_hosted_zones()
+
+        self.assertEqual(self.calls['count'], 7)
+
+        # Unpatch.
+ self.service_connection._retry_handler = orig_retry + + def test_private_zone_invalid_vpc_400(self): + self.set_http_response(status_code=400, header=[ + ['Code', 'InvalidVPCId'], + ]) + + with self.assertRaises(DNSServerError) as err: + self.service_connection.create_hosted_zone("example.com.", + private_zone=True) + self.assertTrue('It failed.' in str(err.exception)) + + +@attr(route53=True) +class TestCreateZoneRoute53(AWSMockServiceTestCase): + connection_class = Route53Connection + + def setUp(self): + super(TestCreateZoneRoute53, self).setUp() + + def default_body(self): + return b""" + + + /hostedzone/Z11111 + example.com. + aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + + + false + + 2 + + + /change/C1111111111111 + PENDING + 2014-02-02T10:19:29.928Z + + + + ns-100.awsdns-01.com + ns-1000.awsdns-01.co.uk + ns-1000.awsdns-01.org + ns-900.awsdns-01.net + + + + """ + + def test_create_zone(self): + self.set_http_response(status_code=201) + response = self.service_connection.create_zone("example.com.") + + self.assertTrue(isinstance(response, Zone)) + self.assertEqual(response.id, "Z11111") + self.assertEqual(response.name, "example.com.") + + def test_create_hosted_zone(self): + self.set_http_response(status_code=201) + response = self.service_connection.create_hosted_zone("example.com.", + "my_ref", + "a comment") + + self.assertEqual(response['CreateHostedZoneResponse'] + ['DelegationSet']['NameServers'], + ['ns-100.awsdns-01.com', + 'ns-1000.awsdns-01.co.uk', + 'ns-1000.awsdns-01.org', + 'ns-900.awsdns-01.net']) + + self.assertEqual(response['CreateHostedZoneResponse'] + ['HostedZone']['Config']['PrivateZone'], + u'false') + + +@attr(route53=True) +class TestCreatePrivateZoneRoute53(AWSMockServiceTestCase): + connection_class = Route53Connection + + def setUp(self): + super(TestCreatePrivateZoneRoute53, self).setUp() + + def default_body(self): + return b""" + + + /hostedzone/Z11111 + example.com. + + vpc-1a2b3c4d + us-east-1 + + aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + + + true + + 2 + + + /change/C1111111111111 + PENDING + 2014-02-02T10:19:29.928Z + + + + ns-100.awsdns-01.com + ns-1000.awsdns-01.co.uk + ns-1000.awsdns-01.org + ns-900.awsdns-01.net + + + + """ + + def test_create_private_zone(self): + self.set_http_response(status_code=201) + r = self.service_connection.create_hosted_zone("example.com.", + private_zone=True, + vpc_id='vpc-1a2b3c4d', + vpc_region='us-east-1' + ) + + self.assertEqual(r['CreateHostedZoneResponse']['HostedZone'] + ['Config']['PrivateZone'], u'true') + self.assertEqual(r['CreateHostedZoneResponse']['HostedZone'] + ['VPC']['VPCId'], u'vpc-1a2b3c4d') + self.assertEqual(r['CreateHostedZoneResponse']['HostedZone'] + ['VPC']['VPCRegion'], u'us-east-1') + + +@attr(route53=True) +class TestGetZoneRoute53(AWSMockServiceTestCase): + connection_class = Route53Connection + + def setUp(self): + super(TestGetZoneRoute53, self).setUp() + + def default_body(self): + return b""" + + + + /hostedzone/Z1111 + example2.com. + aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + + 3 + + + /hostedzone/Z2222 + example1.com. + aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeef + + 6 + + + /hostedzone/Z3333 + example.com. 
+ aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeeg + + 6 + + + false + 100 + + """ + + def test_list_zones(self): + self.set_http_response(status_code=201) + response = self.service_connection.get_all_hosted_zones() + + domains = ['example2.com.', 'example1.com.', 'example.com.'] + for d in response['ListHostedZonesResponse']['HostedZones']: + domains.remove(d['Name']) + + self.assertEqual(domains, []) + + def test_get_zone(self): + self.set_http_response(status_code=201) + response = self.service_connection.get_zone('example.com.') + + self.assertTrue(isinstance(response, Zone)) + self.assertEqual(response.name, "example.com.") + + +@attr(route53=True) +class TestGetHostedZoneRoute53(AWSMockServiceTestCase): + connection_class = Route53Connection + + def setUp(self): + super(TestGetHostedZoneRoute53, self).setUp() + + def default_body(self): + return b""" + + + /hostedzone/Z1111 + example.com. + aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + + 3 + + + + ns-1000.awsdns-40.org + ns-200.awsdns-30.com + ns-900.awsdns-50.net + ns-1000.awsdns-00.co.uk + + + +""" + + def test_get_hosted_zone(self): + self.set_http_response(status_code=201) + response = self.service_connection.get_hosted_zone("Z1111") + + self.assertEqual(response['GetHostedZoneResponse'] + ['HostedZone']['Id'], + '/hostedzone/Z1111') + self.assertEqual(response['GetHostedZoneResponse'] + ['HostedZone']['Name'], + 'example.com.') + self.assertEqual(response['GetHostedZoneResponse'] + ['DelegationSet']['NameServers'], + ['ns-1000.awsdns-40.org', 'ns-200.awsdns-30.com', + 'ns-900.awsdns-50.net', 'ns-1000.awsdns-00.co.uk']) + + +@attr(route53=True) +class TestGetAllRRSetsRoute53(AWSMockServiceTestCase): + connection_class = Route53Connection + + def setUp(self): + super(TestGetAllRRSetsRoute53, self).setUp() + + def default_body(self): + return b""" + + + + test.example.com. + A + 60 + + + 10.0.0.1 + + + + + www.example.com. + A + 60 + + + 10.0.0.2 + + + + + us-west-2-evaluate-health.example.com. + A + latency-example-us-west-2-evaluate-health + us-west-2 + + ABCDEFG123456 + true + example-123456-evaluate-health.us-west-2.elb.amazonaws.com. + + abcdefgh-abcd-abcd-abcd-abcdefghijkl + + + us-west-2-no-evaluate-health.example.com. + A + latency-example-us-west-2-no-evaluate-health + us-west-2 + + ABCDEFG567890 + false + example-123456-no-evaluate-health.us-west-2.elb.amazonaws.com. + + abcdefgh-abcd-abcd-abcd-abcdefghijkl + + + failover.example.com. + A + failover-primary + PRIMARY + 60 + + + 10.0.0.4 + + + + + us-west-2-evaluate-health-healthcheck.example.com. + A + latency-example-us-west-2-evaluate-health-healthcheck + us-west-2 + + ABCDEFG123456 + true + example-123456-evaluate-health-healthcheck.us-west-2.elb.amazonaws.com.
+ + 076a32f8-86f7-4c9e-9fa2-c163d5be67d9 + + + false + 100 + + """ + + def test_get_all_rr_sets(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_all_rrsets("Z1111", + "A", + "example.com.") + + self.assertIn(self.actual_request.path, + ("/2013-04-01/hostedzone/Z1111/rrset?type=A&name=example.com.", + "/2013-04-01/hostedzone/Z1111/rrset?name=example.com.&type=A")) + + self.assertTrue(isinstance(response, ResourceRecordSets)) + self.assertEqual(response.hosted_zone_id, "Z1111") + self.assertTrue(isinstance(response[0], Record)) + + self.assertEqual(response[0].name, "test.example.com.") + self.assertEqual(response[0].ttl, "60") + self.assertEqual(response[0].type, "A") + + evaluate_record = response[2] + self.assertEqual(evaluate_record.name, 'us-west-2-evaluate-health.example.com.') + self.assertEqual(evaluate_record.type, 'A') + self.assertEqual(evaluate_record.identifier, 'latency-example-us-west-2-evaluate-health') + self.assertEqual(evaluate_record.region, 'us-west-2') + self.assertEqual(evaluate_record.alias_hosted_zone_id, 'ABCDEFG123456') + self.assertTrue(evaluate_record.alias_evaluate_target_health) + self.assertEqual(evaluate_record.alias_dns_name, 'example-123456-evaluate-health.us-west-2.elb.amazonaws.com.') + evaluate_xml = evaluate_record.to_xml() + self.assertEqual(evaluate_record.health_check, 'abcdefgh-abcd-abcd-abcd-abcdefghijkl') + self.assertTrue('<EvaluateTargetHealth>true</EvaluateTargetHealth>' in evaluate_xml) + + no_evaluate_record = response[3] + self.assertEqual(no_evaluate_record.name, 'us-west-2-no-evaluate-health.example.com.') + self.assertEqual(no_evaluate_record.type, 'A') + self.assertEqual(no_evaluate_record.identifier, 'latency-example-us-west-2-no-evaluate-health') + self.assertEqual(no_evaluate_record.region, 'us-west-2') + self.assertEqual(no_evaluate_record.alias_hosted_zone_id, 'ABCDEFG567890') + self.assertFalse(no_evaluate_record.alias_evaluate_target_health) + self.assertEqual(no_evaluate_record.alias_dns_name, 'example-123456-no-evaluate-health.us-west-2.elb.amazonaws.com.') + no_evaluate_xml = no_evaluate_record.to_xml() + self.assertEqual(no_evaluate_record.health_check, 'abcdefgh-abcd-abcd-abcd-abcdefghijkl') + self.assertTrue('<EvaluateTargetHealth>false</EvaluateTargetHealth>' in no_evaluate_xml) + + failover_record = response[4] + self.assertEqual(failover_record.name, 'failover.example.com.') + self.assertEqual(failover_record.type, 'A') + self.assertEqual(failover_record.identifier, 'failover-primary') + self.assertEqual(failover_record.failover, 'PRIMARY') + self.assertEqual(failover_record.ttl, '60') + + healthcheck_record = response[5] + self.assertEqual(healthcheck_record.health_check, '076a32f8-86f7-4c9e-9fa2-c163d5be67d9') + self.assertEqual(healthcheck_record.name, 'us-west-2-evaluate-health-healthcheck.example.com.') + self.assertEqual(healthcheck_record.identifier, 'latency-example-us-west-2-evaluate-health-healthcheck') + self.assertEqual(healthcheck_record.alias_dns_name, 'example-123456-evaluate-health-healthcheck.us-west-2.elb.amazonaws.com.') + + +@attr(route53=True) +class TestTruncatedGetAllRRSetsRoute53(AWSMockServiceTestCase): + connection_class = Route53Connection + + def setUp(self): + super(TestTruncatedGetAllRRSetsRoute53, self).setUp() + + def default_body(self): + return b""" + + + + example.com. + NS + 900 + + + ns-91.awsdns-41.co.uk. + + + ns-1929.awsdns-93.net. + + + ns-12.awsdns-21.org. + + + ns-102.awsdns-96.com. + + + + + example.com. + SOA + 1800 + + + ns-1929.awsdns-93.net. hostmaster.awsdns.net. 1 10800 3600 604800 1800 + + + + + wrr.example.com.
+ A + primary + 100 + 300 + + 127.0.0.1 + + + + true + wrr.example.com. + A + secondary + 3 +""" + + def paged_body(self): + return b""" + + + + wrr.example.com. + A + secondary + 50 + 300 + + 127.0.0.2 + + + + false + 3 +""" + + + def test_get_all_rr_sets(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_all_rrsets("Z1111", maxitems=3) + + # made first request + self.assertEqual(self.actual_request.path, '/2013-04-01/hostedzone/Z1111/rrset?maxitems=3') + + # anticipate a second request when we page it + self.set_http_response(status_code=200, body=self.paged_body()) + + # this should trigger another call to get_all_rrsets + self.assertEqual(len(list(response)), 4) + + url_parts = urllib.parse.urlparse(self.actual_request.path) + self.assertEqual(url_parts.path, '/2013-04-01/hostedzone/Z1111/rrset') + self.assertEqual(urllib.parse.parse_qs(url_parts.query), + dict(type=['A'], name=['wrr.example.com.'], identifier=['secondary'])) + + +@attr(route53=True) +class TestCreateHealthCheckRoute53IpAddress(AWSMockServiceTestCase): + connection_class = Route53Connection + + def setUp(self): + super(TestCreateHealthCheckRoute53IpAddress, self).setUp() + + def default_body(self): + return b""" + + + 34778cf8-e31e-4974-bad0-b108bd1623d3 + 2fa48c8f-76ef-4253-9874-8bcb2b0d7694 + + 74.125.228.81 + 443 + HTTPS_STR_MATCH + OK + /health_check + 30 + 3 + + + + """ + + def test_create_health_check_ip_address(self): + self.set_http_response(status_code=201) + hc = HealthCheck(ip_addr='74.125.228.81', port=443, hc_type='HTTPS_STR_MATCH', resource_path='/health_check', string_match='OK') + hc_xml = hc.to_xml() + self.assertFalse('<FullyQualifiedDomainName>' in hc_xml) + self.assertTrue('<IPAddress>' in hc_xml) + + response = self.service_connection.create_health_check(hc) + hc_resp = response['CreateHealthCheckResponse']['HealthCheck']['HealthCheckConfig'] + self.assertEqual(hc_resp['IPAddress'], '74.125.228.81') + self.assertEqual(hc_resp['ResourcePath'], '/health_check') + self.assertEqual(hc_resp['Type'], 'HTTPS_STR_MATCH') + self.assertEqual(hc_resp['Port'], '443') + self.assertEqual(hc_resp['SearchString'], 'OK') + self.assertEqual(response['CreateHealthCheckResponse']['HealthCheck']['Id'], '34778cf8-e31e-4974-bad0-b108bd1623d3') + + +@attr(route53=True) +class TestGetCheckerIpRanges(AWSMockServiceTestCase): + connection_class = Route53Connection + + def default_body(self): + return b""" + + + 54.183.255.128/26 + 54.228.16.0/26 + 54.232.40.64/26 + 177.71.207.128/26 + 176.34.159.192/26 + + + """ + + def test_get_checker_ip_ranges(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_checker_ip_ranges() + ip_ranges = response['GetCheckerIpRangesResponse']['CheckerIpRanges'] + + self.assertEqual(len(ip_ranges), 5) + self.assertIn('54.183.255.128/26', ip_ranges) + self.assertIn('54.228.16.0/26', ip_ranges) + self.assertIn('54.232.40.64/26', ip_ranges) + self.assertIn('177.71.207.128/26', ip_ranges) + self.assertIn('176.34.159.192/26', ip_ranges) + + +@attr(route53=True) +class TestCreateHealthCheckRoute53FQDN(AWSMockServiceTestCase): + connection_class = Route53Connection + + def setUp(self): + super(TestCreateHealthCheckRoute53FQDN, self).setUp() + + def default_body(self): + return b""" + + + f9abfe10-8d2a-4bbd-8f35-796f0f8572f2 + 3246ac17-b651-4295-a5c8-c132a59693d7 + + 443 + HTTPS + /health_check + example.com + 30 + 3 + + + + """ + + def test_create_health_check_fqdn(self): +
self.set_http_response(status_code=201) + hc = HealthCheck(ip_addr='', port=443, hc_type='HTTPS', resource_path='/health_check', fqdn='example.com') + hc_xml = hc.to_xml() + self.assertTrue('<FullyQualifiedDomainName>' in hc_xml) + self.assertFalse('<IPAddress>' in hc_xml) + + response = self.service_connection.create_health_check(hc) + hc_resp = response['CreateHealthCheckResponse']['HealthCheck']['HealthCheckConfig'] + self.assertEqual(hc_resp['FullyQualifiedDomainName'], 'example.com') + self.assertEqual(hc_resp['ResourcePath'], '/health_check') + self.assertEqual(hc_resp['Type'], 'HTTPS') + self.assertEqual(hc_resp['Port'], '443') + self.assertEqual(response['CreateHealthCheckResponse']['HealthCheck']['Id'], 'f9abfe10-8d2a-4bbd-8f35-796f0f8572f2') + + +@attr(route53=True) +class TestChangeResourceRecordSetsRoute53(AWSMockServiceTestCase): + connection_class = Route53Connection + + def setUp(self): + super(TestChangeResourceRecordSetsRoute53, self).setUp() + + def default_body(self): + return b""" + + + /change/C1111111111111 + PENDING + 2014-05-05T10:11:12.123Z + + + """ + + def test_record_commit(self): + rrsets = ResourceRecordSets(self.service_connection) + rrsets.add_change_record('CREATE', Record('vanilla.example.com', 'A', 60, ['1.2.3.4'])) + rrsets.add_change_record('CREATE', Record('alias.example.com', 'AAAA', alias_hosted_zone_id='Z123OTHER', alias_dns_name='target.other', alias_evaluate_target_health=True)) + rrsets.add_change_record('CREATE', Record('wrr.example.com', 'CNAME', 60, ['cname.target'], weight=10, identifier='weight-1')) + rrsets.add_change_record('CREATE', Record('lbr.example.com', 'TXT', 60, ['text record'], region='us-west-2', identifier='region-1')) + rrsets.add_change_record('CREATE', Record('failover.example.com', 'A', 60, ['2.2.2.2'], health_check='hc-1234', failover='PRIMARY', identifier='primary')) + + changes_xml = rrsets.to_xml() + + # The whitespace doesn't match exactly, so pretty-print both documents + # and collapse the newlines before comparing. + actual_xml = re.sub(r"\s*[\r\n]+", "\n", xml.dom.minidom.parseString(changes_xml).toprettyxml()) + expected_xml = re.sub(r"\s*[\r\n]+", "\n", xml.dom.minidom.parseString(b""" + <ChangeResourceRecordSetsRequest xmlns="https://route53.amazonaws.com/doc/2013-04-01/"> + <ChangeBatch> + <Comment>None</Comment> + <Changes> + <Change> + <Action>CREATE</Action> + <ResourceRecordSet> + <Name>vanilla.example.com</Name> + <Type>A</Type> + <TTL>60</TTL> + <ResourceRecords> + <ResourceRecord> + <Value>1.2.3.4</Value> + </ResourceRecord> + </ResourceRecords> + </ResourceRecordSet> + </Change> + <Change> + <Action>CREATE</Action> + <ResourceRecordSet> + <Name>alias.example.com</Name> + <Type>AAAA</Type> + <AliasTarget> + <HostedZoneId>Z123OTHER</HostedZoneId> + <DNSName>target.other</DNSName> + <EvaluateTargetHealth>true</EvaluateTargetHealth> + </AliasTarget> + </ResourceRecordSet> + </Change> + <Change> + <Action>CREATE</Action> + <ResourceRecordSet> + <Name>wrr.example.com</Name> + <Type>CNAME</Type> + <SetIdentifier>weight-1</SetIdentifier> + <Weight>10</Weight> + <TTL>60</TTL> + <ResourceRecords> + <ResourceRecord> + <Value>cname.target</Value> + </ResourceRecord> + </ResourceRecords> + </ResourceRecordSet> + </Change> + <Change> + <Action>CREATE</Action> + <ResourceRecordSet> + <Name>lbr.example.com</Name> + <Type>TXT</Type> + <SetIdentifier>region-1</SetIdentifier> + <Region>us-west-2</Region> + <TTL>60</TTL> + <ResourceRecords> + <ResourceRecord> + <Value>text record</Value> + </ResourceRecord> + </ResourceRecords> + </ResourceRecordSet> + </Change> + <Change> + <Action>CREATE</Action> + <ResourceRecordSet> + <Name>failover.example.com</Name> + <Type>A</Type> + <SetIdentifier>primary</SetIdentifier> + <Failover>PRIMARY</Failover> + <TTL>60</TTL> + <ResourceRecords> + <ResourceRecord> + <Value>2.2.2.2</Value> + </ResourceRecord> + </ResourceRecords> + <HealthCheckId>hc-1234</HealthCheckId> + </ResourceRecordSet> + </Change> + </Changes> + </ChangeBatch> + </ChangeResourceRecordSetsRequest> + """).toprettyxml()) + + # Note: the alias XML should not include the TTL, even if it's specified in the object model + self.assertEqual(actual_xml, expected_xml) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/route53/test_zone.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/route53/test_zone.py new file mode 100644 index 0000000000000000000000000000000000000000..12d1d25418f04cf49408f3756d0edabcfa6e49f5 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/route53/test_zone.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates.
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.route53.zone import Zone +from tests.compat import mock, unittest + + +class TestZone(unittest.TestCase): + def test_find_records(self): + mock_connection = mock.Mock() + zone = Zone(mock_connection, {}) + zone.id = None + rr_names = ['amazon.com', 'amazon.com', 'aws.amazon.com', + 'aws.amazon.com'] + mock_rrs = [] + # Create some mock resource records. + for rr_name in rr_names: + mock_rr = mock.Mock() + mock_rr.name = rr_name + mock_rr.type = 'A' + mock_rr.weight = None + mock_rr.region = None + mock_rrs.append(mock_rr) + + # Set the last resource record to ``None``. The ``find_records`` loop + # should never hit this. + mock_rrs[3] = None + + mock_connection.get_all_rrsets.return_value = mock_rrs + mock_connection._make_qualified.return_value = 'amazon.com' + + # Ensure that the ``None`` type object was not iterated over. + try: + result_rrs = zone.find_records('amazon.com', 'A', all=True) + except AttributeError as e: + self.fail("find_records() iterated too far into resource" + " record list.") + + # Determine that the resulting records are correct. 
+ self.assertEqual(result_rrs, [mock_rrs[0], mock_rrs[1]]) + + +if __name__ == "__main__": + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_bucket.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_bucket.py new file mode 100644 index 0000000000000000000000000000000000000000..72e10ed2f925369483852105ad0c354361f19564 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_bucket.py @@ -0,0 +1,231 @@ +# -*- coding: utf-8 -*- +from mock import patch +import xml.dom.minidom + +from tests.unit import unittest +from tests.unit import AWSMockServiceTestCase + +from boto.exception import BotoClientError +from boto.s3.connection import S3Connection +from boto.s3.bucket import Bucket +from boto.s3.deletemarker import DeleteMarker +from boto.s3.key import Key +from boto.s3.multipart import MultiPartUpload +from boto.s3.prefix import Prefix + + +class TestS3Bucket(AWSMockServiceTestCase): + connection_class = S3Connection + + def setUp(self): + super(TestS3Bucket, self).setUp() + + def test_bucket_create_bucket(self): + self.set_http_response(status_code=200) + bucket = self.service_connection.create_bucket('mybucket_create') + self.assertEqual(bucket.name, 'mybucket_create') + + def test_bucket_constructor(self): + self.set_http_response(status_code=200) + bucket = Bucket(self.service_connection, 'mybucket_constructor') + self.assertEqual(bucket.name, 'mybucket_constructor') + + def test_bucket_basics(self): + self.set_http_response(status_code=200) + bucket = self.service_connection.create_bucket('mybucket') + self.assertEqual(bucket.__repr__(), '<Bucket: mybucket>') + + def test_bucket_new_key(self): + self.set_http_response(status_code=200) + bucket = self.service_connection.create_bucket('mybucket') + key = bucket.new_key('mykey') + + self.assertEqual(key.bucket, bucket) + self.assertEqual(key.key, 'mykey') + + def test_bucket_new_key_missing_name(self): + self.set_http_response(status_code=200) + bucket = self.service_connection.create_bucket('mybucket') + + with self.assertRaises(ValueError): + key = bucket.new_key('') + + def test_bucket_delete_key_missing_name(self): + self.set_http_response(status_code=200) + bucket = self.service_connection.create_bucket('mybucket') + + with self.assertRaises(ValueError): + key = bucket.delete_key('') + + def test_bucket_kwargs_misspelling(self): + self.set_http_response(status_code=200) + bucket = self.service_connection.create_bucket('mybucket') + + with self.assertRaises(TypeError): + bucket.get_all_keys(delimeter='foo') + + def test__get_all_query_args(self): + bukket = Bucket() + + # Default. + qa = bukket._get_all_query_args({}) + self.assertEqual(qa, '') + + # Default with initial. + qa = bukket._get_all_query_args({}, 'initial=1') + self.assertEqual(qa, 'initial=1') + + # Single param. + qa = bukket._get_all_query_args({ + 'foo': 'true' + }) + self.assertEqual(qa, 'foo=true') + + # Single param with initial. + qa = bukket._get_all_query_args({ + 'foo': 'true' + }, 'initial=1') + self.assertEqual(qa, 'initial=1&foo=true') + + # Multiple params with all the weird cases. + multiple_params = { + 'foo': 'true', + # Ensure Unicode chars get encoded. + 'bar': '☃', + # Ensure unicode strings with non-ascii characters get encoded + 'baz': u'χ', + # Underscores are bad, m'kay?
+ 'some_other': 'thing', + # Change the variant of ``max-keys``. + 'maxkeys': 0, + # ``None`` values get excluded. + 'notthere': None, + # Empty values also get excluded. + 'notpresenteither': '', + } + qa = bukket._get_all_query_args(multiple_params) + self.assertEqual( + qa, + 'bar=%E2%98%83&baz=%CF%87&foo=true&max-keys=0&some-other=thing' + ) + + # Multiple params with initial. + qa = bukket._get_all_query_args(multiple_params, 'initial=1') + self.assertEqual( + qa, + 'initial=1&bar=%E2%98%83&baz=%CF%87&foo=true&max-keys=0&some-other=thing' + ) + + @patch.object(S3Connection, 'head_bucket') + def test_bucket_copy_key_no_validate(self, mock_head_bucket): + self.set_http_response(status_code=200) + bucket = self.service_connection.create_bucket('mybucket') + + self.assertFalse(mock_head_bucket.called) + self.service_connection.get_bucket('mybucket', validate=True) + self.assertTrue(mock_head_bucket.called) + + mock_head_bucket.reset_mock() + self.assertFalse(mock_head_bucket.called) + try: + bucket.copy_key('newkey', 'srcbucket', 'srckey', preserve_acl=True) + except: + # Will throw because of empty response. + pass + self.assertFalse(mock_head_bucket.called) + + @patch.object(Bucket, '_get_all') + def test_bucket_encoding(self, mock_get_all): + self.set_http_response(status_code=200) + bucket = self.service_connection.get_bucket('mybucket') + + # First, without the encoding. + mock_get_all.reset_mock() + bucket.get_all_keys() + mock_get_all.assert_called_with( + [ + ('Contents', Key), + ('CommonPrefixes', Prefix) + ], '', None + ) + + # Now the variants with the encoding. + mock_get_all.reset_mock() + bucket.get_all_keys(encoding_type='url') + mock_get_all.assert_called_with( + [ + ('Contents', Key), + ('CommonPrefixes', Prefix) + ], '', None, + encoding_type='url' + ) + + mock_get_all.reset_mock() + bucket.get_all_versions(encoding_type='url') + mock_get_all.assert_called_with( + [ + ('Version', Key), + ('CommonPrefixes', Prefix), + ('DeleteMarker', DeleteMarker), + ], 'versions', None, + encoding_type='url' + ) + + mock_get_all.reset_mock() + bucket.get_all_multipart_uploads(encoding_type='url') + mock_get_all.assert_called_with( + [ + ('Upload', MultiPartUpload), + ('CommonPrefixes', Prefix) + ], 'uploads', None, + encoding_type='url' + ) + + @patch.object(Bucket, 'get_all_keys') + @patch.object(Bucket, '_get_key_internal') + def test_bucket_get_key_no_validate(self, mock_gki, mock_gak): + self.set_http_response(status_code=200) + bucket = self.service_connection.get_bucket('mybucket') + key = bucket.get_key('mykey', validate=False) + + self.assertEqual(len(mock_gki.mock_calls), 0) + self.assertTrue(isinstance(key, Key)) + self.assertEqual(key.name, 'mykey') + + with self.assertRaises(BotoClientError): + bucket.get_key( + 'mykey', + version_id='something', + validate=False + ) + + def acl_policy(self): + return """ + + + owner_id + owner_display_name + + + + + grantee_id + grantee_display_name + + FULL_CONTROL + + + """ + + def test_bucket_acl_policy_namespace(self): + self.set_http_response(status_code=200) + bucket = self.service_connection.get_bucket('mybucket') + + self.set_http_response(status_code=200, body=self.acl_policy()) + policy = bucket.get_acl() + + xml_policy = policy.to_xml() + document = xml.dom.minidom.parseString(xml_policy) + namespace = document.documentElement.namespaceURI + self.assertEqual(namespace, 'http://s3.amazonaws.com/doc/2006-03-01/') diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_connection.py 
b/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..5839a6a2d7eabb363f397c18377663c624def4dc --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_connection.py @@ -0,0 +1,240 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import time + +from tests.compat import mock, unittest +from tests.unit import AWSMockServiceTestCase +from tests.unit import MockServiceWithConfigTestCase + +from boto.s3.connection import S3Connection, HostRequiredError +from boto.s3.connection import S3ResponseError, Bucket + + +class TestSignatureAlteration(AWSMockServiceTestCase): + connection_class = S3Connection + + def test_unchanged(self): + self.assertEqual( + self.service_connection._required_auth_capability(), + ['s3'] + ) + + def test_switched(self): + conn = self.connection_class( + aws_access_key_id='less', + aws_secret_access_key='more', + host='s3.cn-north-1.amazonaws.com.cn' + ) + self.assertEqual( + conn._required_auth_capability(), + ['hmac-v4-s3'] + ) + + +class TestSigV4HostError(MockServiceWithConfigTestCase): + connection_class = S3Connection + + def test_historical_behavior(self): + self.assertEqual( + self.service_connection._required_auth_capability(), + ['s3'] + ) + self.assertEqual(self.service_connection.host, 's3.amazonaws.com') + + def test_sigv4_opt_in(self): + # Switch it at the config, so we can check to see how the host is + # handled. + self.config = { + 's3': { + 'use-sigv4': True, + } + } + + with self.assertRaises(HostRequiredError): + # No host+SigV4 == KABOOM + self.connection_class( + aws_access_key_id='less', + aws_secret_access_key='more' + ) + + # Ensure passing a ``host`` still works. + conn = self.connection_class( + aws_access_key_id='less', + aws_secret_access_key='more', + host='s3.cn-north-1.amazonaws.com.cn' + ) + self.assertEqual( + conn._required_auth_capability(), + ['hmac-v4-s3'] + ) + self.assertEqual( + conn.host, + 's3.cn-north-1.amazonaws.com.cn' + ) + + +class TestSigV4Presigned(MockServiceWithConfigTestCase): + connection_class = S3Connection + + def test_sigv4_presign(self): + self.config = { + 's3': { + 'use-sigv4': True, + } + } + + conn = self.connection_class( + aws_access_key_id='less', + aws_secret_access_key='more', + host='s3.amazonaws.com' + ) + + # Here we force an input iso_date to ensure we always get the + # same signature. 
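+ # (The request timestamp is folded into the SigV4 string-to-sign, so + # pinning iso_date keeps the generated signature stable across runs.)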
+ url = conn.generate_url_sigv4(86400, 'GET', bucket='examplebucket', + key='test.txt', iso_date='20140625T000000Z') + + self.assertIn('a937f5fbc125d98ac8f04c49e0204ea1526a7b8ca058000a54c192457be05b7d', url) + + def test_sigv4_presign_optional_params(self): + self.config = { + 's3': { + 'use-sigv4': True, + } + } + + conn = self.connection_class( + aws_access_key_id='less', + aws_secret_access_key='more', + security_token='token', + host='s3.amazonaws.com' + ) + + url = conn.generate_url_sigv4(86400, 'GET', bucket='examplebucket', + key='test.txt', version_id=2) + + self.assertIn('VersionId=2', url) + self.assertIn('X-Amz-Security-Token=token', url) + + def test_sigv4_presign_headers(self): + self.config = { + 's3': { + 'use-sigv4': True, + } + } + + conn = self.connection_class( + aws_access_key_id='less', + aws_secret_access_key='more', + host='s3.amazonaws.com' + ) + + headers = {'x-amz-meta-key': 'val'} + url = conn.generate_url_sigv4(86400, 'GET', bucket='examplebucket', + key='test.txt', headers=headers) + + self.assertIn('host', url) + self.assertIn('x-amz-meta-key', url) + + +class TestUnicodeCallingFormat(AWSMockServiceTestCase): + connection_class = S3Connection + + def default_body(self): + return """ + + + bcaf1ffd86f461ca5fb16fd081034f + webfile + + + + quotes + 2006-02-03T16:45:09.000Z + + + samples + 2006-02-03T16:41:58.000Z + + +""" + + def create_service_connection(self, **kwargs): + kwargs['calling_format'] = u'boto.s3.connection.OrdinaryCallingFormat' + return super(TestUnicodeCallingFormat, + self).create_service_connection(**kwargs) + + def test_unicode_calling_format(self): + self.set_http_response(status_code=200) + self.service_connection.get_all_buckets() + + +class TestHeadBucket(AWSMockServiceTestCase): + connection_class = S3Connection + + def default_body(self): + # HEAD requests always have an empty body. + return "" + + def test_head_bucket_success(self): + self.set_http_response(status_code=200) + buck = self.service_connection.head_bucket('my-test-bucket') + self.assertTrue(isinstance(buck, Bucket)) + self.assertEqual(buck.name, 'my-test-bucket') + + def test_head_bucket_forbidden(self): + self.set_http_response(status_code=403) + + with self.assertRaises(S3ResponseError) as cm: + self.service_connection.head_bucket('cant-touch-this') + + err = cm.exception + self.assertEqual(err.status, 403) + self.assertEqual(err.error_code, 'AccessDenied') + self.assertEqual(err.message, 'Access Denied') + + def test_head_bucket_notfound(self): + self.set_http_response(status_code=404) + + with self.assertRaises(S3ResponseError) as cm: + self.service_connection.head_bucket('totally-doesnt-exist') + + err = cm.exception + self.assertEqual(err.status, 404) + self.assertEqual(err.error_code, 'NoSuchBucket') + self.assertEqual(err.message, 'The specified bucket does not exist') + + def test_head_bucket_other(self): + self.set_http_response(status_code=405) + + with self.assertRaises(S3ResponseError) as cm: + self.service_connection.head_bucket('you-broke-it') + + err = cm.exception + self.assertEqual(err.status, 405) + # We don't have special-cases for this error status. 
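+ # boto only extracts an error code and message from a recognized error + # body; this bare 405 carries none, so both fields below stay empty.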
+ self.assertEqual(err.error_code, None) + self.assertEqual(err.message, '') + + +if __name__ == "__main__": + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_cors_configuration.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_cors_configuration.py new file mode 100644 index 0000000000000000000000000000000000000000..8f6980364ca2c93a60898575ba4e6084c95ecdf6 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_cors_configuration.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python + +import unittest +from boto.s3.cors import CORSConfiguration + +CORS_BODY_1 = ( + '<CORSConfiguration>' + '<CORSRule>' + '<AllowedMethod>PUT</AllowedMethod>' + '<AllowedMethod>POST</AllowedMethod>' + '<AllowedMethod>DELETE</AllowedMethod>' + '<AllowedOrigin>http://www.example.com</AllowedOrigin>' + '<AllowedHeader>*</AllowedHeader>' + '<ExposeHeader>x-amz-server-side-encryption</ExposeHeader>' + '<MaxAgeSeconds>3000</MaxAgeSeconds>' + '<ID>foobar_rule</ID>' + '</CORSRule>' + '</CORSConfiguration>') + +CORS_BODY_2 = ( + '<CORSConfiguration>' + '<CORSRule>' + '<AllowedMethod>PUT</AllowedMethod>' + '<AllowedMethod>POST</AllowedMethod>' + '<AllowedMethod>DELETE</AllowedMethod>' + '<AllowedOrigin>http://www.example.com</AllowedOrigin>' + '<AllowedHeader>*</AllowedHeader>' + '<ExposeHeader>x-amz-server-side-encryption</ExposeHeader>' + '<MaxAgeSeconds>3000</MaxAgeSeconds>' + '</CORSRule>' + '<CORSRule>' + '<AllowedMethod>GET</AllowedMethod>' + '<AllowedOrigin>*</AllowedOrigin>' + '<AllowedHeader>*</AllowedHeader>' + '<MaxAgeSeconds>3000</MaxAgeSeconds>' + '</CORSRule>' + '</CORSConfiguration>') + +CORS_BODY_3 = ( + '<CORSConfiguration>' + '<CORSRule>' + '<AllowedMethod>GET</AllowedMethod>' + '<AllowedOrigin>*</AllowedOrigin>' + '</CORSRule>' + '</CORSConfiguration>') + + +class TestCORSConfiguration(unittest.TestCase): + + def test_one_rule_with_id(self): + cfg = CORSConfiguration() + cfg.add_rule(['PUT', 'POST', 'DELETE'], + 'http://www.example.com', + allowed_header='*', + max_age_seconds=3000, + expose_header='x-amz-server-side-encryption', + id='foobar_rule') + self.assertEqual(cfg.to_xml(), CORS_BODY_1) + + def test_two_rules(self): + cfg = CORSConfiguration() + cfg.add_rule(['PUT', 'POST', 'DELETE'], + 'http://www.example.com', + allowed_header='*', + max_age_seconds=3000, + expose_header='x-amz-server-side-encryption') + cfg.add_rule('GET', '*', allowed_header='*', max_age_seconds=3000) + self.assertEqual(cfg.to_xml(), CORS_BODY_2) + + def test_minimal(self): + cfg = CORSConfiguration() + cfg.add_rule('GET', '*') + self.assertEqual(cfg.to_xml(), CORS_BODY_3) + + +if __name__ == "__main__": + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_key.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_key.py new file mode 100644 index 0000000000000000000000000000000000000000..26e2fc82c943303e938bd3122908957756365ffb --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_key.py @@ -0,0 +1,239 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE.
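+# +# The restore-status tests below hinge on S3's ``x-amz-restore`` response +# header, e.g. ``ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 GMT"``. +# A rough sketch of the parsing they exercise (illustrative only, not +# boto's actual implementation): +# +# import re +# +# def parse_restore(header): +# ongoing = re.search(r'ongoing-request="([^"]*)"', header) +# expiry = re.search(r'expiry-date="([^"]*)"', header) +# return (ongoing.group(1) == 'true' if ongoing else None, +# expiry.group(1) if expiry else None)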
+# +from tests.compat import mock, unittest +from tests.unit import AWSMockServiceTestCase + +from boto.compat import StringIO +from boto.exception import BotoServerError +from boto.s3.connection import S3Connection +from boto.s3.bucket import Bucket +from boto.s3.key import Key + + +class TestS3Key(AWSMockServiceTestCase): + connection_class = S3Connection + + def setUp(self): + super(TestS3Key, self).setUp() + + def default_body(self): + return "default body" + + def test_unicode_name(self): + k = Key() + k.name = u'Österreich' + print(repr(k)) + + def test_when_no_restore_header_present(self): + self.set_http_response(status_code=200) + b = Bucket(self.service_connection, 'mybucket') + k = b.get_key('myglacierkey') + self.assertIsNone(k.ongoing_restore) + self.assertIsNone(k.expiry_date) + + def test_restore_header_with_ongoing_restore(self): + self.set_http_response( + status_code=200, + header=[('x-amz-restore', 'ongoing-request="true"')]) + b = Bucket(self.service_connection, 'mybucket') + k = b.get_key('myglacierkey') + self.assertTrue(k.ongoing_restore) + self.assertIsNone(k.expiry_date) + + def test_restore_completed(self): + self.set_http_response( + status_code=200, + header=[('x-amz-restore', + 'ongoing-request="false", ' + 'expiry-date="Fri, 21 Dec 2012 00:00:00 GMT"')]) + b = Bucket(self.service_connection, 'mybucket') + k = b.get_key('myglacierkey') + self.assertFalse(k.ongoing_restore) + self.assertEqual(k.expiry_date, 'Fri, 21 Dec 2012 00:00:00 GMT') + + def test_delete_key_return_key(self): + self.set_http_response(status_code=204, body='') + b = Bucket(self.service_connection, 'mybucket') + key = b.delete_key('fookey') + self.assertIsNotNone(key) + + def test_storage_class(self): + self.set_http_response(status_code=200) + b = Bucket(self.service_connection, 'mybucket') + k = b.get_key('fookey') + + # Mock out the bucket object - we really only care about calls + # to list. + k.bucket = mock.MagicMock() + + # Default behavior doesn't call list + k.set_contents_from_string('test') + k.bucket.list.assert_not_called() + + # Direct access calls list to get the real value if unset, + # and still defaults to STANDARD if unavailable. 
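+ # (That lookup is what the mocked bucket records below: a single + # list() call keyed by the key's name.)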
sc_value = k.storage_class + self.assertEqual(sc_value, 'STANDARD') + k.bucket.list.assert_called_with(k.name.encode('utf-8')) + k.bucket.list.reset_mock() + + # Setting manually doesn't call list + k.storage_class = 'GLACIER' + k.set_contents_from_string('test') + k.bucket.list.assert_not_called() + + def test_change_storage_class(self): + self.set_http_response(status_code=200) + b = Bucket(self.service_connection, 'mybucket') + k = b.get_key('fookey') + + # Mock out Key.copy so we can record calls to it + k.copy = mock.MagicMock() + # Mock out the bucket so we don't actually need to have fake responses + k.bucket = mock.MagicMock() + k.bucket.name = 'mybucket' + + self.assertEqual(k.storage_class, 'STANDARD') + + # The default change_storage_class call should result in a copy to our + # bucket + k.change_storage_class('REDUCED_REDUNDANCY') + k.copy.assert_called_with( + 'mybucket', + 'fookey', + reduced_redundancy=True, + preserve_acl=True, + validate_dst_bucket=True, + ) + + def test_change_storage_class_new_bucket(self): + self.set_http_response(status_code=200) + b = Bucket(self.service_connection, 'mybucket') + k = b.get_key('fookey') + + # Mock out Key.copy so we can record calls to it + k.copy = mock.MagicMock() + # Mock out the bucket so we don't actually need to have fake responses + k.bucket = mock.MagicMock() + k.bucket.name = 'mybucket' + + self.assertEqual(k.storage_class, 'STANDARD') + # Specifying a different dst_bucket should result in a copy to the new + # bucket + k.copy.reset_mock() + k.change_storage_class('REDUCED_REDUNDANCY', dst_bucket='yourbucket') + k.copy.assert_called_with( + 'yourbucket', + 'fookey', + reduced_redundancy=True, + preserve_acl=True, + validate_dst_bucket=True, + ) + + +def counter(fn): + def _wrapper(*args, **kwargs): + _wrapper.count += 1 + return fn(*args, **kwargs) + _wrapper.count = 0 + return _wrapper + + +class TestS3KeyRetries(AWSMockServiceTestCase): + connection_class = S3Connection + + @mock.patch('time.sleep') + def test_500_retry(self, sleep_mock): + self.set_http_response(status_code=500) + b = Bucket(self.service_connection, 'mybucket') + k = b.new_key('test_failure') + fail_file = StringIO('This will attempt to retry.') + + with self.assertRaises(BotoServerError): + k.send_file(fail_file) + + @mock.patch('time.sleep') + def test_400_timeout(self, sleep_mock): + weird_timeout_body = "<Error><Code>RequestTimeout</Code></Error>" + self.set_http_response(status_code=400, body=weird_timeout_body) + b = Bucket(self.service_connection, 'mybucket') + k = b.new_key('test_failure') + fail_file = StringIO('This will pretend to be chunk-able.') + + k.should_retry = counter(k.should_retry) + self.assertEqual(k.should_retry.count, 0) + + with self.assertRaises(BotoServerError): + k.send_file(fail_file) + + self.assertEqual(k.should_retry.count, 1) + + @mock.patch('time.sleep') + def test_502_bad_gateway(self, sleep_mock): + weird_timeout_body = "<Error><Code>BadGateway</Code></Error>" + self.set_http_response(status_code=502, body=weird_timeout_body) + b = Bucket(self.service_connection, 'mybucket') + k = b.new_key('test_failure') + fail_file = StringIO('This will pretend to be chunk-able.') + + k.should_retry = counter(k.should_retry) + self.assertEqual(k.should_retry.count, 0) + + with self.assertRaises(BotoServerError): + k.send_file(fail_file) + + self.assertEqual(k.should_retry.count, 1) + + @mock.patch('time.sleep') + def test_504_gateway_timeout(self, sleep_mock): + weird_timeout_body = "<Error><Code>GatewayTimeout</Code></Error>" + self.set_http_response(status_code=504, body=weird_timeout_body) + b = Bucket(self.service_connection, 'mybucket') + k = b.new_key('test_failure') + fail_file = StringIO('This will pretend to be chunk-able.') + + k.should_retry = counter(k.should_retry) + self.assertEqual(k.should_retry.count, 0) + + with self.assertRaises(BotoServerError): + k.send_file(fail_file) + + self.assertEqual(k.should_retry.count, 1) + + +class TestFileError(unittest.TestCase): + def test_file_error(self): + key = Key() + + class CustomException(Exception): pass + + key.get_contents_to_file = mock.Mock( + side_effect=CustomException('File blew up!')) + + # Ensure our exception gets raised instead of a file or IO error + with self.assertRaises(CustomException): + key.get_contents_to_filename('foo.txt') + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_keyfile.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_keyfile.py new file mode 100644 index 0000000000000000000000000000000000000000..eca0bb9e6b957fbadd477e6833eb6dc1a9619a53 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_keyfile.py @@ -0,0 +1,114 @@ +# Copyright 2013 Google Inc. +# Copyright 2011, Nexenta Systems Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE.
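+# +# KeyFile (boto.s3.keyfile) wraps a Key in a read-only, file-like object. +# A short usage sketch mirroring the fixtures below (MockConnection and +# MockBucket are the test doubles imported underneath): +# +# bucket = MockBucket(MockConnection(), 'mybucket') +# key = bucket.new_key('mykey') +# key.set_contents_from_string('0123456789') +# kf = KeyFile(key) +# kf.read(4) # -> '0123' +# kf.seek(0, os.SEEK_END) +# kf.tell() # -> 10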
+ +import os +import unittest +from boto.s3.keyfile import KeyFile +from tests.integration.s3.mock_storage_service import MockConnection +from tests.integration.s3.mock_storage_service import MockBucket + + +class KeyfileTest(unittest.TestCase): + + def setUp(self): + service_connection = MockConnection() + self.contents = '0123456789' + bucket = MockBucket(service_connection, 'mybucket') + key = bucket.new_key('mykey') + key.set_contents_from_string(self.contents) + self.keyfile = KeyFile(key) + + def tearDown(self): + self.keyfile.close() + + def testReadFull(self): + self.assertEqual(self.keyfile.read(len(self.contents)), self.contents) + + def testReadPartial(self): + self.assertEqual(self.keyfile.read(5), self.contents[:5]) + self.assertEqual(self.keyfile.read(5), self.contents[5:]) + + def testTell(self): + self.assertEqual(self.keyfile.tell(), 0) + self.keyfile.read(4) + self.assertEqual(self.keyfile.tell(), 4) + self.keyfile.read(6) + self.assertEqual(self.keyfile.tell(), 10) + self.keyfile.close() + with self.assertRaises(ValueError) as cm: + self.keyfile.tell() + self.assertEqual(str(cm.exception), 'I/O operation on closed file') + + def testSeek(self): + self.assertEqual(self.keyfile.read(4), self.contents[:4]) + self.keyfile.seek(0) + self.assertEqual(self.keyfile.read(4), self.contents[:4]) + self.keyfile.seek(5) + self.assertEqual(self.keyfile.read(5), self.contents[5:]) + + # Seeking negative should raise. + with self.assertRaises(IOError) as cm: + self.keyfile.seek(-5) + self.assertEqual(str(cm.exception), 'Invalid argument') + + # Reading past end of file is supposed to return empty string. + self.keyfile.read(10) + self.assertEqual(self.keyfile.read(20), '') + + # Seeking past end of file is supposed to silently work. + self.keyfile.seek(50) + self.assertEqual(self.keyfile.tell(), 50) + self.assertEqual(self.keyfile.read(1), '') + + def testSeekEnd(self): + self.assertEqual(self.keyfile.read(4), self.contents[:4]) + self.keyfile.seek(0, os.SEEK_END) + self.assertEqual(self.keyfile.read(1), '') + self.keyfile.seek(-1, os.SEEK_END) + self.assertEqual(self.keyfile.tell(), 9) + self.assertEqual(self.keyfile.read(1), '9') + # An attempt to seek backwards past the start from the end should raise. + with self.assertRaises(IOError) as cm: + self.keyfile.seek(-100, os.SEEK_END) + self.assertEqual(str(cm.exception), 'Invalid argument') + + def testSeekCur(self): + self.assertEqual(self.keyfile.read(1), self.contents[0]) + self.keyfile.seek(1, os.SEEK_CUR) + self.assertEqual(self.keyfile.tell(), 2) + self.assertEqual(self.keyfile.read(4), self.contents[2:6]) + + def testSetEtag(self): + # Make sure both bytes and strings work as contents. This is one of the + # very few places Boto uses the mock key object. + # https://github.com/GoogleCloudPlatform/gsutil/issues/214#issuecomment-49906044 + self.keyfile.key.data = b'test' + self.keyfile.key.set_etag() + self.assertEqual(self.keyfile.key.etag, '098f6bcd4621d373cade4e832627b4f6') + + self.keyfile.key.etag = None + self.keyfile.key.data = 'test' + self.keyfile.key.set_etag() + self.assertEqual(self.keyfile.key.etag, '098f6bcd4621d373cade4e832627b4f6') diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_lifecycle.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_lifecycle.py new file mode 100644 index 0000000000000000000000000000000000000000..da50f3a8c9aedb35c583793a0689f56974f72083 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_lifecycle.py @@ -0,0 +1,97 @@ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from tests.unit import AWSMockServiceTestCase + +from boto.s3.connection import S3Connection +from boto.s3.bucket import Bucket +from boto.s3.lifecycle import Rule, Lifecycle, Transition + + +class TestS3LifeCycle(AWSMockServiceTestCase): + connection_class = S3Connection + + def default_body(self): + return """ + + + rule-1 + prefix/foo + Enabled + + 30 + GLACIER + + + 365 + + + + rule-2 + prefix/bar + Disabled + + 2012-12-31T00:00:000Z + GLACIER + + + + """ + + def test_parse_lifecycle_response(self): + self.set_http_response(status_code=200) + bucket = Bucket(self.service_connection, 'mybucket') + response = bucket.get_lifecycle_config() + self.assertEqual(len(response), 2) + rule = response[0] + self.assertEqual(rule.id, 'rule-1') + self.assertEqual(rule.prefix, 'prefix/foo') + self.assertEqual(rule.status, 'Enabled') + self.assertEqual(rule.expiration.days, 365) + self.assertIsNone(rule.expiration.date) + transition = rule.transition + self.assertEqual(transition.days, 30) + self.assertEqual(transition.storage_class, 'GLACIER') + self.assertEqual(response[1].transition.date, '2012-12-31T00:00:000Z') + + def test_expiration_with_no_transition(self): + lifecycle = Lifecycle() + lifecycle.add_rule('myid', 'prefix', 'Enabled', 30) + xml = lifecycle.to_xml() + self.assertIn('30', xml) + + def test_expiration_is_optional(self): + t = Transition(days=30, storage_class='GLACIER') + r = Rule('myid', 'prefix', 'Enabled', expiration=None, + transition=t) + xml = r.to_xml() + self.assertIn( + 'GLACIER30', + xml) + + def test_expiration_with_expiration_and_transition(self): + t = Transition(date='2012-11-30T00:00:000Z', storage_class='GLACIER') + r = Rule('myid', 'prefix', 'Enabled', expiration=30, transition=t) + xml = r.to_xml() + self.assertIn( + 'GLACIER' + '2012-11-30T00:00:000Z', xml) + self.assertIn('30', xml) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_tagging.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_tagging.py new file mode 100644 index 0000000000000000000000000000000000000000..02b5f5300baa5f541be8372d6e3a874ee3de3eaa --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_tagging.py @@ -0,0 +1,47 @@ +from tests.unit import AWSMockServiceTestCase + +from boto.s3.connection import S3Connection +from boto.s3.bucket import Bucket +from boto.s3.tagging import Tag + + +class TestS3Tagging(AWSMockServiceTestCase): + connection_class = S3Connection + + def default_body(self): + 
return """ + + + + Project + Project One + + + User + jsmith + + + + """ + + def test_parse_tagging_response(self): + self.set_http_response(status_code=200) + b = Bucket(self.service_connection, 'mybucket') + api_response = b.get_tags() + # The outer list is a list of tag sets. + self.assertEqual(len(api_response), 1) + # The inner list is a list of tags. + self.assertEqual(len(api_response[0]), 2) + self.assertEqual(api_response[0][0].key, 'Project') + self.assertEqual(api_response[0][0].value, 'Project One') + self.assertEqual(api_response[0][1].key, 'User') + self.assertEqual(api_response[0][1].value, 'jsmith') + + def test_tag_equality(self): + t1 = Tag('foo', 'bar') + t2 = Tag('foo', 'bar') + t3 = Tag('foo', 'baz') + t4 = Tag('baz', 'bar') + self.assertEqual(t1, t2) + self.assertNotEqual(t1, t3) + self.assertNotEqual(t1, t4) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_uri.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_uri.py new file mode 100644 index 0000000000000000000000000000000000000000..89149923640a899bc55d100c6685b390a87c7991 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_uri.py @@ -0,0 +1,265 @@ +#!/usr/bin/env python +# Copyright (c) 2013 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import boto +import tempfile + +from boto.exception import InvalidUriError +from boto import storage_uri +from boto.compat import urllib +from boto.s3.keyfile import KeyFile +from tests.integration.s3.mock_storage_service import MockBucket +from tests.integration.s3.mock_storage_service import MockBucketStorageUri +from tests.integration.s3.mock_storage_service import MockConnection +from tests.unit import unittest + +"""Unit tests for StorageUri interface.""" + +class UriTest(unittest.TestCase): + + def test_provider_uri(self): + for prov in ('gs', 's3'): + uri_str = '%s://' % prov + uri = boto.storage_uri(uri_str, validate=False, + suppress_consec_slashes=False) + self.assertEqual(prov, uri.scheme) + self.assertEqual(uri_str, uri.uri) + self.assertFalse(hasattr(uri, 'versionless_uri')) + self.assertEqual('', uri.bucket_name) + self.assertEqual('', uri.object_name) + self.assertEqual(None, uri.version_id) + self.assertEqual(None, uri.generation) + self.assertEqual(uri.names_provider(), True) + self.assertEqual(uri.names_container(), True) + self.assertEqual(uri.names_bucket(), False) + self.assertEqual(uri.names_object(), False) + self.assertEqual(uri.names_directory(), False) + self.assertEqual(uri.names_file(), False) + self.assertEqual(uri.is_stream(), False) + self.assertEqual(uri.is_version_specific, False) + + def test_bucket_uri_no_trailing_slash(self): + for prov in ('gs', 's3'): + uri_str = '%s://bucket' % prov + uri = boto.storage_uri(uri_str, validate=False, + suppress_consec_slashes=False) + self.assertEqual(prov, uri.scheme) + self.assertEqual('%s/' % uri_str, uri.uri) + self.assertFalse(hasattr(uri, 'versionless_uri')) + self.assertEqual('bucket', uri.bucket_name) + self.assertEqual('', uri.object_name) + self.assertEqual(None, uri.version_id) + self.assertEqual(None, uri.generation) + self.assertEqual(uri.names_provider(), False) + self.assertEqual(uri.names_container(), True) + self.assertEqual(uri.names_bucket(), True) + self.assertEqual(uri.names_object(), False) + self.assertEqual(uri.names_directory(), False) + self.assertEqual(uri.names_file(), False) + self.assertEqual(uri.is_stream(), False) + self.assertEqual(uri.is_version_specific, False) + + def test_bucket_uri_with_trailing_slash(self): + for prov in ('gs', 's3'): + uri_str = '%s://bucket/' % prov + uri = boto.storage_uri(uri_str, validate=False, + suppress_consec_slashes=False) + self.assertEqual(prov, uri.scheme) + self.assertEqual(uri_str, uri.uri) + self.assertFalse(hasattr(uri, 'versionless_uri')) + self.assertEqual('bucket', uri.bucket_name) + self.assertEqual('', uri.object_name) + self.assertEqual(None, uri.version_id) + self.assertEqual(None, uri.generation) + self.assertEqual(uri.names_provider(), False) + self.assertEqual(uri.names_container(), True) + self.assertEqual(uri.names_bucket(), True) + self.assertEqual(uri.names_object(), False) + self.assertEqual(uri.names_directory(), False) + self.assertEqual(uri.names_file(), False) + self.assertEqual(uri.is_stream(), False) + self.assertEqual(uri.is_version_specific, False) + + def test_non_versioned_object_uri(self): + for prov in ('gs', 's3'): + uri_str = '%s://bucket/obj/a/b' % prov + uri = boto.storage_uri(uri_str, validate=False, + suppress_consec_slashes=False) + self.assertEqual(prov, uri.scheme) + self.assertEqual(uri_str, uri.uri) + self.assertEqual(uri_str, uri.versionless_uri) + self.assertEqual('bucket', uri.bucket_name) + self.assertEqual('obj/a/b', uri.object_name) + self.assertEqual(None, uri.version_id) + self.assertEqual(None, 
uri.generation) + self.assertEqual(uri.names_provider(), False) + self.assertEqual(uri.names_container(), False) + self.assertEqual(uri.names_bucket(), False) + self.assertEqual(uri.names_object(), True) + self.assertEqual(uri.names_directory(), False) + self.assertEqual(uri.names_file(), False) + self.assertEqual(uri.is_stream(), False) + self.assertEqual(uri.is_version_specific, False) + + def test_versioned_gs_object_uri(self): + uri_str = 'gs://bucket/obj/a/b#1359908801674000' + uri = boto.storage_uri(uri_str, validate=False, + suppress_consec_slashes=False) + self.assertEqual('gs', uri.scheme) + self.assertEqual(uri_str, uri.uri) + self.assertEqual('gs://bucket/obj/a/b', uri.versionless_uri) + self.assertEqual('bucket', uri.bucket_name) + self.assertEqual('obj/a/b', uri.object_name) + self.assertEqual(None, uri.version_id) + self.assertEqual(1359908801674000, uri.generation) + self.assertEqual(uri.names_provider(), False) + self.assertEqual(uri.names_container(), False) + self.assertEqual(uri.names_bucket(), False) + self.assertEqual(uri.names_object(), True) + self.assertEqual(uri.names_directory(), False) + self.assertEqual(uri.names_file(), False) + self.assertEqual(uri.is_stream(), False) + self.assertEqual(uri.is_version_specific, True) + + def test_versioned_gs_object_uri_with_legacy_generation_value(self): + uri_str = 'gs://bucket/obj/a/b#1' + uri = boto.storage_uri(uri_str, validate=False, + suppress_consec_slashes=False) + self.assertEqual('gs', uri.scheme) + self.assertEqual(uri_str, uri.uri) + self.assertEqual('gs://bucket/obj/a/b', uri.versionless_uri) + self.assertEqual('bucket', uri.bucket_name) + self.assertEqual('obj/a/b', uri.object_name) + self.assertEqual(None, uri.version_id) + self.assertEqual(1, uri.generation) + self.assertEqual(uri.names_provider(), False) + self.assertEqual(uri.names_container(), False) + self.assertEqual(uri.names_bucket(), False) + self.assertEqual(uri.names_object(), True) + self.assertEqual(uri.names_directory(), False) + self.assertEqual(uri.names_file(), False) + self.assertEqual(uri.is_stream(), False) + self.assertEqual(uri.is_version_specific, True) + + def test_roundtrip_versioned_gs_object_uri_parsed(self): + uri_str = 'gs://bucket/obj#1359908801674000' + uri = boto.storage_uri(uri_str, validate=False, + suppress_consec_slashes=False) + roundtrip_uri = boto.storage_uri(uri.uri, validate=False, + suppress_consec_slashes=False) + self.assertEqual(uri.uri, roundtrip_uri.uri) + self.assertEqual(uri.is_version_specific, True) + + def test_versioned_s3_object_uri(self): + uri_str = 's3://bucket/obj/a/b#eMuM0J15HkJ9QHlktfNP5MfA.oYR2q6S' + uri = boto.storage_uri(uri_str, validate=False, + suppress_consec_slashes=False) + self.assertEqual('s3', uri.scheme) + self.assertEqual(uri_str, uri.uri) + self.assertEqual('s3://bucket/obj/a/b', uri.versionless_uri) + self.assertEqual('bucket', uri.bucket_name) + self.assertEqual('obj/a/b', uri.object_name) + self.assertEqual('eMuM0J15HkJ9QHlktfNP5MfA.oYR2q6S', uri.version_id) + self.assertEqual(None, uri.generation) + self.assertEqual(uri.names_provider(), False) + self.assertEqual(uri.names_container(), False) + self.assertEqual(uri.names_bucket(), False) + self.assertEqual(uri.names_object(), True) + self.assertEqual(uri.names_directory(), False) + self.assertEqual(uri.names_file(), False) + self.assertEqual(uri.is_stream(), False) + self.assertEqual(uri.is_version_specific, True) + + def test_explicit_file_uri(self): + tmp_dir = tempfile.tempdir or '' + uri_str = 'file://%s' % 
urllib.request.pathname2url(tmp_dir)
+        uri = boto.storage_uri(uri_str, validate=False,
+                               suppress_consec_slashes=False)
+        self.assertEqual('file', uri.scheme)
+        self.assertEqual(uri_str, uri.uri)
+        self.assertFalse(hasattr(uri, 'versionless_uri'))
+        self.assertEqual('', uri.bucket_name)
+        self.assertEqual(tmp_dir, uri.object_name)
+        self.assertFalse(hasattr(uri, 'version_id'))
+        self.assertFalse(hasattr(uri, 'generation'))
+        self.assertFalse(hasattr(uri, 'is_version_specific'))
+        self.assertEqual(uri.names_provider(), False)
+        self.assertEqual(uri.names_bucket(), False)
+        # Don't check uri.names_container(), uri.names_directory(),
+        # uri.names_file(), or uri.names_object(), because for file URIs these
+        # functions look at the file system and apparently unit tests run
+        # chroot'd.
+        self.assertEqual(uri.is_stream(), False)
+
+    def test_implicit_file_uri(self):
+        tmp_dir = tempfile.tempdir or ''
+        uri_str = '%s' % urllib.request.pathname2url(tmp_dir)
+        uri = boto.storage_uri(uri_str, validate=False,
+                               suppress_consec_slashes=False)
+        self.assertEqual('file', uri.scheme)
+        self.assertEqual('file://%s' % tmp_dir, uri.uri)
+        self.assertFalse(hasattr(uri, 'versionless_uri'))
+        self.assertEqual('', uri.bucket_name)
+        self.assertEqual(tmp_dir, uri.object_name)
+        self.assertFalse(hasattr(uri, 'version_id'))
+        self.assertFalse(hasattr(uri, 'generation'))
+        self.assertFalse(hasattr(uri, 'is_version_specific'))
+        self.assertEqual(uri.names_provider(), False)
+        self.assertEqual(uri.names_bucket(), False)
+        # Don't check uri.names_container(), uri.names_directory(),
+        # uri.names_file(), or uri.names_object(), because for file URIs these
+        # functions look at the file system and apparently unit tests run
+        # chroot'd.
+        self.assertEqual(uri.is_stream(), False)
+
+    def test_gs_object_uri_contains_sharp_not_matching_version_syntax(self):
+        uri_str = 'gs://bucket/obj#13a990880167400'
+        uri = boto.storage_uri(uri_str, validate=False,
+                               suppress_consec_slashes=False)
+        self.assertEqual('gs', uri.scheme)
+        self.assertEqual(uri_str, uri.uri)
+        self.assertEqual('gs://bucket/obj#13a990880167400',
+                         uri.versionless_uri)
+        self.assertEqual('bucket', uri.bucket_name)
+        self.assertEqual('obj#13a990880167400', uri.object_name)
+        self.assertEqual(None, uri.version_id)
+        self.assertEqual(None, uri.generation)
+        self.assertEqual(uri.names_provider(), False)
+        self.assertEqual(uri.names_container(), False)
+        self.assertEqual(uri.names_bucket(), False)
+        self.assertEqual(uri.names_object(), True)
+        self.assertEqual(uri.names_directory(), False)
+        self.assertEqual(uri.names_file(), False)
+        self.assertEqual(uri.is_stream(), False)
+        self.assertEqual(uri.is_version_specific, False)
+
+    def test_file_containing_colon(self):
+        uri_str = 'abc:def'
+        uri = boto.storage_uri(uri_str, validate=False,
+                               suppress_consec_slashes=False)
+        self.assertEqual('file', uri.scheme)
+        self.assertEqual('file://%s' % uri_str, uri.uri)
+
+    def test_invalid_scheme(self):
+        uri_str = 'mars://bucket/object'
+        try:
+            boto.storage_uri(uri_str, validate=False,
+                             suppress_consec_slashes=False)
+        except InvalidUriError as e:
+            self.assertIn('Unrecognized scheme', e.message)
+        else:
+            self.fail('Expected InvalidUriError for unrecognized scheme')
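+
+    def test_invalid_scheme_raises(self):
+        # The same check as above, written with assertRaises as a small
+        # sketch of the more direct idiom; storage_uri is expected to
+        # raise InvalidUriError for an unrecognized scheme even when
+        # validate=False, which is what the test above relies on.
+        with self.assertRaises(InvalidUriError):
+            boto.storage_uri('mars://bucket/object', validate=False,
+                             suppress_consec_slashes=False)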
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_website.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_website.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7d1bcd616cb92c74b9527d3d31888734e4d232d
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/s3/test_website.py
@@ -0,0 +1,230 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from tests.unit import unittest
+import xml.dom.minidom
+import xml.sax
+
+from boto.s3.website import WebsiteConfiguration
+from boto.s3.website import RedirectLocation
+from boto.s3.website import RoutingRules
+from boto.s3.website import Condition
+from boto.s3.website import RoutingRule
+from boto.s3.website import Redirect
+from boto import handler
+
+
+def pretty_print_xml(text):
+    text = ''.join(t.strip() for t in text.splitlines())
+    x = xml.dom.minidom.parseString(text)
+    return x.toprettyxml()
+
+
+class TestS3WebsiteConfiguration(unittest.TestCase):
+    maxDiff = None
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def test_suffix_only(self):
+        config = WebsiteConfiguration(suffix='index.html')
+        xml = config.to_xml()
+        self.assertIn(
+            '<IndexDocument><Suffix>index.html</Suffix></IndexDocument>',
+            xml)
+
+    def test_suffix_and_error(self):
+        config = WebsiteConfiguration(suffix='index.html',
+                                      error_key='error.html')
+        xml = config.to_xml()
+        self.assertIn(
+            '<ErrorDocument><Key>error.html</Key></ErrorDocument>', xml)
+
+    def test_redirect_all_request_to_with_just_host(self):
+        location = RedirectLocation(hostname='example.com')
+        config = WebsiteConfiguration(redirect_all_requests_to=location)
+        xml = config.to_xml()
+        self.assertIn(
+            ('<RedirectAllRequestsTo><HostName>'
+             'example.com</HostName></RedirectAllRequestsTo>'), xml)
+
+    def test_redirect_all_requests_with_protocol(self):
+        location = RedirectLocation(hostname='example.com', protocol='https')
+        config = WebsiteConfiguration(redirect_all_requests_to=location)
+        xml = config.to_xml()
+        self.assertIn(
+            ('<RedirectAllRequestsTo><HostName>'
+             'example.com</HostName><Protocol>https</Protocol>'
+             '</RedirectAllRequestsTo>'), xml)
+
+    def test_routing_rules_key_prefix(self):
+        x = pretty_print_xml
+        # This rule redirects requests for docs/* to documents/*
+        rules = RoutingRules()
+        condition = Condition(key_prefix='docs/')
+        redirect = Redirect(replace_key_prefix='documents/')
+        rules.add_rule(RoutingRule(condition, redirect))
+        config = WebsiteConfiguration(suffix='index.html', routing_rules=rules)
+        xml = config.to_xml()
+
+        expected_xml = """<?xml version="1.0" encoding="UTF-8"?>
+        <WebsiteConfiguration xmlns='http://s3.amazonaws.com/doc/2006-03-01/'>
+          <IndexDocument>
+            <Suffix>index.html</Suffix>
+          </IndexDocument>
+          <RoutingRules>
+            <RoutingRule>
+              <Condition>
+                <KeyPrefixEquals>docs/</KeyPrefixEquals>
+              </Condition>
+              <Redirect>
+                <ReplaceKeyPrefixWith>documents/</ReplaceKeyPrefixWith>
+              </Redirect>
+            </RoutingRule>
+          </RoutingRules>
+        </WebsiteConfiguration>
+        """
+        self.assertEqual(x(expected_xml), x(xml))
+
+    def test_routing_rules_to_host_on_404(self):
+        x = pretty_print_xml
+        # Another example from the docs:
+        # Redirect requests to a specific host in the event of a 404.
+        # Also, the redirect inserts a report-404/ key prefix.  For example,
+        # if a request for the page ExamplePage.html results in a 404, the
+        # request is routed to a page report-404/ExamplePage.html
+        rules = RoutingRules()
+        condition = Condition(http_error_code=404)
+        redirect = Redirect(hostname='example.com',
+                            replace_key_prefix='report-404/')
+        rules.add_rule(RoutingRule(condition, redirect))
+        config = WebsiteConfiguration(suffix='index.html', routing_rules=rules)
+        xml = config.to_xml()
+
+        expected_xml = """<?xml version="1.0" encoding="UTF-8"?>
+        <WebsiteConfiguration xmlns='http://s3.amazonaws.com/doc/2006-03-01/'>
+          <IndexDocument>
+            <Suffix>index.html</Suffix>
+          </IndexDocument>
+          <RoutingRules>
+            <RoutingRule>
+              <Condition>
+                <HttpErrorCodeReturnedEquals>404</HttpErrorCodeReturnedEquals>
+              </Condition>
+              <Redirect>
+                <HostName>example.com</HostName>
+                <ReplaceKeyPrefixWith>report-404/</ReplaceKeyPrefixWith>
+              </Redirect>
+            </RoutingRule>
+          </RoutingRules>
+        </WebsiteConfiguration>
+        """
+        self.assertEqual(x(expected_xml), x(xml))
+
+    def test_key_prefix(self):
+        x = pretty_print_xml
+        rules = RoutingRules()
+        condition = Condition(key_prefix="images/")
+        redirect = Redirect(replace_key='folderdeleted.html')
+        rules.add_rule(RoutingRule(condition, redirect))
+        config = WebsiteConfiguration(suffix='index.html', routing_rules=rules)
+        xml = config.to_xml()
+
+        expected_xml = """<?xml version="1.0" encoding="UTF-8"?>
+        <WebsiteConfiguration xmlns='http://s3.amazonaws.com/doc/2006-03-01/'>
+          <IndexDocument>
+            <Suffix>index.html</Suffix>
+          </IndexDocument>
+          <RoutingRules>
+            <RoutingRule>
+              <Condition>
+                <KeyPrefixEquals>images/</KeyPrefixEquals>
+              </Condition>
+              <Redirect>
+                <ReplaceKeyWith>folderdeleted.html</ReplaceKeyWith>
+              </Redirect>
+            </RoutingRule>
+          </RoutingRules>
+        </WebsiteConfiguration>
+        """
+        self.assertEqual(x(expected_xml), x(xml))
+
+    def test_builders(self):
+        x = pretty_print_xml
+        # This is a more declarative way to create rules.
+        # First the long way.
+        rules = RoutingRules()
+        condition = Condition(http_error_code=404)
+        redirect = Redirect(hostname='example.com',
+                            replace_key_prefix='report-404/')
+        rules.add_rule(RoutingRule(condition, redirect))
+        xml = rules.to_xml()
+
+        # Then the more concise way.
+        rules2 = RoutingRules().add_rule(
+            RoutingRule.when(http_error_code=404).then_redirect(
+                hostname='example.com', replace_key_prefix='report-404/'))
+        xml2 = rules2.to_xml()
+        self.assertEqual(x(xml), x(xml2))
+
+    def test_parse_xml(self):
+        x = pretty_print_xml
+        xml_in = """<?xml version="1.0" encoding="UTF-8"?>
+        <WebsiteConfiguration xmlns='http://s3.amazonaws.com/doc/2006-03-01/'>
+          <IndexDocument>
+            <Suffix>index.html</Suffix>
+          </IndexDocument>
+          <ErrorDocument>
+            <Key>error.html</Key>
+          </ErrorDocument>
+          <RoutingRules>
+            <RoutingRule>
+              <Condition>
+                <KeyPrefixEquals>docs/</KeyPrefixEquals>
+              </Condition>
+              <Redirect>
+                <Protocol>https</Protocol>
+                <HostName>www.example.com</HostName>
+                <ReplaceKeyPrefixWith>documents/</ReplaceKeyPrefixWith>
+                <HttpRedirectCode>302</HttpRedirectCode>
+              </Redirect>
+            </RoutingRule>
+            <RoutingRule>
+              <Condition>
+                <HttpErrorCodeReturnedEquals>404</HttpErrorCodeReturnedEquals>
+              </Condition>
+              <Redirect>
+                <HostName>example.com</HostName>
+                <ReplaceKeyPrefixWith>report-404/</ReplaceKeyPrefixWith>
+              </Redirect>
+            </RoutingRule>
+          </RoutingRules>
+        </WebsiteConfiguration>
+        """
+        webconfig = WebsiteConfiguration()
+        h = handler.XmlHandler(webconfig, None)
+        xml.sax.parseString(xml_in.encode('utf-8'), h)
+        xml_out = webconfig.to_xml()
+        self.assertEqual(x(xml_in), x(xml_out))
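+
+
+# Allow this module to be run directly, as the sibling unit-test files do.
+if __name__ == '__main__':
+    unittest.main()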
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/ses/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ses/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/ses/test_identity.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/ses/test_identity.py
new file mode 100644
index 0000000000000000000000000000000000000000..1187514abb2098e06ac5726798fd2c510b1005af
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/ses/test_identity.py
@@ -0,0 +1,193 @@
+#!/usr/bin/env python
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from tests.unit import unittest
+from tests.unit import AWSMockServiceTestCase
+
+from boto.jsonresponse import ListElement
+from boto.ses.connection import SESConnection
+
+
+class TestSESIdentity(AWSMockServiceTestCase):
+    connection_class = SESConnection
+
+    def setUp(self):
+        super(TestSESIdentity, self).setUp()
+
+    def default_body(self):
+        return b"""<GetIdentityDkimAttributesResponse
+                xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
+          <GetIdentityDkimAttributesResult>
+            <DkimAttributes>
+              <entry>
+                <key>test@amazon.com</key>
+                <value>
+                  <DkimEnabled>true</DkimEnabled>
+                  <DkimVerificationStatus>Success</DkimVerificationStatus>
+                  <DkimTokens>
+                    <member>vvjuipp74whm76gqoni7qmwwn4w4qusjiainivf6f</member>
+                    <member>3frqe7jn4obpuxjpwpolz6ipb3k5nvt2nhjpik2oy</member>
+                    <member>wrqplteh7oodxnad7hsl4mixg2uavzneazxv5sxi2</member>
+                  </DkimTokens>
+                </value>
+              </entry>
+              <entry>
+                <key>secondtest@amazon.com</key>
+                <value>
+                  <DkimEnabled>false</DkimEnabled>
+                  <DkimVerificationStatus>NotStarted</DkimVerificationStatus>
+                </value>
+              </entry>
+            </DkimAttributes>
+          </GetIdentityDkimAttributesResult>
+          <ResponseMetadata>
+            <RequestId>bb5a105d-c468-11e1-82eb-dff885ccc06a</RequestId>
+          </ResponseMetadata>
+        </GetIdentityDkimAttributesResponse>"""
+
+    def test_ses_get_identity_dkim_list(self):
+        self.set_http_response(status_code=200)
+
+        response = self.service_connection\
+            .get_identity_dkim_attributes(['test@amazon.com',
+                                           'secondtest@amazon.com'])
+
+        response = response['GetIdentityDkimAttributesResponse']
+        result = response['GetIdentityDkimAttributesResult']
+
+        first_entry = result['DkimAttributes'][0]
+        entry_key = first_entry['key']
+        attributes = first_entry['value']
+        tokens = attributes['DkimTokens']
+
+        self.assertEqual(entry_key, 'test@amazon.com')
+        self.assertEqual(ListElement, type(tokens))
+        self.assertEqual(3, len(tokens))
+        self.assertEqual('vvjuipp74whm76gqoni7qmwwn4w4qusjiainivf6f',
+                         tokens[0])
+        self.assertEqual('3frqe7jn4obpuxjpwpolz6ipb3k5nvt2nhjpik2oy',
+                         tokens[1])
+        self.assertEqual('wrqplteh7oodxnad7hsl4mixg2uavzneazxv5sxi2',
+                         tokens[2])
+
+        second_entry = result['DkimAttributes'][1]
+        entry_key = second_entry['key']
+        attributes = second_entry['value']
+        dkim_enabled = attributes['DkimEnabled']
+        dkim_verification_status = attributes['DkimVerificationStatus']
+
+        self.assertEqual(entry_key, 'secondtest@amazon.com')
+        self.assertEqual(dkim_enabled, 'false')
+        self.assertEqual(dkim_verification_status, 'NotStarted')
+
+
+class TestSESSetIdentityNotificationTopic(AWSMockServiceTestCase):
+    connection_class = SESConnection
+
+    def setUp(self):
+        super(TestSESSetIdentityNotificationTopic, self).setUp()
+
+    def default_body(self):
+        return b"""<SetIdentityNotificationTopicResponse
+                xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
+          <SetIdentityNotificationTopicResult/>
+          <ResponseMetadata>
+            <RequestId>299f4af4-b72a-11e1-901f-1fbd90e8104f</RequestId>
+          </ResponseMetadata>
+        </SetIdentityNotificationTopicResponse>"""
+
+    def test_ses_set_identity_notification_topic_bounce(self):
+        self.set_http_response(status_code=200)
+
+        response = self.service_connection\
+            .set_identity_notification_topic(
+                identity='user@example.com',
+                notification_type='Bounce',
+                sns_topic='arn:aws:sns:us-east-1:123456789012:example')
+
+        response = response['SetIdentityNotificationTopicResponse']
+        result = response['SetIdentityNotificationTopicResult']
+
+        self.assertEqual(2, len(response))
+        self.assertEqual(0, len(result))
+
+    def test_ses_set_identity_notification_topic_complaint(self):
+        self.set_http_response(status_code=200)
+
+        response = self.service_connection\
+            .set_identity_notification_topic(
+                identity='user@example.com',
+                notification_type='Complaint',
+                sns_topic='arn:aws:sns:us-east-1:123456789012:example')
+
+        response = response['SetIdentityNotificationTopicResponse']
+        result = response['SetIdentityNotificationTopicResult']
+
+        self.assertEqual(2, len(response))
+        self.assertEqual(0, len(result))
+
+
+class TestSESSetIdentityFeedbackForwardingEnabled(AWSMockServiceTestCase):
+    connection_class = SESConnection
+
+    def setUp(self):
+        super(TestSESSetIdentityFeedbackForwardingEnabled, self).setUp()
+
+    def default_body(self):
+        return b"""<SetIdentityFeedbackForwardingEnabledResponse
+                xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
+          <SetIdentityFeedbackForwardingEnabledResult/>
+          <ResponseMetadata>
+            <RequestId>299f4af4-b72a-11e1-901f-1fbd90e8104f</RequestId>
+          </ResponseMetadata>
+        </SetIdentityFeedbackForwardingEnabledResponse>"""
+
+    def test_ses_set_identity_feedback_forwarding_enabled_true(self):
+        self.set_http_response(status_code=200)
+
+        response = self.service_connection\
+            .set_identity_feedback_forwarding_enabled(
+                identity='user@example.com',
+                forwarding_enabled=True)
+
+        response = response['SetIdentityFeedbackForwardingEnabledResponse']
+        result = response['SetIdentityFeedbackForwardingEnabledResult']
+
+        self.assertEqual(2, len(response))
+        self.assertEqual(0, len(result))
+
+    def test_ses_set_identity_feedback_forwarding_enabled_false(self):
+        self.set_http_response(status_code=200)
+
+        response = self.service_connection\
+            .set_identity_feedback_forwarding_enabled(
+                identity='user@example.com',
+                forwarding_enabled=False)
+
+        response = response['SetIdentityFeedbackForwardingEnabledResponse']
+        result = response['SetIdentityFeedbackForwardingEnabledResult']
+
+        self.assertEqual(2, len(response))
+        self.assertEqual(0, len(result))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/sns/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/sns/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/sns/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/sns/test_connection.py
new file mode 100644
index 0000000000000000000000000000000000000000..2cce22115ea41bba3f1b8ce1e15d7f520fd27c06
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/sns/test_connection.py
@@ -0,0 +1,281 @@
+#!/usr/bin/env python
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+# +import json +from tests.unit import unittest +from tests.unit import AWSMockServiceTestCase +from mock import Mock + +from boto.sns.connection import SNSConnection + +QUEUE_POLICY = { + u'Policy': + (u'{"Version":"2008-10-17","Id":"arn:aws:sqs:us-east-1:' + 'idnum:testqueuepolicy/SQSDefaultPolicy","Statement":' + '[{"Sid":"sidnum","Effect":"Allow","Principal":{"AWS":"*"},' + '"Action":"SQS:GetQueueUrl","Resource":' + '"arn:aws:sqs:us-east-1:idnum:testqueuepolicy"}]}')} + + +class TestSNSConnection(AWSMockServiceTestCase): + connection_class = SNSConnection + + def setUp(self): + super(TestSNSConnection, self).setUp() + + def default_body(self): + return b"{}" + + def test_sqs_with_existing_policy(self): + self.set_http_response(status_code=200) + + queue = Mock() + queue.get_attributes.return_value = QUEUE_POLICY + queue.arn = 'arn:aws:sqs:us-east-1:idnum:queuename' + + self.service_connection.subscribe_sqs_queue('topic_arn', queue) + self.assert_request_parameters({ + 'Action': 'Subscribe', + 'ContentType': 'JSON', + 'Endpoint': 'arn:aws:sqs:us-east-1:idnum:queuename', + 'Protocol': 'sqs', + 'TopicArn': 'topic_arn', + 'Version': '2010-03-31', + }, ignore_params_values=[]) + + # Verify that the queue policy was properly updated. + actual_policy = json.loads(queue.set_attribute.call_args[0][1]) + self.assertEqual(actual_policy['Version'], '2008-10-17') + # A new statement should be appended to the end of the statement list. + self.assertEqual(len(actual_policy['Statement']), 2) + self.assertEqual(actual_policy['Statement'][1]['Action'], + 'SQS:SendMessage') + + def test_sqs_with_no_previous_policy(self): + self.set_http_response(status_code=200) + + queue = Mock() + queue.get_attributes.return_value = {} + queue.arn = 'arn:aws:sqs:us-east-1:idnum:queuename' + + self.service_connection.subscribe_sqs_queue('topic_arn', queue) + self.assert_request_parameters({ + 'Action': 'Subscribe', + 'ContentType': 'JSON', + 'Endpoint': 'arn:aws:sqs:us-east-1:idnum:queuename', + 'Protocol': 'sqs', + 'TopicArn': 'topic_arn', + 'Version': '2010-03-31', + }, ignore_params_values=[]) + actual_policy = json.loads(queue.set_attribute.call_args[0][1]) + # Only a single statement should be part of the policy. 
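+        # (A sketch of what that statement contains, mirroring the
+        # assertion in test_sqs_with_existing_policy above: an Allow
+        # statement for SQS:SendMessage against the queue's ARN; the
+        # generated Sid differs per call, so it is not asserted on here.)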
+ self.assertEqual(len(actual_policy['Statement']), 1) + + def test_publish_with_positional_args(self): + self.set_http_response(status_code=200) + + self.service_connection.publish('topic', 'message', 'subject') + self.assert_request_parameters({ + 'Action': 'Publish', + 'TopicArn': 'topic', + 'Subject': 'subject', + 'Message': 'message', + }, ignore_params_values=['Version', 'ContentType']) + + def test_publish_with_kwargs(self): + self.set_http_response(status_code=200) + + self.service_connection.publish(topic='topic', + message='message', + subject='subject') + self.assert_request_parameters({ + 'Action': 'Publish', + 'TopicArn': 'topic', + 'Subject': 'subject', + 'Message': 'message', + }, ignore_params_values=['Version', 'ContentType']) + + def test_publish_with_target_arn(self): + self.set_http_response(status_code=200) + + self.service_connection.publish(target_arn='target_arn', + message='message', + subject='subject') + self.assert_request_parameters({ + 'Action': 'Publish', + 'TargetArn': 'target_arn', + 'Subject': 'subject', + 'Message': 'message', + }, ignore_params_values=['Version', 'ContentType']) + + def test_create_platform_application(self): + self.set_http_response(status_code=200) + + self.service_connection.create_platform_application( + name='MyApp', + platform='APNS', + attributes={ + 'PlatformPrincipal': 'a ssl certificate', + 'PlatformCredential': 'a private key' + } + ) + self.assert_request_parameters({ + 'Action': 'CreatePlatformApplication', + 'Name': 'MyApp', + 'Platform': 'APNS', + 'Attributes.entry.1.key': 'PlatformCredential', + 'Attributes.entry.1.value': 'a private key', + 'Attributes.entry.2.key': 'PlatformPrincipal', + 'Attributes.entry.2.value': 'a ssl certificate', + }, ignore_params_values=['Version', 'ContentType']) + + def test_set_platform_application_attributes(self): + self.set_http_response(status_code=200) + + self.service_connection.set_platform_application_attributes( + platform_application_arn='arn:myapp', + attributes={'PlatformPrincipal': 'a ssl certificate', + 'PlatformCredential': 'a private key'}) + self.assert_request_parameters({ + 'Action': 'SetPlatformApplicationAttributes', + 'PlatformApplicationArn': 'arn:myapp', + 'Attributes.entry.1.key': 'PlatformCredential', + 'Attributes.entry.1.value': 'a private key', + 'Attributes.entry.2.key': 'PlatformPrincipal', + 'Attributes.entry.2.value': 'a ssl certificate', + }, ignore_params_values=['Version', 'ContentType']) + + def test_create_platform_endpoint(self): + self.set_http_response(status_code=200) + + self.service_connection.create_platform_endpoint( + platform_application_arn='arn:myapp', + token='abcde12345', + custom_user_data='john', + attributes={'Enabled': False}) + self.assert_request_parameters({ + 'Action': 'CreatePlatformEndpoint', + 'PlatformApplicationArn': 'arn:myapp', + 'Token': 'abcde12345', + 'CustomUserData': 'john', + 'Attributes.entry.1.key': 'Enabled', + 'Attributes.entry.1.value': False, + }, ignore_params_values=['Version', 'ContentType']) + + def test_set_endpoint_attributes(self): + self.set_http_response(status_code=200) + + self.service_connection.set_endpoint_attributes( + endpoint_arn='arn:myendpoint', + attributes={'CustomUserData': 'john', + 'Enabled': False}) + self.assert_request_parameters({ + 'Action': 'SetEndpointAttributes', + 'EndpointArn': 'arn:myendpoint', + 'Attributes.entry.1.key': 'CustomUserData', + 'Attributes.entry.1.value': 'john', + 'Attributes.entry.2.key': 'Enabled', + 'Attributes.entry.2.value': False, + }, 
ignore_params_values=['Version', 'ContentType']) + + def test_message_is_required(self): + self.set_http_response(status_code=200) + + with self.assertRaises(TypeError): + self.service_connection.publish(topic='topic', subject='subject') + + def test_publish_with_json(self): + self.set_http_response(status_code=200) + + self.service_connection.publish( + message=json.dumps({ + 'default': 'Ignored.', + 'GCM': { + 'data': 'goes here', + } + }), + message_structure='json', + subject='subject', + target_arn='target_arn' + ) + self.assert_request_parameters({ + 'Action': 'Publish', + 'TargetArn': 'target_arn', + 'Subject': 'subject', + 'MessageStructure': 'json', + }, ignore_params_values=['Version', 'ContentType', 'Message']) + self.assertDictEqual( + json.loads(self.actual_request.params["Message"]), + {"default": "Ignored.", "GCM": {"data": "goes here"}}) + + def test_publish_with_utf8_message(self): + self.set_http_response(status_code=200) + subject = message = u'We \u2665 utf-8'.encode('utf-8') + self.service_connection.publish('topic', message, subject) + self.assert_request_parameters({ + 'Action': 'Publish', + 'TopicArn': 'topic', + 'Subject': subject, + 'Message': message, + }, ignore_params_values=['Version', 'ContentType']) + + def test_publish_with_attributes(self): + self.set_http_response(status_code=200) + + self.service_connection.publish( + message=json.dumps({ + 'default': 'Ignored.', + 'GCM': { + 'data': 'goes here', + } + }, sort_keys=True), + message_structure='json', + subject='subject', + target_arn='target_arn', + message_attributes={ + 'name1': { + 'data_type': 'Number', + 'string_value': '42' + }, + 'name2': { + 'data_type': 'String', + 'string_value': 'Bob' + }, + }, + ) + self.assert_request_parameters({ + 'Action': 'Publish', + 'TargetArn': 'target_arn', + 'Subject': 'subject', + 'Message': '{"GCM": {"data": "goes here"}, "default": "Ignored."}', + 'MessageStructure': 'json', + 'MessageAttributes.entry.1.Name': 'name1', + 'MessageAttributes.entry.1.Value.DataType': 'Number', + 'MessageAttributes.entry.1.Value.StringValue': '42', + 'MessageAttributes.entry.2.Name': 'name2', + 'MessageAttributes.entry.2.Value.DataType': 'String', + 'MessageAttributes.entry.2.Value.StringValue': 'Bob', + }, ignore_params_values=['Version', 'ContentType']) + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/sqs/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/sqs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/sqs/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/sqs/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..b1735de126b24bde2f5b8ba74497d6a1d05de24f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/sqs/test_connection.py @@ -0,0 +1,313 @@ +#!/usr/bin/env python +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. 
All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from tests.unit import AWSMockServiceTestCase, MockServiceWithConfigTestCase
+from tests.unit import unittest
+
+from tests.compat import mock
+
+from boto.sqs.connection import SQSConnection
+from boto.sqs.regioninfo import SQSRegionInfo
+from boto.sqs.message import RawMessage
+from boto.sqs.queue import Queue
+from boto.connection import AWSQueryConnection
+
+from nose.plugins.attrib import attr
+
+class SQSAuthParams(AWSMockServiceTestCase):
+    connection_class = SQSConnection
+
+    def setUp(self):
+        super(SQSAuthParams, self).setUp()
+
+    def default_body(self):
+        return """<CreateQueueResponse>
+          <CreateQueueResult>
+            <QueueUrl>
+              https://queue.amazonaws.com/599169622985/myqueue1
+            </QueueUrl>
+          </CreateQueueResult>
+          <ResponseMetadata>
+            <RequestId>54d4c94d-2307-54a8-bb27-806a682a5abd</RequestId>
+          </ResponseMetadata>
+        </CreateQueueResponse>"""
+
+    @attr(sqs=True)
+    def test_auth_service_name_override(self):
+        self.set_http_response(status_code=200)
+        # We can use the auth_service_name to change what service
+        # name to use for the credential scope for sigv4.
+        self.service_connection.auth_service_name = 'service_override'
+
+        self.service_connection.create_queue('my_queue')
+        # Note the service_override value instead.
+        self.assertIn('us-east-1/service_override/aws4_request',
+                      self.actual_request.headers['Authorization'])
+
+    @attr(sqs=True)
+    def test_class_attribute_can_set_service_name(self):
+        self.set_http_response(status_code=200)
+        # The SQS class has an 'AuthServiceName' param of 'sqs':
+        self.assertEqual(self.service_connection.AuthServiceName, 'sqs')
+
+        self.service_connection.create_queue('my_queue')
+        # And because of this, the value of 'sqs' will be used instead of
+        # 'queue' for the credential scope:
+        self.assertIn('us-east-1/sqs/aws4_request',
+                      self.actual_request.headers['Authorization'])
+
+    @attr(sqs=True)
+    def test_auth_region_name_is_automatically_updated(self):
+        region = SQSRegionInfo(name='us-west-2',
+                               endpoint='us-west-2.queue.amazonaws.com')
+        self.service_connection = SQSConnection(
+            https_connection_factory=self.https_connection_factory,
+            aws_access_key_id='aws_access_key_id',
+            aws_secret_access_key='aws_secret_access_key',
+            region=region)
+        self.initialize_service_connection()
+        self.set_http_response(status_code=200)
+
+        self.service_connection.create_queue('my_queue')
+
+        # Note the region name below is 'us-west-2'.
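+        # The sigv4 credential scope has the form
+        # <date>/<region>/<service>/aws4_request; the date portion varies
+        # per request, which is why only the tail of the scope is checked.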
+        self.assertIn('us-west-2/sqs/aws4_request',
+                      self.actual_request.headers['Authorization'])
+
+    @attr(sqs=True)
+    def test_set_get_auth_service_and_region_names(self):
+        self.service_connection.auth_service_name = 'service_name'
+        self.service_connection.auth_region_name = 'region_name'
+
+        self.assertEqual(self.service_connection.auth_service_name,
+                         'service_name')
+        self.assertEqual(self.service_connection.auth_region_name,
+                         'region_name')
+
+    @attr(sqs=True)
+    def test_get_queue_with_owner_account_id_returns_queue(self):
+
+        self.set_http_response(status_code=200)
+        self.service_connection.create_queue('my_queue')
+
+        self.service_connection.get_queue('my_queue', '599169622985')
+
+        assert 'QueueOwnerAWSAccountId' in self.actual_request.params.keys()
+        self.assertEquals(self.actual_request.params['QueueOwnerAWSAccountId'],
+                          '599169622985')
+
+class SQSProfileName(MockServiceWithConfigTestCase):
+    connection_class = SQSConnection
+    profile_name = 'prod'
+
+    def setUp(self):
+        super(SQSProfileName, self).setUp()
+        self.config = {
+            "profile prod": {
+                'aws_access_key_id': 'access_key',
+                'aws_secret_access_key': 'secret_access',
+            }
+        }
+
+    @attr(sqs=True)
+    def test_profile_name_gets_passed(self):
+
+        region = SQSRegionInfo(name='us-west-2',
+                               endpoint='us-west-2.queue.amazonaws.com')
+        self.service_connection = SQSConnection(
+            https_connection_factory=self.https_connection_factory,
+            region=region,
+            profile_name=self.profile_name)
+        self.initialize_service_connection()
+        self.set_http_response(status_code=200)
+
+        self.assertEquals(self.service_connection.profile_name,
+                          self.profile_name)
+
+class SQSMessageAttributesParsing(AWSMockServiceTestCase):
+    connection_class = SQSConnection
+
+    def default_body(self):
+        return """<?xml version="1.0"?>
+        <ReceiveMessageResponse xmlns="http://queue.amazonaws.com/doc/2012-11-05/">
+          <ReceiveMessageResult>
+            <Message>
+              <Body>This is a test</Body>
+              <ReceiptHandle>eXJYhj5rDql5hp2VwGkXvQVsefdjAlsQe5EGS57gyORPB48KwP1d/3Rfy4DrQXt+MgfRPHUCUH36xL9+Ol/UWD/ylKrrWhiXSY0Ip4EsI8jJNTo/aneEjKE/iZnz/nL8MFP5FmMj8PbDAy5dgvAqsdvX1rm8Ynn0bGnQLJGfH93cLXT65p6Z/FDyjeBN0M+9SWtTcuxOIcMdU8NsoFIwm/6mLWgWAV46OhlYujzvyopCvVwsj+Y8jLEpdSSvTQHNlQEaaY/V511DqAvUwru2p0ZbW7ZzcbhUTn6hHkUROo=</ReceiptHandle>
+              <MD5OfBody>ce114e4501d2f4e2dcea3e17b546f339</MD5OfBody>
+              <MessageAttribute>
+                <Name>Count</Name>
+                <Value>
+                  <DataType>Number</DataType>
+                  <StringValue>1</StringValue>
+                </Value>
+              </MessageAttribute>
+              <MessageAttribute>
+                <Name>Foo</Name>
+                <Value>
+                  <DataType>String</DataType>
+                  <StringValue>Bar</StringValue>
+                </Value>
+              </MessageAttribute>
+              <MessageId>7049431b-e5f6-430b-93c4-ded53864d02b</MessageId>
+              <MD5OfMessageAttributes>324758f82d026ac6ec5b31a3b192d1e3</MD5OfMessageAttributes>
+            </Message>
+          </ReceiveMessageResult>
+          <ResponseMetadata>
+            <RequestId>73f978f2-400b-5460-8d38-3316e39e79c6</RequestId>
+          </ResponseMetadata>
+        </ReceiveMessageResponse>"""
+
+    @attr(sqs=True)
+    def test_message_attribute_response(self):
+        self.set_http_response(status_code=200)
+
+        queue = Queue(
+            url='http://sqs.us-east-1.amazonaws.com/123456789012/testQueue/',
+            message_class=RawMessage)
+        message = self.service_connection.receive_message(queue)[0]
+
+        self.assertEqual(message.get_body(), 'This is a test')
+        self.assertEqual(message.id, '7049431b-e5f6-430b-93c4-ded53864d02b')
+        self.assertEqual(message.md5, 'ce114e4501d2f4e2dcea3e17b546f339')
+        self.assertEqual(message.md5_message_attributes,
+                         '324758f82d026ac6ec5b31a3b192d1e3')
+
+        mattributes = message.message_attributes
+        self.assertEqual(len(mattributes.keys()), 2)
+        self.assertEqual(mattributes['Count']['data_type'], 'Number')
+        self.assertEqual(mattributes['Foo']['string_value'], 'Bar')
+
+
+class SQSSendMessageAttributes(AWSMockServiceTestCase):
+    connection_class = SQSConnection
+
+    def default_body(self):
+        return """<SendMessageResponse>
+          <SendMessageResult>
+            <MD5OfMessageBody>
+              fafb00f5732ab283681e124bf8747ed1
+            </MD5OfMessageBody>
+            <MD5OfMessageAttributes>
+              3ae8f24a165a8cedc005670c81a27295
+            </MD5OfMessageAttributes>
+            <MessageId>
+              5fea7756-0ea4-451a-a703-a558b933e274
+            </MessageId>
+          </SendMessageResult>
+          <ResponseMetadata>
+            <RequestId>
+              27daac76-34dd-47df-bd01-1f6e873584a0
+            </RequestId>
+          </ResponseMetadata>
+        </SendMessageResponse>"""
+
+    @attr(sqs=True)
+    def test_send_message_attributes(self):
+        self.set_http_response(status_code=200)
+
+        queue = Queue(
+            url='http://sqs.us-east-1.amazonaws.com/123456789012/testQueue/',
+            message_class=RawMessage)
+        self.service_connection.send_message(queue, 'Test message',
+            message_attributes={
+                'name1': {
+                    'data_type': 'String',
+                    'string_value': 'Bob'
+                },
+                'name2': {
+                    'data_type': 'Number',
+                    'string_value': '1'
+                }
+            })
+
+        self.assert_request_parameters({
+            'Action': 'SendMessage',
+            'MessageAttribute.1.Name': 'name1',
+            'MessageAttribute.1.Value.DataType': 'String',
+            'MessageAttribute.1.Value.StringValue': 'Bob',
+            'MessageAttribute.2.Name': 'name2',
+            'MessageAttribute.2.Value.DataType': 'Number',
+            'MessageAttribute.2.Value.StringValue': '1',
+            'MessageBody': 'Test message',
+            'Version': '2012-11-05'
+        })
+
+
+class SQSSendBatchMessageAttributes(AWSMockServiceTestCase):
+    connection_class = SQSConnection
+
+    def default_body(self):
+        return """<SendMessageBatchResponse>
+          <SendMessageBatchResult>
+            <SendMessageBatchResultEntry>
+              <Id>test_msg_001</Id>
+              <MessageId>0a5231c7-8bff-4955-be2e-8dc7c50a25fa</MessageId>
+              <MD5OfMessageBody>0e024d309850c78cba5eabbeff7cae71</MD5OfMessageBody>
+            </SendMessageBatchResultEntry>
+            <SendMessageBatchResultEntry>
+              <Id>test_msg_002</Id>
+              <MessageId>15ee1ed3-87e7-40c1-bdaa-2e49968ea7e9</MessageId>
+              <MD5OfMessageBody>7fb8146a82f95e0af155278f406862c2</MD5OfMessageBody>
+              <MD5OfMessageAttributes>295c5fa15a51aae6884d1d7c1d99ca50</MD5OfMessageAttributes>
+            </SendMessageBatchResultEntry>
+          </SendMessageBatchResult>
+          <ResponseMetadata>
+            <RequestId>ca1ad5d0-8271-408b-8d0f-1351bf547e74</RequestId>
+          </ResponseMetadata>
+        </SendMessageBatchResponse>"""
+
+    @attr(sqs=True)
+    def test_send_message_attributes(self):
+        self.set_http_response(status_code=200)
+
+        queue = Queue(
+            url='http://sqs.us-east-1.amazonaws.com/123456789012/testQueue/',
+            message_class=RawMessage)
+
+        message1 = (1, 'Message 1', 0, {'name1': {'data_type': 'String',
+                                                  'string_value': 'foo'}})
+        message2 = (2, 'Message 2', 0, {'name2': {'data_type': 'Number',
+                                                  'string_value': '1'}})
+
+        self.service_connection.send_message_batch(queue, (message1, message2))
+
+        self.assert_request_parameters({
+            'Action': 'SendMessageBatch',
+            'SendMessageBatchRequestEntry.1.DelaySeconds': 0,
+            'SendMessageBatchRequestEntry.1.Id': 1,
+            'SendMessageBatchRequestEntry.1.MessageAttribute.1.DataType': 'String',
+            'SendMessageBatchRequestEntry.1.MessageAttribute.1.Name': 'name1',
+            'SendMessageBatchRequestEntry.1.MessageAttribute.1.StringValue': 'foo',
+            'SendMessageBatchRequestEntry.1.MessageBody': 'Message 1',
+            'SendMessageBatchRequestEntry.2.DelaySeconds': 0,
+            'SendMessageBatchRequestEntry.2.Id': 2,
+            'SendMessageBatchRequestEntry.2.MessageAttribute.1.DataType': 'Number',
+            'SendMessageBatchRequestEntry.2.MessageAttribute.1.Name': 'name2',
+            'SendMessageBatchRequestEntry.2.MessageAttribute.1.StringValue': '1',
+            'SendMessageBatchRequestEntry.2.MessageBody': 'Message 2',
+            'Version': '2012-11-05'
+        })
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/sqs/test_message.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/sqs/test_message.py
new file mode 100644
index 0000000000000000000000000000000000000000..b026a2afa5ad1d3713f6e9327205de8f7c161707
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/sqs/test_message.py
@@ -0,0 +1,116 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from tests.unit import unittest
+
+from boto.sqs.message import MHMessage
+from boto.sqs.message import RawMessage
+from boto.sqs.message import Message
+from boto.sqs.bigmessage import BigMessage
+from boto.exception import SQSDecodeError
+
+from nose.plugins.attrib import attr
+
+class TestMHMessage(unittest.TestCase):
+
+    @attr(sqs=True)
+    def test_contains(self):
+        msg = MHMessage()
+        msg.update({'hello': 'world'})
+        self.assertTrue('hello' in msg)
+
+
+class DecodeExceptionRaisingMessage(RawMessage):
+
+    @attr(sqs=True)
+    def decode(self, message):
+        raise SQSDecodeError('Sample decode error', self)
+
+class TestEncodeMessage(unittest.TestCase):
+
+    @attr(sqs=True)
+    def test_message_id_available(self):
+        import xml.sax
+        from boto.resultset import ResultSet
+        from boto.handler import XmlHandler
+        sample_value = 'abcdef'
+        body = """<?xml version="1.0"?>
+            <ReceiveMessageResponse>
+              <ReceiveMessageResult>
+                <Message>
+                  <MessageId>%s</MessageId>
+                  <ReceiptHandle>%s</ReceiptHandle>
+                  <Body>%s</Body>
+                </Message>
+              </ReceiveMessageResult>
+            </ReceiveMessageResponse>""" % tuple([sample_value] * 3)
+        rs = ResultSet([('Message', DecodeExceptionRaisingMessage)])
+        h = XmlHandler(rs, None)
+        with self.assertRaises(SQSDecodeError) as context:
+            xml.sax.parseString(body.encode('utf-8'), h)
+        message = context.exception.message
+        self.assertEquals(message.id, sample_value)
+        self.assertEquals(message.receipt_handle, sample_value)
+
+    @attr(sqs=True)
+    def test_encode_bytes_message(self):
+        message = Message()
+        body = b'\x00\x01\x02\x03\x04\x05'
+        message.set_body(body)
+        self.assertEqual(message.get_body_encoded(), 'AAECAwQF')
+
+    @attr(sqs=True)
+    def test_encode_string_message(self):
+        message = Message()
+        body = 'hello world'
+        message.set_body(body)
+        self.assertEqual(message.get_body_encoded(), 'aGVsbG8gd29ybGQ=')
+
+
+class TestBigMessage(unittest.TestCase):
+
+    @attr(sqs=True)
+    def test_s3url_parsing(self):
+        msg = BigMessage()
+        # Try just a bucket name
+        bucket, key = msg._get_bucket_key('s3://foo')
+        self.assertEquals(bucket, 'foo')
+        self.assertEquals(key, None)
+        # Try just a bucket name with trailing "/"
+        bucket, key = msg._get_bucket_key('s3://foo/')
+        self.assertEquals(bucket, 'foo')
+        self.assertEquals(key, None)
+        # Try a bucket and a key
+        bucket, key = msg._get_bucket_key('s3://foo/bar')
+        self.assertEquals(bucket, 'foo')
+        self.assertEquals(key, 'bar')
+        # Try a bucket and a key with "/"
+        bucket, key = msg._get_bucket_key('s3://foo/bar/fie/baz')
+        self.assertEquals(bucket, 'foo')
+        self.assertEquals(key, 'bar/fie/baz')
+        # Try it with no s3:// prefix
+        with self.assertRaises(SQSDecodeError) as context:
+            bucket, key = msg._get_bucket_key('foo/bar')
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/sqs/test_queue.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/sqs/test_queue.py
new file mode 100644
index 0000000000000000000000000000000000000000..63ac308e531da44f80b871d0448f1228631d826b
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/sqs/test_queue.py
@@ -0,0 +1,51 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from tests.unit import unittest +from mock import Mock + +from boto.sqs.queue import Queue + +from nose.plugins.attrib import attr + +class TestQueue(unittest.TestCase): + + @attr(sqs=True) + def test_queue_arn(self): + connection = Mock() + connection.region.name = 'us-east-1' + q = Queue( + connection=connection, + url='https://sqs.us-east-1.amazonaws.com/id/queuename') + self.assertEqual(q.arn, 'arn:aws:sqs:us-east-1:id:queuename') + + @attr(sqs=True) + def test_queue_name(self): + connection = Mock() + connection.region.name = 'us-east-1' + q = Queue( + connection=connection, + url='https://sqs.us-east-1.amazonaws.com/id/queuename') + self.assertEqual(q.name, 'queuename') + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/sts/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/sts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/sts/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/sts/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..dd97c770d8bc280b5de74a0428d55184853cebc9 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/sts/test_connection.py @@ -0,0 +1,244 @@ +#!/usr/bin/env python +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from tests.unit import unittest
+from boto.sts.connection import STSConnection
+from tests.unit import AWSMockServiceTestCase
+
+
+class TestSecurityToken(AWSMockServiceTestCase):
+    connection_class = STSConnection
+
+    def create_service_connection(self, **kwargs):
+        kwargs['security_token'] = 'token'
+
+        return super(TestSecurityToken, self).create_service_connection(**kwargs)
+
+    def test_security_token(self):
+        self.assertEqual('token',
+                         self.service_connection.provider.security_token)
+
+class TestSTSConnection(AWSMockServiceTestCase):
+    connection_class = STSConnection
+
+    def setUp(self):
+        super(TestSTSConnection, self).setUp()
+
+    def default_body(self):
+        return b"""
+        <AssumeRoleResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
+          <AssumeRoleResult>
+            <AssumedRoleUser>
+              <Arn>arn:role</Arn>
+              <AssumedRoleId>roleid:myrolesession</AssumedRoleId>
+            </AssumedRoleUser>
+            <Credentials>
+              <SessionToken>session_token</SessionToken>
+              <SecretAccessKey>secretkey</SecretAccessKey>
+              <Expiration>2012-10-18T10:18:14.789Z</Expiration>
+              <AccessKeyId>accesskey</AccessKeyId>
+            </Credentials>
+          </AssumeRoleResult>
+          <ResponseMetadata>
+            <RequestId>8b7418cb-18a8-11e2-a706-4bd22ca68ab7</RequestId>
+          </ResponseMetadata>
+        </AssumeRoleResponse>
+        """
+
+    def test_assume_role(self):
+        self.set_http_response(status_code=200)
+        response = self.service_connection.assume_role('arn:role', 'mysession')
+        self.assert_request_parameters(
+            {'Action': 'AssumeRole',
+             'RoleArn': 'arn:role',
+             'RoleSessionName': 'mysession'},
+            ignore_params_values=['Version'])
+        self.assertEqual(response.credentials.access_key, 'accesskey')
+        self.assertEqual(response.credentials.secret_key, 'secretkey')
+        self.assertEqual(response.credentials.session_token, 'session_token')
+        self.assertEqual(response.user.arn, 'arn:role')
+        self.assertEqual(response.user.assume_role_id, 'roleid:myrolesession')
+
+    def test_assume_role_with_mfa(self):
+        self.set_http_response(status_code=200)
+        response = self.service_connection.assume_role(
+            'arn:role',
+            'mysession',
+            mfa_serial_number='GAHT12345678',
+            mfa_token='abc123'
+        )
+        self.assert_request_parameters(
+            {'Action': 'AssumeRole',
+             'RoleArn': 'arn:role',
+             'RoleSessionName': 'mysession',
+             'SerialNumber': 'GAHT12345678',
+             'TokenCode': 'abc123'},
+            ignore_params_values=['Version'])
+        self.assertEqual(response.credentials.access_key, 'accesskey')
+        self.assertEqual(response.credentials.secret_key, 'secretkey')
+        self.assertEqual(response.credentials.session_token, 'session_token')
+        self.assertEqual(response.user.arn, 'arn:role')
+        self.assertEqual(response.user.assume_role_id, 'roleid:myrolesession')
+
+
+class TestSTSWebIdentityConnection(AWSMockServiceTestCase):
+    connection_class = STSConnection
+
+    def setUp(self):
+        super(TestSTSWebIdentityConnection, self).setUp()
+
+    def default_body(self):
+        return b"""
+        <AssumeRoleWithWebIdentityResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
+          <AssumeRoleWithWebIdentityResult>
+            <SubjectFromWebIdentityToken>
+              amzn1.account.AF6RHO7KZU5XRVQJGXK6HB56KR2A
+            </SubjectFromWebIdentityToken>
+            <AssumedRoleUser>
+              <Arn>
+                arn:aws:sts::000240903217:assumed-role/FederatedWebIdentityRole/app1
+              </Arn>
+              <AssumedRoleId>
+                AROACLKWSDQRAOFQC3IDI:app1
+              </AssumedRoleId>
+            </AssumedRoleUser>
+            <Credentials>
+              <SessionToken>
+                AQoDYXdzEE0a8ANXXXXXXXXNO1ewxE5TijQyp+IPfnyowF
+              </SessionToken>
+              <SecretAccessKey>
+                secretkey
+              </SecretAccessKey>
+              <Expiration>
+                2013-05-14T23:00:23Z
+              </Expiration>
+              <AccessKeyId>
+                accesskey
+              </AccessKeyId>
+            </Credentials>
+          </AssumeRoleWithWebIdentityResult>
+          <ResponseMetadata>
+            <RequestId>ad4156e9-bce1-11e2-82e6-6b6ef249e618</RequestId>
+          </ResponseMetadata>
+        </AssumeRoleWithWebIdentityResponse>
+        """
+
+    def test_assume_role_with_web_identity(self):
+        arn = 'arn:aws:iam::000240903217:role/FederatedWebIdentityRole'
+        wit = 'b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9'
+
+        self.set_http_response(status_code=200)
+        response = self.service_connection.assume_role_with_web_identity(
+            role_arn=arn,
+            role_session_name='guestuser',
+            web_identity_token=wit,
+            provider_id='www.amazon.com',
+        )
+        self.assert_request_parameters({
+            'RoleSessionName': 'guestuser',
+            'RoleArn': arn,
+            'WebIdentityToken': wit,
+            'ProviderId': 'www.amazon.com',
+            'Action': 'AssumeRoleWithWebIdentity'
+        }, ignore_params_values=[
+            'Version'
+        ])
+        self.assertEqual(
+            response.credentials.access_key.strip(),
+            'accesskey'
+        )
+        self.assertEqual(
+            response.credentials.secret_key.strip(),
+            'secretkey'
+        )
+        self.assertEqual(
+            response.credentials.session_token.strip(),
+            'AQoDYXdzEE0a8ANXXXXXXXXNO1ewxE5TijQyp+IPfnyowF'
+        )
+        self.assertEqual(
+            response.user.arn.strip(),
+            'arn:aws:sts::000240903217:assumed-role/FederatedWebIdentityRole/app1'
+        )
+        self.assertEqual(
+            response.user.assume_role_id.strip(),
+            'AROACLKWSDQRAOFQC3IDI:app1'
+        )
+
+
+class TestSTSSAMLConnection(AWSMockServiceTestCase):
+    connection_class = STSConnection
+
+    def setUp(self):
+        super(TestSTSSAMLConnection, self).setUp()
+
+    def default_body(self):
+        return b"""
+        <AssumeRoleWithSAMLResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
+          <AssumeRoleWithSAMLResult>
+            <Credentials>
+              <SessionToken>session_token</SessionToken>
+              <SecretAccessKey>secretkey</SecretAccessKey>
+              <Expiration>2011-07-15T23:28:33.359Z</Expiration>
+              <AccessKeyId>accesskey</AccessKeyId>
+            </Credentials>
+            <AssumedRoleUser>
+              <Arn>arn:role</Arn>
+              <AssumedRoleId>roleid:myrolesession</AssumedRoleId>
+            </AssumedRoleUser>
+            <PackedPolicySize>6</PackedPolicySize>
+          </AssumeRoleWithSAMLResult>
+          <ResponseMetadata>
+            <RequestId>c6104cbe-af31-11e0-8154-cbc7ccf896c7</RequestId>
+          </ResponseMetadata>
+        </AssumeRoleWithSAMLResponse>
+"""
+
+    def test_assume_role_with_saml(self):
+        arn = 'arn:aws:iam::000240903217:role/Test'
+        principal = 'arn:aws:iam::000240903217:role/Principal'
+        assertion = 'test'
+
+        self.set_http_response(status_code=200)
+        response = self.service_connection.assume_role_with_saml(
+            role_arn=arn,
+            principal_arn=principal,
+            saml_assertion=assertion
+        )
+        self.assert_request_parameters({
+            'RoleArn': arn,
+            'PrincipalArn': principal,
+            'SAMLAssertion': assertion,
+            'Action': 'AssumeRoleWithSAML'
+        }, ignore_params_values=[
+            'Version'
+        ])
+        self.assertEqual(response.credentials.access_key, 'accesskey')
+        self.assertEqual(response.credentials.secret_key, 'secretkey')
+        self.assertEqual(response.credentials.session_token, 'session_token')
+        self.assertEqual(response.user.arn, 'arn:role')
+        self.assertEqual(response.user.assume_role_id, 'roleid:myrolesession')
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/sts/test_credentials.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/sts/test_credentials.py
new file mode 100644
index 0000000000000000000000000000000000000000..27a16ca72a81b02960376ccf700d76b128e67668
--- /dev/null
+++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/sts/test_credentials.py
@@ -0,0 +1,38 @@
+import unittest
+
+from boto.sts.credentials import Credentials
+
+
+class STSCredentialsTest(unittest.TestCase):
+    sts = True
+
+    def setUp(self):
+        super(STSCredentialsTest, self).setUp()
+        self.creds = Credentials()
+
+    def test_to_dict(self):
+        # This would fail miserably if ``Credentials.request_id`` hadn't been
+        # explicitly set (no default).
+        # Default.
+        self.assertEqual(self.creds.to_dict(), {
+            'access_key': None,
+            'expiration': None,
+            'request_id': None,
+            'secret_key': None,
+            'session_token': None
+        })
+
+        # Override.
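+        # Each attribute set below is expected to come back from
+        # to_dict() under the key of the same name.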
+ creds = Credentials() + creds.access_key = 'something' + creds.secret_key = 'crypto' + creds.session_token = 'this' + creds.expiration = 'way' + creds.request_id = 'comes' + self.assertEqual(creds.to_dict(), { + 'access_key': 'something', + 'expiration': 'way', + 'request_id': 'comes', + 'secret_key': 'crypto', + 'session_token': 'this' + }) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/swf/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/swf/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/swf/test_layer1_decisions.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/swf/test_layer1_decisions.py new file mode 100644 index 0000000000000000000000000000000000000000..c5adf612dbab7cd7d90eb44229097c676708f11f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/swf/test_layer1_decisions.py @@ -0,0 +1,35 @@ +from tests.unit import unittest + +import boto.swf.layer1_decisions + + +class TestDecisions(unittest.TestCase): + + def setUp(self): + self.decisions = boto.swf.layer1_decisions.Layer1Decisions() + + def assert_data(self, *data): + self.assertEquals(self.decisions._data, list(data)) + + def test_continue_as_new_workflow_execution(self): + self.decisions.continue_as_new_workflow_execution( + child_policy='TERMINATE', + execution_start_to_close_timeout='10', + input='input', + tag_list=['t1', 't2'], + task_list='tasklist', + start_to_close_timeout='20', + workflow_type_version='v2' + ) + self.assert_data({ + 'decisionType': 'ContinueAsNewWorkflowExecution', + 'continueAsNewWorkflowExecutionDecisionAttributes': { + 'childPolicy': 'TERMINATE', + 'executionStartToCloseTimeout': '10', + 'input': 'input', + 'tagList': ['t1', 't2'], + 'taskList': {'name': 'tasklist'}, + 'taskStartToCloseTimeout': '20', + 'workflowTypeVersion': 'v2', + } + }) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/swf/test_layer2_actors.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/swf/test_layer2_actors.py new file mode 100644 index 0000000000000000000000000000000000000000..cedf895b72d63e46fb4555212dd8870f47d1687f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/swf/test_layer2_actors.py @@ -0,0 +1,87 @@ +import boto.swf.layer2 +from boto.swf.layer2 import Decider, ActivityWorker +from tests.unit import unittest +from mock import Mock + + +class TestActors(unittest.TestCase): + + def setUp(self): + boto.swf.layer2.Layer1 = Mock() + self.worker = ActivityWorker(name='test-worker', domain='test', task_list='test_list') + self.decider = Decider(name='test-worker', domain='test', task_list='test_list') + self.worker._swf = Mock() + self.decider._swf = Mock() + + def test_decider_pass_tasktoken(self): + self.decider._swf.poll_for_decision_task.return_value = { + 'events': [{'eventId': 1, + 'eventTimestamp': 1379019427.953, + 'eventType': 'WorkflowExecutionStarted', + 'workflowExecutionStartedEventAttributes': { + 'childPolicy': 'TERMINATE', + 'executionStartToCloseTimeout': '3600', + 'parentInitiatedEventId': 0, + 'taskList': {'name': 'test_list'}, + 'taskStartToCloseTimeout': '123', + 'workflowType': {'name': 'test_workflow_name', + 'version': 'v1'}}}, + {'decisionTaskScheduledEventAttributes': + {'startToCloseTimeout': '123', + 'taskList': {'name': 'test_list'}}, + 'eventId': 2, + 'eventTimestamp': 1379019427.953, + 'eventType': 'DecisionTaskScheduled'}, + {'decisionTaskStartedEventAttributes': {'scheduledEventId': 2}, + 'eventId': 3, 
'eventTimestamp': 1379019495.585, + 'eventType': 'DecisionTaskStarted'}], + 'previousStartedEventId': 0, 'startedEventId': 3, + 'taskToken': 'my_specific_task_token', + 'workflowExecution': {'runId': 'fwr243dsa324132jmflkfu0943tr09=', + 'workflowId': 'test_workflow_name-v1-1379019427'}, + 'workflowType': {'name': 'test_workflow_name', 'version': 'v1'}} + + self.decider.poll() + self.decider.complete() + + self.decider._swf.respond_decision_task_completed.assert_called_with('my_specific_task_token', None) + self.assertEqual('my_specific_task_token', self.decider.last_tasktoken) + + def test_worker_pass_tasktoken(self): + task_token = 'worker_task_token' + self.worker._swf.poll_for_activity_task.return_value = { + 'activityId': 'SomeActivity-1379020713', + 'activityType': {'name': 'SomeActivity', 'version': '1.0'}, + 'startedEventId': 6, + 'taskToken': task_token, + 'workflowExecution': {'runId': '12T026NzGK5c4eMti06N9O3GHFuTDaNyA+8LFtoDkAwfE=', + 'workflowId': 'MyWorkflow-1.0-1379020705'}} + + self.worker.poll() + + self.worker.cancel(details='Cancelling!') + self.worker.complete(result='Done!') + self.worker.fail(reason='Failure!') + self.worker.heartbeat() + + self.worker._swf.respond_activity_task_canceled.assert_called_with(task_token, 'Cancelling!') + self.worker._swf.respond_activity_task_completed.assert_called_with(task_token, 'Done!') + self.worker._swf.respond_activity_task_failed.assert_called_with(task_token, None, 'Failure!') + self.worker._swf.record_activity_task_heartbeat.assert_called_with(task_token, None) + + def test_actor_poll_without_tasklist_override(self): + self.worker.poll() + self.decider.poll() + self.worker._swf.poll_for_activity_task.assert_called_with('test', 'test_list') + self.decider._swf.poll_for_decision_task.assert_called_with('test', 'test_list') + + def test_worker_override_tasklist(self): + self.worker.poll(task_list='some_other_tasklist') + self.worker._swf.poll_for_activity_task.assert_called_with('test', 'some_other_tasklist') + + def test_decider_override_tasklist(self): + self.decider.poll(task_list='some_other_tasklist') + self.decider._swf.poll_for_decision_task.assert_called_with('test', 'some_other_tasklist') + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/swf/test_layer2_base.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/swf/test_layer2_base.py new file mode 100644 index 0000000000000000000000000000000000000000..7790f458b1095a933058b641ec97126ba57cb058 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/swf/test_layer2_base.py @@ -0,0 +1,31 @@ +import boto.swf.layer2 +from boto.swf.layer2 import SWFBase +from tests.unit import unittest +from mock import Mock + + +MOCK_DOMAIN = 'Mock' +MOCK_ACCESS_KEY = 'inheritable access key' +MOCK_SECRET_KEY = 'inheritable secret key' +MOCK_REGION = 'Mock Region' + + +class TestBase(unittest.TestCase): + """ + Test for SWFBase. 
+ """ + def setUp(self): + boto.swf.layer2.Layer1 = Mock() + self.swf_base = SWFBase( + domain=MOCK_DOMAIN, aws_access_key_id=MOCK_ACCESS_KEY, + aws_secret_access_key=MOCK_SECRET_KEY, region=MOCK_REGION + ) + + def test_instantiation(self): + self.assertEquals(MOCK_DOMAIN, self.swf_base.domain) + self.assertEquals(MOCK_ACCESS_KEY, self.swf_base.aws_access_key_id) + self.assertEquals(MOCK_SECRET_KEY, + self.swf_base.aws_secret_access_key) + self.assertEquals(MOCK_REGION, self.swf_base.region) + boto.swf.layer2.Layer1.assert_called_with( + MOCK_ACCESS_KEY, MOCK_SECRET_KEY, region=MOCK_REGION) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/swf/test_layer2_domain.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/swf/test_layer2_domain.py new file mode 100644 index 0000000000000000000000000000000000000000..43efc8a2f50e773550217add9f57d1def0bafade --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/swf/test_layer2_domain.py @@ -0,0 +1,116 @@ +import boto.swf.layer2 +from boto.swf.layer2 import Domain, ActivityType, WorkflowType, WorkflowExecution +from tests.unit import unittest +from mock import Mock + + +class TestDomain(unittest.TestCase): + + def setUp(self): + boto.swf.layer2.Layer1 = Mock() + self.domain = Domain(name='test-domain', description='My test domain') + self.domain.aws_access_key_id = 'inheritable access key' + self.domain.aws_secret_access_key = 'inheritable secret key' + self.domain.region = 'test-region' + + def test_domain_instantiation(self): + self.assertEquals('test-domain', self.domain.name) + self.assertEquals('My test domain', self.domain.description) + + def test_domain_list_activities(self): + self.domain._swf.list_activity_types.return_value = { + 'typeInfos': [{'activityType': {'name': 'DeleteLocalFile', + 'version': '1.0'}, + 'creationDate': 1332853651.235, + 'status': 'REGISTERED'}, + {'activityType': {'name': 'DoUpdate', 'version': 'test'}, + 'creationDate': 1333463734.528, + 'status': 'REGISTERED'}, + {'activityType': {'name': 'GrayscaleTransform', + 'version': '1.0'}, + 'creationDate': 1332853651.18, + 'status': 'REGISTERED'}, + {'activityType': {'name': 'S3Download', 'version': '1.0'}, + 'creationDate': 1332853651.264, + 'status': 'REGISTERED'}, + {'activityType': {'name': 'S3Upload', 'version': '1.0'}, + 'creationDate': 1332853651.314, + 'status': 'REGISTERED'}, + {'activityType': {'name': 'SepiaTransform', 'version': '1.1'}, + 'creationDate': 1333373797.734, + 'status': 'REGISTERED'}]} + + expected_names = ('DeleteLocalFile', 'GrayscaleTransform', 'S3Download', + 'S3Upload', 'SepiaTransform', 'DoUpdate') + + activity_types = self.domain.activities() + self.assertEquals(6, len(activity_types)) + for activity_type in activity_types: + self.assertIsInstance(activity_type, ActivityType) + self.assertTrue(activity_type.name in expected_names) + self.assertEquals(self.domain.region, activity_type.region) + + def test_domain_list_workflows(self): + self.domain._swf.list_workflow_types.return_value = { + 'typeInfos': [{'creationDate': 1332853651.136, + 'description': 'Image processing sample workflow type', + 'status': 'REGISTERED', + 'workflowType': {'name': 'ProcessFile', 'version': '1.0'}}, + {'creationDate': 1333551719.89, + 'status': 'REGISTERED', + 'workflowType': {'name': 'test_workflow_name', + 'version': 'v1'}}]} + expected_names = ('ProcessFile', 'test_workflow_name') + + workflow_types = self.domain.workflows() + self.assertEquals(2, len(workflow_types)) + for workflow_type in workflow_types: + self.assertIsInstance(workflow_type, 
WorkflowType) + self.assertTrue(workflow_type.name in expected_names) + self.assertEquals(self.domain.aws_access_key_id, workflow_type.aws_access_key_id) + self.assertEquals(self.domain.aws_secret_access_key, workflow_type.aws_secret_access_key) + self.assertEquals(self.domain.name, workflow_type.domain) + self.assertEquals(self.domain.region, workflow_type.region) + + def test_domain_list_executions(self): + self.domain._swf.list_open_workflow_executions.return_value = { + 'executionInfos': [{'cancelRequested': False, + 'execution': {'runId': '12OeDTyoD27TDaafViz/QIlCHrYzspZmDgj0coIfjm868=', + 'workflowId': 'ProcessFile-1.0-1378933928'}, + 'executionStatus': 'OPEN', + 'startTimestamp': 1378933928.676, + 'workflowType': {'name': 'ProcessFile', + 'version': '1.0'}}, + {'cancelRequested': False, + 'execution': {'runId': '12GwBkx4hH6t2yaIh8LYxy5HyCM6HcyhDKePJCg0/ciJk=', + 'workflowId': 'ProcessFile-1.0-1378933927'}, + 'executionStatus': 'OPEN', + 'startTimestamp': 1378933927.919, + 'workflowType': {'name': 'ProcessFile', + 'version': '1.0'}}, + {'cancelRequested': False, + 'execution': {'runId': '12oRG3vEWrQ7oYBV+Bqi33Fht+ZRCYTt+tOdn5kLVcwKI=', + 'workflowId': 'ProcessFile-1.0-1378933926'}, + 'executionStatus': 'OPEN', + 'startTimestamp': 1378933927.04, + 'workflowType': {'name': 'ProcessFile', + 'version': '1.0'}}, + {'cancelRequested': False, + 'execution': {'runId': '12qrdcpYmad2cjnqJcM4Njm3qrCGvmRFR1wwQEt+a2ako=', + 'workflowId': 'ProcessFile-1.0-1378933874'}, + 'executionStatus': 'OPEN', + 'startTimestamp': 1378933874.956, + 'workflowType': {'name': 'ProcessFile', + 'version': '1.0'}}]} + + executions = self.domain.executions() + self.assertEquals(4, len(executions)) + for wf_execution in executions: + self.assertIsInstance(wf_execution, WorkflowExecution) + self.assertEquals(self.domain.aws_access_key_id, wf_execution.aws_access_key_id) + self.assertEquals(self.domain.aws_secret_access_key, wf_execution.aws_secret_access_key) + self.assertEquals(self.domain.name, wf_execution.domain) + self.assertEquals(self.domain.region, wf_execution.region) + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/swf/test_layer2_types.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/swf/test_layer2_types.py new file mode 100644 index 0000000000000000000000000000000000000000..d9b7db0d42067ec7ed3f18375083f6befc18369e --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/swf/test_layer2_types.py @@ -0,0 +1,46 @@ +import boto.swf.layer2 +from boto.swf.layer2 import ActivityType, WorkflowType, WorkflowExecution +from tests.unit import unittest +from mock import Mock, ANY + + +class TestTypes(unittest.TestCase): + + def setUp(self): + boto.swf.layer2.Layer1 = Mock() + + def test_workflow_type_register_defaults(self): + wf_type = WorkflowType(name='name', domain='test', version='1') + wf_type.register() + + wf_type._swf.register_workflow_type.assert_called_with('test', 'name', '1', + default_execution_start_to_close_timeout=ANY, + default_task_start_to_close_timeout=ANY, + default_child_policy=ANY + ) + + def test_activity_type_register_defaults(self): + act_type = ActivityType(name='name', domain='test', version='1') + act_type.register() + + act_type._swf.register_activity_type.assert_called_with('test', 'name', '1', + default_task_heartbeat_timeout=ANY, + default_task_schedule_to_close_timeout=ANY, + default_task_schedule_to_start_timeout=ANY, + default_task_start_to_close_timeout=ANY + ) + + def test_workflow_type_start_execution(self): + wf_type = 
WorkflowType(name='name', domain='test', version='1') + run_id = '122aJcg6ic7MRAkjDRzLBsqU/R49qt5D0LPHycT/6ArN4=' + wf_type._swf.start_workflow_execution.return_value = {'runId': run_id} + + execution = wf_type.start(task_list='hello_world') + + self.assertIsInstance(execution, WorkflowExecution) + self.assertEquals(wf_type.name, execution.name) + self.assertEquals(wf_type.version, execution.version) + self.assertEquals(run_id, execution.runId) + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/test_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..69e8816e91081889898a325e5fa329a264311b46 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/test_connection.py @@ -0,0 +1,539 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import os +import socket + +from tests.compat import mock, unittest +from httpretty import HTTPretty + +from boto import UserAgent +from boto.compat import json, parse_qs +from boto.connection import AWSQueryConnection, AWSAuthConnection, HTTPRequest +from boto.exception import BotoServerError +from boto.regioninfo import RegionInfo + + +class TestListParamsSerialization(unittest.TestCase): + maxDiff = None + + def setUp(self): + self.connection = AWSQueryConnection('access_key', 'secret_key') + + def test_complex_list_serialization(self): + # This example is taken from the doc string of + # build_complex_list_params. + params = {} + self.connection.build_complex_list_params( + params, [('foo', 'bar', 'baz'), ('foo2', 'bar2', 'baz2')], + 'ParamName.member', ('One', 'Two', 'Three')) + self.assertDictEqual({ + 'ParamName.member.1.One': 'foo', + 'ParamName.member.1.Two': 'bar', + 'ParamName.member.1.Three': 'baz', + 'ParamName.member.2.One': 'foo2', + 'ParamName.member.2.Two': 'bar2', + 'ParamName.member.2.Three': 'baz2', + }, params) + + def test_simple_list_serialization(self): + params = {} + self.connection.build_list_params( + params, ['foo', 'bar', 'baz'], 'ParamName.member') + self.assertDictEqual({ + 'ParamName.member.1': 'foo', + 'ParamName.member.2': 'bar', + 'ParamName.member.3': 'baz', + }, params) + + +class MockAWSService(AWSQueryConnection): + """ + Fake AWS Service + + This is used to test the AWSQueryConnection object is behaving properly. 
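+    It pins APIVersion and reports only 'sign-v2' auth capability, so
+    the signed query parameters asserted in the tests below are
+    deterministic.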
+ """ + + APIVersion = '2012-01-01' + + def _required_auth_capability(self): + return ['sign-v2'] + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, host=None, port=None, + proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + api_version=None, security_token=None, + validate_certs=True, profile_name=None): + self.region = region + if host is None: + host = self.region.endpoint + AWSQueryConnection.__init__(self, aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, + host, debug, + https_connection_factory, path, + security_token, + validate_certs=validate_certs, + profile_name=profile_name) + + +class TestAWSAuthConnection(unittest.TestCase): + def test_get_path(self): + conn = AWSAuthConnection( + 'mockservice.cc-zone-1.amazonaws.com', + aws_access_key_id='access_key', + aws_secret_access_key='secret', + suppress_consec_slashes=False + ) + # Test some sample paths for mangling. + self.assertEqual(conn.get_path('/'), '/') + self.assertEqual(conn.get_path('image.jpg'), '/image.jpg') + self.assertEqual(conn.get_path('folder/image.jpg'), '/folder/image.jpg') + self.assertEqual(conn.get_path('folder//image.jpg'), '/folder//image.jpg') + + # Ensure leading slashes aren't removed. + # See https://github.com/boto/boto/issues/1387 + self.assertEqual(conn.get_path('/folder//image.jpg'), '/folder//image.jpg') + self.assertEqual(conn.get_path('/folder////image.jpg'), '/folder////image.jpg') + self.assertEqual(conn.get_path('///folder////image.jpg'), '///folder////image.jpg') + + def test_connection_behind_proxy(self): + os.environ['http_proxy'] = "http://john.doe:p4ssw0rd@127.0.0.1:8180" + conn = AWSAuthConnection( + 'mockservice.cc-zone-1.amazonaws.com', + aws_access_key_id='access_key', + aws_secret_access_key='secret', + suppress_consec_slashes=False + ) + self.assertEqual(conn.proxy, '127.0.0.1') + self.assertEqual(conn.proxy_user, 'john.doe') + self.assertEqual(conn.proxy_pass, 'p4ssw0rd') + self.assertEqual(conn.proxy_port, '8180') + del os.environ['http_proxy'] + + def test_get_proxy_url_with_auth(self): + conn = AWSAuthConnection( + 'mockservice.cc-zone-1.amazonaws.com', + aws_access_key_id='access_key', + aws_secret_access_key='secret', + suppress_consec_slashes=False, + proxy="127.0.0.1", + proxy_user="john.doe", + proxy_pass="p4ssw0rd", + proxy_port="8180" + ) + self.assertEqual(conn.get_proxy_url_with_auth(), 'http://john.doe:p4ssw0rd@127.0.0.1:8180') + + def test_connection_behind_proxy_without_explicit_port(self): + os.environ['http_proxy'] = "http://127.0.0.1" + conn = AWSAuthConnection( + 'mockservice.cc-zone-1.amazonaws.com', + aws_access_key_id='access_key', + aws_secret_access_key='secret', + suppress_consec_slashes=False, + port=8180 + ) + self.assertEqual(conn.proxy, '127.0.0.1') + self.assertEqual(conn.proxy_port, 8180) + del os.environ['http_proxy'] + + @mock.patch.object(socket, 'create_connection') + @mock.patch('boto.compat.http_client.HTTPResponse') + @mock.patch('boto.compat.http_client.ssl') + def test_proxy_ssl(self, ssl_mock, http_response_mock, + create_connection_mock): + type(http_response_mock.return_value).status = mock.PropertyMock( + return_value=200) + + conn = AWSAuthConnection( + 'mockservice.cc-zone-1.amazonaws.com', + aws_access_key_id='access_key', + aws_secret_access_key='secret', + suppress_consec_slashes=False, + proxy_port=80 + ) + conn.https_validate_certificates = False + + # 
Attempt to call proxy_ssl and make sure it works + conn.proxy_ssl('mockservice.cc-zone-1.amazonaws.com', 80) + + # this tests the proper setting of the host_header in v4 signing + def test_host_header_with_nonstandard_port(self): + # test standard port first + conn = V4AuthConnection( + 'testhost', + aws_access_key_id='access_key', + aws_secret_access_key='secret') + request = conn.build_base_http_request( + method='POST', path='/', auth_path=None, params=None, headers=None, + data='', host=None) + conn.set_host_header(request) + self.assertEqual(request.headers['Host'], 'testhost') + + # next, test non-standard port + conn = V4AuthConnection( + 'testhost', + aws_access_key_id='access_key', + aws_secret_access_key='secret', + port=8773) + request = conn.build_base_http_request( + method='POST', path='/', auth_path=None, params=None, headers=None, + data='', host=None) + conn.set_host_header(request) + self.assertEqual(request.headers['Host'], 'testhost:8773') + + +class V4AuthConnection(AWSAuthConnection): + def __init__(self, host, aws_access_key_id, aws_secret_access_key, port=443): + AWSAuthConnection.__init__( + self, host, aws_access_key_id, aws_secret_access_key, port=port) + + def _required_auth_capability(self): + return ['hmac-v4'] + + +class TestAWSQueryConnection(unittest.TestCase): + def setUp(self): + self.region = RegionInfo( + name='cc-zone-1', + endpoint='mockservice.cc-zone-1.amazonaws.com', + connection_cls=MockAWSService) + + HTTPretty.enable() + + def tearDown(self): + HTTPretty.disable() + + +class TestAWSQueryConnectionSimple(TestAWSQueryConnection): + def test_query_connection_basis(self): + HTTPretty.register_uri(HTTPretty.POST, + 'https://%s/' % self.region.endpoint, + json.dumps({'test': 'secure'}), + content_type='application/json') + + conn = self.region.connect(aws_access_key_id='access_key', + aws_secret_access_key='secret') + + self.assertEqual(conn.host, 'mockservice.cc-zone-1.amazonaws.com') + + def test_query_connection_noproxy(self): + HTTPretty.register_uri(HTTPretty.POST, + 'https://%s/' % self.region.endpoint, + json.dumps({'test': 'secure'}), + content_type='application/json') + + os.environ['no_proxy'] = self.region.endpoint + + conn = self.region.connect(aws_access_key_id='access_key', + aws_secret_access_key='secret', + proxy="NON_EXISTENT_HOSTNAME", + proxy_port="3128") + + resp = conn.make_request('myCmd', + {'par1': 'foo', 'par2': 'baz'}, + "/", + "POST") + del os.environ['no_proxy'] + args = parse_qs(HTTPretty.last_request.body) + self.assertEqual(args[b'AWSAccessKeyId'], [b'access_key']) + + def test_query_connection_noproxy_nosecure(self): + HTTPretty.register_uri(HTTPretty.POST, + 'https://%s/' % self.region.endpoint, + json.dumps({'test': 'insecure'}), + content_type='application/json') + + os.environ['no_proxy'] = self.region.endpoint + + conn = self.region.connect(aws_access_key_id='access_key', + aws_secret_access_key='secret', + proxy="NON_EXISTENT_HOSTNAME", + proxy_port="3128", + is_secure=False) + + resp = conn.make_request('myCmd', + {'par1': 'foo', 'par2': 'baz'}, + "/", + "POST") + del os.environ['no_proxy'] + args = parse_qs(HTTPretty.last_request.body) + self.assertEqual(args[b'AWSAccessKeyId'], [b'access_key']) + + def test_single_command(self): + HTTPretty.register_uri(HTTPretty.POST, + 'https://%s/' % self.region.endpoint, + json.dumps({'test': 'secure'}), + content_type='application/json') + + conn = self.region.connect(aws_access_key_id='access_key', + aws_secret_access_key='secret') + resp = conn.make_request('myCmd', + 
{'par1': 'foo', 'par2': 'baz'}, + "/", + "POST") + + args = parse_qs(HTTPretty.last_request.body) + self.assertEqual(args[b'AWSAccessKeyId'], [b'access_key']) + self.assertEqual(args[b'SignatureMethod'], [b'HmacSHA256']) + self.assertEqual(args[b'Version'], [conn.APIVersion.encode('utf-8')]) + self.assertEqual(args[b'par1'], [b'foo']) + self.assertEqual(args[b'par2'], [b'baz']) + + self.assertEqual(resp.read(), b'{"test": "secure"}') + + def test_multi_commands(self): + """Check connection re-use""" + HTTPretty.register_uri(HTTPretty.POST, + 'https://%s/' % self.region.endpoint, + json.dumps({'test': 'secure'}), + content_type='application/json') + + conn = self.region.connect(aws_access_key_id='access_key', + aws_secret_access_key='secret') + + resp1 = conn.make_request('myCmd1', + {'par1': 'foo', 'par2': 'baz'}, + "/", + "POST") + body1 = parse_qs(HTTPretty.last_request.body) + + resp2 = conn.make_request('myCmd2', + {'par3': 'bar', 'par4': 'narf'}, + "/", + "POST") + body2 = parse_qs(HTTPretty.last_request.body) + + self.assertEqual(body1[b'par1'], [b'foo']) + self.assertEqual(body1[b'par2'], [b'baz']) + with self.assertRaises(KeyError): + body1[b'par3'] + + self.assertEqual(body2[b'par3'], [b'bar']) + self.assertEqual(body2[b'par4'], [b'narf']) + with self.assertRaises(KeyError): + body2['par1'] + + self.assertEqual(resp1.read(), b'{"test": "secure"}') + self.assertEqual(resp2.read(), b'{"test": "secure"}') + + def test_non_secure(self): + HTTPretty.register_uri(HTTPretty.POST, + 'http://%s/' % self.region.endpoint, + json.dumps({'test': 'normal'}), + content_type='application/json') + + conn = self.region.connect(aws_access_key_id='access_key', + aws_secret_access_key='secret', + is_secure=False) + resp = conn.make_request('myCmd1', + {'par1': 'foo', 'par2': 'baz'}, + "/", + "POST") + + self.assertEqual(resp.read(), b'{"test": "normal"}') + + def test_alternate_port(self): + HTTPretty.register_uri(HTTPretty.POST, + 'http://%s:8080/' % self.region.endpoint, + json.dumps({'test': 'alternate'}), + content_type='application/json') + + conn = self.region.connect(aws_access_key_id='access_key', + aws_secret_access_key='secret', + port=8080, + is_secure=False) + resp = conn.make_request('myCmd1', + {'par1': 'foo', 'par2': 'baz'}, + "/", + "POST") + + self.assertEqual(resp.read(), b'{"test": "alternate"}') + + def test_temp_failure(self): + responses = [HTTPretty.Response(body="{'test': 'fail'}", status=500), + HTTPretty.Response(body="{'test': 'success'}", status=200)] + + HTTPretty.register_uri(HTTPretty.POST, + 'https://%s/temp_fail/' % self.region.endpoint, + responses=responses) + + conn = self.region.connect(aws_access_key_id='access_key', + aws_secret_access_key='secret') + resp = conn.make_request('myCmd1', + {'par1': 'foo', 'par2': 'baz'}, + '/temp_fail/', + 'POST') + self.assertEqual(resp.read(), b"{'test': 'success'}") + + def test_unhandled_exception(self): + HTTPretty.register_uri(HTTPretty.POST, + 'https://%s/temp_exception/' % self.region.endpoint, + responses=[]) + + def fake_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, + source_address=None): + raise socket.timeout('fake error') + + socket.create_connection = fake_connection + + conn = self.region.connect(aws_access_key_id='access_key', + aws_secret_access_key='secret') + conn.num_retries = 0 + with self.assertRaises(socket.error): + resp = conn.make_request('myCmd1', + {'par1': 'foo', 'par2': 'baz'}, + '/temp_exception/', + 'POST') + + def test_connection_close(self): + """Check connection re-use after close 
header is received""" + HTTPretty.register_uri(HTTPretty.POST, + 'https://%s/' % self.region.endpoint, + json.dumps({'test': 'secure'}), + content_type='application/json', + connection='close') + + conn = self.region.connect(aws_access_key_id='access_key', + aws_secret_access_key='secret') + + def mock_put_conn(*args, **kwargs): + raise Exception('put_http_connection should not be called!') + + conn.put_http_connection = mock_put_conn + + resp1 = conn.make_request('myCmd1', + {'par1': 'foo', 'par2': 'baz'}, + "/", + "POST") + + # If we've gotten this far then no exception was raised + # by attempting to put the connection back into the pool + # Now let's just confirm the close header was actually + # set or we have another problem. + self.assertEqual(resp1.getheader('connection'), 'close') + + def test_port_pooling(self): + conn = self.region.connect(aws_access_key_id='access_key', + aws_secret_access_key='secret', + port=8080) + + # Pick a connection, then put it back + con1 = conn.get_http_connection(conn.host, conn.port, conn.is_secure) + conn.put_http_connection(conn.host, conn.port, conn.is_secure, con1) + + # Pick another connection, which hopefully is the same yet again + con2 = conn.get_http_connection(conn.host, conn.port, conn.is_secure) + conn.put_http_connection(conn.host, conn.port, conn.is_secure, con2) + + self.assertEqual(con1, con2) + + # Change the port and make sure a new connection is made + conn.port = 8081 + + con3 = conn.get_http_connection(conn.host, conn.port, conn.is_secure) + conn.put_http_connection(conn.host, conn.port, conn.is_secure, con3) + + self.assertNotEqual(con1, con3) + + +class TestAWSQueryStatus(TestAWSQueryConnection): + + def test_get_status(self): + HTTPretty.register_uri(HTTPretty.GET, + 'https://%s/status' % self.region.endpoint, + 'ok', + content_type='text/xml') + + conn = self.region.connect(aws_access_key_id='access_key', + aws_secret_access_key='secret') + resp = conn.get_status('getStatus', + {'par1': 'foo', 'par2': 'baz'}, + 'status') + + self.assertEqual(resp, "ok") + + def test_get_status_blank_error(self): + HTTPretty.register_uri(HTTPretty.GET, + 'https://%s/status' % self.region.endpoint, + '', + content_type='text/xml') + + conn = self.region.connect(aws_access_key_id='access_key', + aws_secret_access_key='secret') + with self.assertRaises(BotoServerError): + resp = conn.get_status('getStatus', + {'par1': 'foo', 'par2': 'baz'}, + 'status') + + def test_get_status_error(self): + HTTPretty.register_uri(HTTPretty.GET, + 'https://%s/status' % self.region.endpoint, + 'error', + content_type='text/xml', + status=400) + + conn = self.region.connect(aws_access_key_id='access_key', + aws_secret_access_key='secret') + with self.assertRaises(BotoServerError): + resp = conn.get_status('getStatus', + {'par1': 'foo', 'par2': 'baz'}, + 'status') + + +class TestHTTPRequest(unittest.TestCase): + def test_user_agent_not_url_encoded(self): + headers = {'Some-Header': u'should be url encoded', + 'User-Agent': UserAgent} + request = HTTPRequest('PUT', 'https', 'amazon.com', 443, None, + None, {}, headers, 'Body') + mock_connection = mock.Mock() + + # Create a method that preserves the headers at the time of + # authorization. + def mock_add_auth(req, **kwargs): + mock_connection.headers_at_auth = req.headers.copy() + + mock_connection._auth_handler.add_auth = mock_add_auth + + request.authorize(mock_connection) + # Ensure the headers at authorization are as expected i.e. + # the user agent header was not url encoded but the other header was. 
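+        # Concretely: 'should be url encoded' must come back quoted as
+        # 'should%20be%20url%20encoded', while the User-Agent value
+        # passes through verbatim.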
+ self.assertEqual(mock_connection.headers_at_auth, + {'Some-Header': 'should%20be%20url%20encoded', + 'User-Agent': UserAgent}) + + def test_content_length_str(self): + request = HTTPRequest('PUT', 'https', 'amazon.com', 443, None, + None, {}, {}, 'Body') + mock_connection = mock.Mock() + request.authorize(mock_connection) + + # Ensure Content-Length header is a str. This is more explicit than + # relying on other code cast the value later. (Python 2.7.0, for + # example, assumes headers are of type str.) + self.assertIsInstance(request.headers['Content-Length'], str) + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/test_endpoints.json b/desktop/core/ext-py/boto-2.38.0/tests/unit/test_endpoints.json new file mode 100644 index 0000000000000000000000000000000000000000..325176a750742cf1d65845135296da7802bd8c35 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/test_endpoints.json @@ -0,0 +1,5 @@ +{ + "ec2": { + "test-1": "ec2.test-1.amazonaws.com" + } +} diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/test_exception.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/test_exception.py new file mode 100644 index 0000000000000000000000000000000000000000..d9a2bdd33e12f5d17bf8e82b415d30275c308a97 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/test_exception.py @@ -0,0 +1,123 @@ +from tests.unit import unittest + +from boto.exception import BotoServerError, S3CreateError, JSONResponseError + +from httpretty import HTTPretty, httprettified + + +class TestBotoServerError(unittest.TestCase): + + def test_botoservererror_basics(self): + bse = BotoServerError('400', 'Bad Request') + self.assertEqual(bse.status, '400') + self.assertEqual(bse.reason, 'Bad Request') + + def test_message_elb_xml(self): + # This test XML response comes from #509 + xml = """ + + + Sender + LoadBalancerNotFound + Cannot find Load Balancer webapp-balancer2 + + 093f80d0-4473-11e1-9234-edce8ec08e2d +""" + bse = BotoServerError('400', 'Bad Request', body=xml) + + self.assertEqual(bse.error_message, 'Cannot find Load Balancer webapp-balancer2') + self.assertEqual(bse.error_message, bse.message) + self.assertEqual(bse.request_id, '093f80d0-4473-11e1-9234-edce8ec08e2d') + self.assertEqual(bse.error_code, 'LoadBalancerNotFound') + self.assertEqual(bse.status, '400') + self.assertEqual(bse.reason, 'Bad Request') + + def test_message_sd_xml(self): + # Sample XML response from: https://forums.aws.amazon.com/thread.jspa?threadID=87393 + xml = """ + + + + AuthorizationFailure + Session does not have permission to perform (sdb:CreateDomain) on resource (arn:aws:sdb:us-east-1:xxxxxxx:domain/test_domain). Contact account owner. + 0.0055590278 + + + e73bb2bb-63e3-9cdc-f220-6332de66dbbe +""" + bse = BotoServerError('403', 'Forbidden', body=xml) + self.assertEqual( + bse.error_message, + 'Session does not have permission to perform (sdb:CreateDomain) on ' + 'resource (arn:aws:sdb:us-east-1:xxxxxxx:domain/test_domain). 
' + 'Contact account owner.') + self.assertEqual(bse.error_message, bse.message) + self.assertEqual(bse.box_usage, '0.0055590278') + self.assertEqual(bse.error_code, 'AuthorizationFailure') + self.assertEqual(bse.status, '403') + self.assertEqual(bse.reason, 'Forbidden') + + @httprettified + def test_xmlns_not_loaded(self): + xml = '' + bse = BotoServerError('403', 'Forbidden', body=xml) + self.assertEqual([], HTTPretty.latest_requests) + + @httprettified + def test_xml_entity_not_loaded(self): + xml = ']>error:&xxe;' + bse = BotoServerError('403', 'Forbidden', body=xml) + self.assertEqual([], HTTPretty.latest_requests) + + def test_message_storage_create_error(self): + # This test value comes from https://answers.launchpad.net/duplicity/+question/150801 + xml = """ + + BucketAlreadyOwnedByYou + Your previous request to create the named bucket succeeded and you already own it. + cmsbk + FF8B86A32CC3FE4F + 6ENGL3DT9f0n7Tkv4qdKIs/uBNCMMA6QUFapw265WmodFDluP57esOOkecp55qhh + +""" + s3ce = S3CreateError('409', 'Conflict', body=xml) + + self.assertEqual(s3ce.bucket, 'cmsbk') + self.assertEqual(s3ce.error_code, 'BucketAlreadyOwnedByYou') + self.assertEqual(s3ce.status, '409') + self.assertEqual(s3ce.reason, 'Conflict') + self.assertEqual( + s3ce.error_message, + 'Your previous request to create the named bucket succeeded ' + 'and you already own it.') + self.assertEqual(s3ce.error_message, s3ce.message) + self.assertEqual(s3ce.request_id, 'FF8B86A32CC3FE4F') + + def test_message_json_response_error(self): + # This test comes from https://forums.aws.amazon.com/thread.jspa?messageID=374936 + body = { + '__type': 'com.amazon.coral.validate#ValidationException', + 'message': 'The attempted filter operation is not supported ' + 'for the provided filter argument count'} + + jre = JSONResponseError('400', 'Bad Request', body=body) + + self.assertEqual(jre.status, '400') + self.assertEqual(jre.reason, 'Bad Request') + self.assertEqual(jre.error_message, body['message']) + self.assertEqual(jre.error_message, jre.message) + self.assertEqual(jre.code, 'ValidationException') + self.assertEqual(jre.code, jre.error_code) + + def test_message_not_xml(self): + body = 'This is not XML' + + bse = BotoServerError('400', 'Bad Request', body=body) + self.assertEqual(bse.error_message, 'This is not XML') + + def test_getters(self): + body = "This is the body" + + bse = BotoServerError('400', 'Bad Request', body=body) + self.assertEqual(bse.code, bse.error_code) + self.assertEqual(bse.message, bse.error_message) diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/test_regioninfo.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/test_regioninfo.py new file mode 100644 index 0000000000000000000000000000000000000000..d262562cedc8627a8349f908f60e0b8d2205dcbb --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/test_regioninfo.py @@ -0,0 +1,144 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import os +from tests.unit import unittest + +import boto +from boto.regioninfo import RegionInfo, load_endpoint_json, merge_endpoints +from boto.regioninfo import load_regions, get_regions + + +class TestRegionInfo(object): + def __init__(self, connection=None, name=None, endpoint=None, + connection_cls=None): + self.connection = connection + self.name = name + self.endpoint = endpoint + self.connection_cls = connection_cls + + +class FakeConn(object): + pass + + +class TestEndpointLoading(unittest.TestCase): + def setUp(self): + super(TestEndpointLoading, self).setUp() + + def test_load_endpoint_json(self): + endpoints = load_endpoint_json(boto.ENDPOINTS_PATH) + self.assertTrue('ec2' in endpoints) + self.assertEqual( + endpoints['ec2']['us-east-1'], + 'ec2.us-east-1.amazonaws.com' + ) + + def test_merge_endpoints(self): + defaults = { + 'ec2': { + 'us-east-1': 'ec2.us-east-1.amazonaws.com', + 'us-west-1': 'ec2.us-west-1.amazonaws.com', + } + } + additions = { + # Top-level addition. + 's3': { + 'us-east-1': 's3.amazonaws.com' + }, + 'ec2': { + # Overwrite. This doesn't exist, just test data. + 'us-east-1': 'ec2.auto-resolve.amazonaws.com', + # Deep addition. + 'us-west-2': 'ec2.us-west-2.amazonaws.com', + } + } + + endpoints = merge_endpoints(defaults, additions) + self.assertEqual(endpoints, { + 'ec2': { + 'us-east-1': 'ec2.auto-resolve.amazonaws.com', + 'us-west-1': 'ec2.us-west-1.amazonaws.com', + 'us-west-2': 'ec2.us-west-2.amazonaws.com', + }, + 's3': { + 'us-east-1': 's3.amazonaws.com' + } + }) + + def test_load_regions(self): + # Just the defaults. + endpoints = load_regions() + self.assertTrue('us-east-1' in endpoints['ec2']) + self.assertFalse('test-1' in endpoints['ec2']) + + # With ENV overrides. + os.environ['BOTO_ENDPOINTS'] = os.path.join( + os.path.dirname(__file__), + 'test_endpoints.json' + ) + self.addCleanup(os.environ.pop, 'BOTO_ENDPOINTS') + endpoints = load_regions() + self.assertTrue('us-east-1' in endpoints['ec2']) + self.assertTrue('test-1' in endpoints['ec2']) + self.assertEqual(endpoints['ec2']['test-1'], 'ec2.test-1.amazonaws.com') + + def test_get_regions(self): + # With defaults. 
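+        # get_regions() builds RegionInfo objects from the bundled
+        # endpoint data; the default EC2 set should contain at least
+        # ten regions, including the us-west-2 entry checked below.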
+ ec2_regions = get_regions('ec2') + self.assertTrue(len(ec2_regions) >= 10) + west_2 = None + + for region_info in ec2_regions: + if region_info.name == 'us-west-2': + west_2 = region_info + break + + self.assertNotEqual(west_2, None, "Couldn't find the us-west-2 region!") + self.assertTrue(isinstance(west_2, RegionInfo)) + self.assertEqual(west_2.name, 'us-west-2') + self.assertEqual(west_2.endpoint, 'ec2.us-west-2.amazonaws.com') + self.assertEqual(west_2.connection_cls, None) + + def test_get_regions_overrides(self): + ec2_regions = get_regions( + 'ec2', + region_cls=TestRegionInfo, + connection_cls=FakeConn + ) + self.assertTrue(len(ec2_regions) >= 10) + west_2 = None + + for region_info in ec2_regions: + if region_info.name == 'us-west-2': + west_2 = region_info + break + + self.assertNotEqual(west_2, None, "Couldn't find the us-west-2 region!") + self.assertFalse(isinstance(west_2, RegionInfo)) + self.assertTrue(isinstance(west_2, TestRegionInfo)) + self.assertEqual(west_2.name, 'us-west-2') + self.assertEqual(west_2.endpoint, 'ec2.us-west-2.amazonaws.com') + self.assertEqual(west_2.connection_cls, FakeConn) + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/utils/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/utils/test_utils.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/utils/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d96978c1d0ed256a9e64606c5cb7d87abd5f2c7d --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/utils/test_utils.py @@ -0,0 +1,321 @@ +# Copyright (c) 2010 Robert Mela +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# +from tests.compat import mock, unittest + +import datetime +import hashlib +import hmac +import locale +import time + +import boto.utils +from boto.utils import Password +from boto.utils import pythonize_name +from boto.utils import _build_instance_metadata_url +from boto.utils import get_instance_userdata +from boto.utils import retry_url +from boto.utils import LazyLoadMetadata + +from boto.compat import json, _thread + + +@unittest.skip("http://bugs.python.org/issue7980") +class TestThreadImport(unittest.TestCase): + def test_strptime(self): + def f(): + for m in range(1, 13): + for d in range(1,29): + boto.utils.parse_ts('2013-01-01T00:00:00Z') + + for _ in range(10): + _thread.start_new_thread(f, ()) + + time.sleep(3) + + +class TestPassword(unittest.TestCase): + """Test basic password functionality""" + + def clstest(self, cls): + """Insure that password.__eq__ hashes test value before compare.""" + password = cls('foo') + self.assertNotEquals(password, 'foo') + + password.set('foo') + hashed = str(password) + self.assertEquals(password, 'foo') + self.assertEquals(password.str, hashed) + + password = cls(hashed) + self.assertNotEquals(password.str, 'foo') + self.assertEquals(password, 'foo') + self.assertEquals(password.str, hashed) + + def test_aaa_version_1_9_default_behavior(self): + self.clstest(Password) + + def test_custom_hashclass(self): + class SHA224Password(Password): + hashfunc = hashlib.sha224 + + password = SHA224Password() + password.set('foo') + self.assertEquals(hashlib.sha224(b'foo').hexdigest(), str(password)) + + def test_hmac(self): + def hmac_hashfunc(cls, msg): + if not isinstance(msg, bytes): + msg = msg.encode('utf-8') + return hmac.new(b'mysecretkey', msg) + + class HMACPassword(Password): + hashfunc = hmac_hashfunc + + self.clstest(HMACPassword) + password = HMACPassword() + password.set('foo') + + self.assertEquals(str(password), + hmac.new(b'mysecretkey', b'foo').hexdigest()) + + def test_constructor(self): + hmac_hashfunc = lambda msg: hmac.new(b'mysecretkey', msg) + + password = Password(hashfunc=hmac_hashfunc) + password.set('foo') + self.assertEquals(password.str, + hmac.new(b'mysecretkey', b'foo').hexdigest()) + + +class TestPythonizeName(unittest.TestCase): + def test_empty_string(self): + self.assertEqual(pythonize_name(''), '') + + def test_all_lower_case(self): + self.assertEqual(pythonize_name('lowercase'), 'lowercase') + + def test_all_upper_case(self): + self.assertEqual(pythonize_name('UPPERCASE'), 'uppercase') + + def test_camel_case(self): + self.assertEqual(pythonize_name('OriginallyCamelCased'), + 'originally_camel_cased') + + def test_already_pythonized(self): + self.assertEqual(pythonize_name('already_pythonized'), + 'already_pythonized') + + def test_multiple_upper_cased_letters(self): + self.assertEqual(pythonize_name('HTTPRequest'), 'http_request') + self.assertEqual(pythonize_name('RequestForHTTP'), 'request_for_http') + + def test_string_with_numbers(self): + self.assertEqual(pythonize_name('HTTPStatus200Ok'), 'http_status_200_ok') + + +class TestBuildInstanceMetadataURL(unittest.TestCase): + def test_normal(self): + # This is the all-defaults case. 
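+        # The helper joins its three arguments as <url>/<version>/<path>,
+        # yielding http://169.254.169.254/latest/meta-data/ here.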
+ self.assertEqual(_build_instance_metadata_url( + 'http://169.254.169.254', + 'latest', + 'meta-data/' + ), + 'http://169.254.169.254/latest/meta-data/' + ) + + def test_custom_path(self): + self.assertEqual(_build_instance_metadata_url( + 'http://169.254.169.254', + 'latest', + 'dynamic/' + ), + 'http://169.254.169.254/latest/dynamic/' + ) + + def test_custom_version(self): + self.assertEqual(_build_instance_metadata_url( + 'http://169.254.169.254', + '1.0', + 'meta-data/' + ), + 'http://169.254.169.254/1.0/meta-data/' + ) + + def test_custom_url(self): + self.assertEqual(_build_instance_metadata_url( + 'http://10.0.1.5', + 'latest', + 'meta-data/' + ), + 'http://10.0.1.5/latest/meta-data/' + ) + + def test_all_custom(self): + self.assertEqual(_build_instance_metadata_url( + 'http://10.0.1.5', + '2013-03-22', + 'user-data' + ), + 'http://10.0.1.5/2013-03-22/user-data' + ) + +class TestRetryURL(unittest.TestCase): + def setUp(self): + self.urlopen_patch = mock.patch('boto.compat.urllib.request.urlopen') + self.opener_patch = mock.patch('boto.compat.urllib.request.build_opener') + self.urlopen = self.urlopen_patch.start() + self.opener = self.opener_patch.start() + + def tearDown(self): + self.urlopen_patch.stop() + self.opener_patch.stop() + + def set_normal_response(self, response): + fake_response = mock.Mock() + fake_response.read.return_value = response + self.urlopen.return_value = fake_response + + def set_no_proxy_allowed_response(self, response): + fake_response = mock.Mock() + fake_response.read.return_value = response + self.opener.return_value.open.return_value = fake_response + + def test_retry_url_uses_proxy(self): + self.set_normal_response('normal response') + self.set_no_proxy_allowed_response('no proxy response') + + response = retry_url('http://10.10.10.10/foo', num_retries=1) + self.assertEqual(response, 'no proxy response') + + def test_retry_url_using_bytes_and_string_response(self): + test_value = 'normal response' + fake_response = mock.Mock() + + # test using unicode + fake_response.read.return_value = test_value + self.opener.return_value.open.return_value = fake_response + response = retry_url('http://10.10.10.10/foo', num_retries=1) + self.assertEqual(response, test_value) + + # test using bytes + fake_response.read.return_value = test_value.encode('utf-8') + self.opener.return_value.open.return_value = fake_response + response = retry_url('http://10.10.10.10/foo', num_retries=1) + self.assertEqual(response, test_value) + +class TestLazyLoadMetadata(unittest.TestCase): + + def setUp(self): + self.retry_url_patch = mock.patch('boto.utils.retry_url') + boto.utils.retry_url = self.retry_url_patch.start() + + def tearDown(self): + self.retry_url_patch.stop() + + def set_normal_response(self, data): + # here "data" should be a list of return values in some order + fake_response = mock.Mock() + fake_response.side_effect = data + boto.utils.retry_url = fake_response + + def test_meta_data_with_invalid_json_format_happened_once(self): + # here "key_data" will be stored in the "self._leaves" + # when the class "LazyLoadMetadata" initialized + key_data = "test" + invalid_data = '{"invalid_json_format" : true,}' + valid_data = '{ "%s" : {"valid_json_format": true}}' % key_data + url = "/".join(["http://169.254.169.254", key_data]) + num_retries = 2 + + self.set_normal_response([key_data, invalid_data, valid_data]) + response = LazyLoadMetadata(url, num_retries) + self.assertEqual(list(response.values())[0], json.loads(valid_data)) + + def 
test_meta_data_with_invalid_json_format_happened_twice(self): + key_data = "test" + invalid_data = '{"invalid_json_format" : true,}' + valid_data = '{ "%s" : {"valid_json_format": true}}' % key_data + url = "/".join(["http://169.254.169.254", key_data]) + num_retries = 2 + + self.set_normal_response([key_data, invalid_data, invalid_data]) + response = LazyLoadMetadata(url, num_retries) + with self.assertRaises(ValueError): + response.values()[0] + + def test_user_data(self): + self.set_normal_response(['foo']) + + userdata = get_instance_userdata() + + self.assertEqual('foo', userdata) + + boto.utils.retry_url.assert_called_with( + 'http://169.254.169.254/latest/user-data', + retry_on_404=False, + num_retries=5, timeout=None) + + def test_user_data_timeout(self): + self.set_normal_response(['foo']) + + userdata = get_instance_userdata(timeout=1, num_retries=2) + + self.assertEqual('foo', userdata) + + boto.utils.retry_url.assert_called_with( + 'http://169.254.169.254/latest/user-data', + retry_on_404=False, + num_retries=2, timeout=1) + + +class TestStringToDatetimeParsing(unittest.TestCase): + """ Test string to datetime parsing """ + def setUp(self): + self._saved = locale.setlocale(locale.LC_ALL) + try: + locale.setlocale(locale.LC_ALL, 'de_DE.UTF-8') + except locale.Error: + self.skipTest('Unsupported locale setting') + + def tearDown(self): + locale.setlocale(locale.LC_ALL, self._saved) + + def test_nonus_locale(self): + test_string = 'Thu, 15 May 2014 09:06:03 GMT' + + # Default strptime shoudl fail + with self.assertRaises(ValueError): + datetime.datetime.strptime(test_string, boto.utils.RFC1123) + + # Our parser should succeed + result = boto.utils.parse_ts(test_string) + + self.assertEqual(2014, result.year) + self.assertEqual(5, result.month) + self.assertEqual(15, result.day) + self.assertEqual(9, result.hour) + self.assertEqual(6, result.minute) + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/__init__.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c7856c5b1b52bd958bdae54feb9e37562c288821 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/__init__.py @@ -0,0 +1,3 @@ +""" +Test package for VPC +""" diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_customergateway.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_customergateway.py new file mode 100644 index 0000000000000000000000000000000000000000..1b24f407f999e70272c96525eb72a2ef4646041f --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_customergateway.py @@ -0,0 +1,115 @@ +from tests.compat import OrderedDict +from tests.unit import unittest +from tests.unit import AWSMockServiceTestCase + +from boto.vpc import VPCConnection, CustomerGateway + + +class TestDescribeCustomerGateways(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + + cgw-b4dc3961 + available + ipsec.1 + 12.1.2.3 + 65534 + + + + + """ + + def test_get_all_customer_gateways(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.get_all_customer_gateways( + 'cgw-b4dc3961', + filters=OrderedDict([('state', ['pending', 'available']), + ('ip-address', '12.1.2.3')])) + self.assert_request_parameters({ + 'Action': 'DescribeCustomerGateways', + 'CustomerGatewayId.1': 'cgw-b4dc3961', + 'Filter.1.Name': 'state', + 'Filter.1.Value.1': 
'pending', + 'Filter.1.Value.2': 'available', + 'Filter.2.Name': 'ip-address', + 'Filter.2.Value.1': '12.1.2.3'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(len(api_response), 1) + self.assertIsInstance(api_response[0], CustomerGateway) + self.assertEqual(api_response[0].id, 'cgw-b4dc3961') + + +class TestCreateCustomerGateway(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + cgw-b4dc3961 + pending + ipsec.1 + 12.1.2.3 + 65534 + + + + """ + + def test_create_customer_gateway(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.create_customer_gateway( + 'ipsec.1', '12.1.2.3', 65534) + self.assert_request_parameters({ + 'Action': 'CreateCustomerGateway', + 'Type': 'ipsec.1', + 'IpAddress': '12.1.2.3', + 'BgpAsn': 65534}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertIsInstance(api_response, CustomerGateway) + self.assertEquals(api_response.id, 'cgw-b4dc3961') + self.assertEquals(api_response.state, 'pending') + self.assertEquals(api_response.type, 'ipsec.1') + self.assertEquals(api_response.ip_address, '12.1.2.3') + self.assertEquals(api_response.bgp_asn, 65534) + + +class TestDeleteCustomerGateway(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + true + + """ + + def test_delete_customer_gateway(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.delete_customer_gateway('cgw-b4dc3961') + self.assert_request_parameters({ + 'Action': 'DeleteCustomerGateway', + 'CustomerGatewayId': 'cgw-b4dc3961'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, True) + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_dhcpoptions.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_dhcpoptions.py new file mode 100644 index 0000000000000000000000000000000000000000..222bf218294d5fe5b2dfe17377d2b658cb5033af --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_dhcpoptions.py @@ -0,0 +1,215 @@ +from tests.unit import unittest +from tests.unit import AWSMockServiceTestCase + +from boto.vpc import VPCConnection, DhcpOptions + + +class TestDescribeDhcpOptions(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + + dopt-7a8b9c2d + + + domain-name + + + example.com + + + + + domain-name-servers + + + 10.2.5.1 + + + + + domain-name-servers + + + 10.2.5.2 + + + + + + + + + """ + + def test_get_all_dhcp_options(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.get_all_dhcp_options(['dopt-7a8b9c2d'], + [('key', 'domain-name')]) + self.assert_request_parameters({ + 'Action': 'DescribeDhcpOptions', + 'DhcpOptionsId.1': 'dopt-7a8b9c2d', + 'Filter.1.Name': 'key', + 'Filter.1.Value.1': 'domain-name'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(len(api_response), 1) + self.assertIsInstance(api_response[0], DhcpOptions) + self.assertEquals(api_response[0].id, 'dopt-7a8b9c2d') + 
self.assertEquals(api_response[0].options['domain-name'], ['example.com']) + self.assertEquals(api_response[0].options['domain-name-servers'], ['10.2.5.1', '10.2.5.2']) + + +class TestCreateDhcpOptions(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + dopt-7a8b9c2d + + + domain-name + + + example.com + + + + + domain-name-servers + + + 10.2.5.1 + + + 10.2.5.2 + + + + + ntp-servers + + + 10.12.12.1 + + + 10.12.12.2 + + + + + netbios-name-servers + + + 10.20.20.1 + + + + + netbios-node-type + + + 2 + + + + + + + + """ + + def test_create_dhcp_options(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.create_dhcp_options( + domain_name='example.com', domain_name_servers=['10.2.5.1', '10.2.5.2'], + ntp_servers=('10.12.12.1', '10.12.12.2'), + netbios_name_servers='10.20.20.1', + netbios_node_type='2') + self.assert_request_parameters({ + 'Action': 'CreateDhcpOptions', + 'DhcpConfiguration.1.Key': 'domain-name', + 'DhcpConfiguration.1.Value.1': 'example.com', + 'DhcpConfiguration.2.Key': 'domain-name-servers', + 'DhcpConfiguration.2.Value.1': '10.2.5.1', + 'DhcpConfiguration.2.Value.2': '10.2.5.2', + 'DhcpConfiguration.3.Key': 'ntp-servers', + 'DhcpConfiguration.3.Value.1': '10.12.12.1', + 'DhcpConfiguration.3.Value.2': '10.12.12.2', + 'DhcpConfiguration.4.Key': 'netbios-name-servers', + 'DhcpConfiguration.4.Value.1': '10.20.20.1', + 'DhcpConfiguration.5.Key': 'netbios-node-type', + 'DhcpConfiguration.5.Value.1': '2'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertIsInstance(api_response, DhcpOptions) + self.assertEquals(api_response.id, 'dopt-7a8b9c2d') + self.assertEquals(api_response.options['domain-name'], ['example.com']) + self.assertEquals(api_response.options['domain-name-servers'], ['10.2.5.1', '10.2.5.2']) + self.assertEquals(api_response.options['ntp-servers'], ['10.12.12.1', '10.12.12.2']) + self.assertEquals(api_response.options['netbios-name-servers'], ['10.20.20.1']) + self.assertEquals(api_response.options['netbios-node-type'], ['2']) + + +class TestDeleteDhcpOptions(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + true + + """ + + def test_delete_dhcp_options(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.delete_dhcp_options('dopt-7a8b9c2d') + self.assert_request_parameters({ + 'Action': 'DeleteDhcpOptions', + 'DhcpOptionsId': 'dopt-7a8b9c2d'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, True) + + +class TestAssociateDhcpOptions(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + true + + """ + + def test_associate_dhcp_options(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.associate_dhcp_options( + 'dopt-7a8b9c2d', 'vpc-1a2b3c4d') + self.assert_request_parameters({ + 'Action': 'AssociateDhcpOptions', + 'DhcpOptionsId': 'dopt-7a8b9c2d', + 'VpcId': 'vpc-1a2b3c4d'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, True) + +if __name__ == '__main__': + unittest.main() diff --git 
a/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_internetgateway.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_internetgateway.py new file mode 100644 index 0000000000000000000000000000000000000000..05f76af0977e80a10fcad54d6ed5b51d8a9ed486 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_internetgateway.py @@ -0,0 +1,152 @@ +from tests.unit import unittest +from tests.unit import AWSMockServiceTestCase + +from boto.vpc import VPCConnection, InternetGateway + + +class TestDescribeInternetGateway(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + + igw-eaad4883EXAMPLE + + + vpc-11ad4878 + available + + + + + + + """ + + def test_describe_internet_gateway(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.get_all_internet_gateways( + 'igw-eaad4883EXAMPLE', filters=[('attachment.state', ['available', 'pending'])]) + self.assert_request_parameters({ + 'Action': 'DescribeInternetGateways', + 'InternetGatewayId.1': 'igw-eaad4883EXAMPLE', + 'Filter.1.Name': 'attachment.state', + 'Filter.1.Value.1': 'available', + 'Filter.1.Value.2': 'pending'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(len(api_response), 1) + self.assertIsInstance(api_response[0], InternetGateway) + self.assertEqual(api_response[0].id, 'igw-eaad4883EXAMPLE') + + +class TestCreateInternetGateway(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + igw-eaad4883 + + + + + """ + + def test_create_internet_gateway(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.create_internet_gateway() + self.assert_request_parameters({ + 'Action': 'CreateInternetGateway'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertIsInstance(api_response, InternetGateway) + self.assertEqual(api_response.id, 'igw-eaad4883') + + +class TestDeleteInternetGateway(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + true + + """ + + def test_delete_internet_gateway(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.delete_internet_gateway('igw-eaad4883') + self.assert_request_parameters({ + 'Action': 'DeleteInternetGateway', + 'InternetGatewayId': 'igw-eaad4883'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, True) + + +class TestAttachInternetGateway(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + true + + """ + + def test_attach_internet_gateway(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.attach_internet_gateway( + 'igw-eaad4883', 'vpc-11ad4878') + self.assert_request_parameters({ + 'Action': 'AttachInternetGateway', + 'InternetGatewayId': 'igw-eaad4883', + 'VpcId': 'vpc-11ad4878'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, True) + + +class TestDetachInternetGateway(AWSMockServiceTestCase): + + connection_class = VPCConnection 
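+    # As in the gateway tests above, default_body is the canned XML the
+    # mocked service returns, while assert_request_parameters checks the
+    # query string boto builds for the DetachInternetGateway call.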
+ + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + true + + """ + + def test_detach_internet_gateway(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.detach_internet_gateway( + 'igw-eaad4883', 'vpc-11ad4878') + self.assert_request_parameters({ + 'Action': 'DetachInternetGateway', + 'InternetGatewayId': 'igw-eaad4883', + 'VpcId': 'vpc-11ad4878'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, True) + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_networkacl.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_networkacl.py new file mode 100644 index 0000000000000000000000000000000000000000..d399b22d43cef47d5a98341171da0b03444d8dd5 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_networkacl.py @@ -0,0 +1,521 @@ +from tests.unit import unittest +from tests.unit import AWSMockServiceTestCase + +from boto.vpc import VPCConnection + + +class TestDescribeNetworkAcls(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + + acl-5566953c + vpc-5266953b + true + + + 100 + all + allow + true + 0.0.0.0/0 + + + 32767 + all + deny + true + 0.0.0.0/0 + + + 100 + all + allow + false + 0.0.0.0/0 + + + 32767 + all + deny + false + 0.0.0.0/0 + + + + + + + acl-5d659634 + vpc-5266953b + false + + + 110 + 6 + allow + true + 0.0.0.0/0 + + 49152 + 65535 + + + + 32767 + all + deny + true + 0.0.0.0/0 + + + 110 + 6 + allow + false + 0.0.0.0/0 + + 80 + 80 + + + + 120 + 6 + allow + false + 0.0.0.0/0 + + 443 + 443 + + + + 32767 + all + deny + false + 0.0.0.0/0 + + + + + aclassoc-5c659635 + acl-5d659634 + subnet-ff669596 + + + aclassoc-c26596ab + acl-5d659634 + subnet-f0669599 + + + + + + + """ + + def test_get_all_network_acls(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_all_network_acls(['acl-5566953c', 'acl-5d659634'], + [('vpc-id', 'vpc-5266953b')]) + self.assert_request_parameters({ + 'Action': 'DescribeNetworkAcls', + 'NetworkAclId.1': 'acl-5566953c', + 'NetworkAclId.2': 'acl-5d659634', + 'Filter.1.Name': 'vpc-id', + 'Filter.1.Value.1': 'vpc-5266953b'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(len(response), 2) + + +class TestReplaceNetworkAclAssociation(AWSMockServiceTestCase): + + connection_class = VPCConnection + + get_all_network_acls_vpc_body = b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + + acl-5566953c + vpc-5266953b + true + + + 100 + all + allow + true + 0.0.0.0/0 + + + 32767 + all + deny + true + 0.0.0.0/0 + + + 100 + all + allow + false + 0.0.0.0/0 + + + 32767 + all + deny + false + 0.0.0.0/0 + + + + + + + + + """ + + get_all_network_acls_subnet_body = b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + + acl-5d659634 + vpc-5266953b + false + + + 110 + 6 + allow + true + 0.0.0.0/0 + + 49152 + 65535 + + + + + + aclassoc-c26596ab + acl-5d659634 + subnet-f0669599 + + + aclassoc-5c659635 + acl-5d659634 + subnet-ff669596 + + + + + + + """ + + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + aclassoc-17b85d7e + + """ + + def test_associate_network_acl(self): + self.https_connection.getresponse.side_effect = [ + self.create_response(status_code=200, body=self.get_all_network_acls_subnet_body), + 
self.create_response(status_code=200) + ] + response = self.service_connection.associate_network_acl('acl-5fb85d36', 'subnet-ff669596') + # Note: Not testing proper call to get_all_network_acls! + self.assert_request_parameters({ + 'Action': 'ReplaceNetworkAclAssociation', + 'NetworkAclId': 'acl-5fb85d36', + 'AssociationId': 'aclassoc-5c659635'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(response, 'aclassoc-17b85d7e') + + def test_disassociate_network_acl(self): + self.https_connection.getresponse.side_effect = [ + self.create_response(status_code=200, body=self.get_all_network_acls_vpc_body), + self.create_response(status_code=200, body=self.get_all_network_acls_subnet_body), + self.create_response(status_code=200) + ] + response = self.service_connection.disassociate_network_acl('subnet-ff669596', + 'vpc-5266953b') + # Note: Not testing proper call to either call to get_all_network_acls! + self.assert_request_parameters({ + 'Action': 'ReplaceNetworkAclAssociation', + 'NetworkAclId': 'acl-5566953c', + 'AssociationId': 'aclassoc-5c659635'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(response, 'aclassoc-17b85d7e') + + +class TestCreateNetworkAcl(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + acl-5fb85d36 + vpc-11ad4878 + false + + + 32767 + all + deny + true + 0.0.0.0/0 + + + 32767 + all + deny + false + 0.0.0.0/0 + + + + + + + """ + + def test_create_network_acl(self): + self.set_http_response(status_code=200) + response = self.service_connection.create_network_acl('vpc-11ad4878') + self.assert_request_parameters({ + 'Action': 'CreateNetworkAcl', + 'VpcId': 'vpc-11ad4878'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(response.id, 'acl-5fb85d36') + + +class DeleteCreateNetworkAcl(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + true + + """ + + def test_delete_network_acl(self): + self.set_http_response(status_code=200) + response = self.service_connection.delete_network_acl('acl-2cb85d45') + self.assert_request_parameters({ + 'Action': 'DeleteNetworkAcl', + 'NetworkAclId': 'acl-2cb85d45'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(response, True) + + +class TestCreateNetworkAclEntry(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + true + + """ + + def test_create_network_acl(self): + self.set_http_response(status_code=200) + response = self.service_connection.create_network_acl_entry( + 'acl-2cb85d45', 110, 'udp', 'allow', '0.0.0.0/0', egress=False, + port_range_from=53, port_range_to=53) + self.assert_request_parameters({ + 'Action': 'CreateNetworkAclEntry', + 'NetworkAclId': 'acl-2cb85d45', + 'RuleNumber': 110, + 'Protocol': 'udp', + 'RuleAction': 'allow', + 'Egress': 'false', + 'CidrBlock': '0.0.0.0/0', + 'PortRange.From': 53, + 'PortRange.To': 53}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(response, True) + + def test_create_network_acl_icmp(self): + 
self.set_http_response(status_code=200) + response = self.service_connection.create_network_acl_entry( + 'acl-2cb85d45', 110, 'udp', 'allow', '0.0.0.0/0', egress='true', + icmp_code=-1, icmp_type=8) + self.assert_request_parameters({ + 'Action': 'CreateNetworkAclEntry', + 'NetworkAclId': 'acl-2cb85d45', + 'RuleNumber': 110, + 'Protocol': 'udp', + 'RuleAction': 'allow', + 'Egress': 'true', + 'CidrBlock': '0.0.0.0/0', + 'Icmp.Code': -1, + 'Icmp.Type': 8}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(response, True) + + +class TestReplaceNetworkAclEntry(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + true + + """ + + def test_replace_network_acl(self): + self.set_http_response(status_code=200) + response = self.service_connection.replace_network_acl_entry( + 'acl-2cb85d45', 110, 'tcp', 'deny', '0.0.0.0/0', egress=False, + port_range_from=139, port_range_to=139) + self.assert_request_parameters({ + 'Action': 'ReplaceNetworkAclEntry', + 'NetworkAclId': 'acl-2cb85d45', + 'RuleNumber': 110, + 'Protocol': 'tcp', + 'RuleAction': 'deny', + 'Egress': 'false', + 'CidrBlock': '0.0.0.0/0', + 'PortRange.From': 139, + 'PortRange.To': 139}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(response, True) + + def test_replace_network_acl_icmp(self): + self.set_http_response(status_code=200) + response = self.service_connection.replace_network_acl_entry( + 'acl-2cb85d45', 110, 'tcp', 'deny', '0.0.0.0/0', + icmp_code=-1, icmp_type=8) + self.assert_request_parameters({ + 'Action': 'ReplaceNetworkAclEntry', + 'NetworkAclId': 'acl-2cb85d45', + 'RuleNumber': 110, + 'Protocol': 'tcp', + 'RuleAction': 'deny', + 'CidrBlock': '0.0.0.0/0', + 'Icmp.Code': -1, + 'Icmp.Type': 8}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(response, True) + + +class TestDeleteNetworkAclEntry(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + true + + """ + + def test_delete_network_acl(self): + self.set_http_response(status_code=200) + response = self.service_connection.delete_network_acl_entry('acl-2cb85d45', 100, + egress=False) + self.assert_request_parameters({ + 'Action': 'DeleteNetworkAclEntry', + 'NetworkAclId': 'acl-2cb85d45', + 'RuleNumber': 100, + 'Egress': 'false'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(response, True) + + +class TestGetNetworkAclAssociations(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + + acl-5d659634 + vpc-5266953b + false + + + 110 + 6 + allow + true + 0.0.0.0/0 + + 49152 + 65535 + + + + + + aclassoc-c26596ab + acl-5d659634 + subnet-f0669599 + + + + + + + """ + + def test_get_network_acl_associations(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.get_all_network_acls() + association = api_response[0].associations[0] + self.assertEqual(association.network_acl_id, 'acl-5d659634') + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_routetable.py 
b/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_routetable.py new file mode 100644 index 0000000000000000000000000000000000000000..c90e56c4f67882e67a504c8cb9c12858c0adf7e6 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_routetable.py @@ -0,0 +1,440 @@ +from tests.unit import unittest +from tests.unit import AWSMockServiceTestCase + +from boto.vpc import VPCConnection, RouteTable + + +class TestDescribeRouteTables(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 6f570b0b-9c18-4b07-bdec-73740dcf861a + + + rtb-13ad487a + vpc-11ad4878 + + + 10.0.0.0/22 + local + active + CreateRouteTable + + + + + rtbassoc-12ad487b + rtb-13ad487a +
true + + + +
    + + rtb-f9ad4890 + vpc-11ad4878 + + + 10.0.0.0/22 + local + active + CreateRouteTable + + + 0.0.0.0/0 + igw-eaad4883 + active + + + 10.0.0.0/21 + eni-884ec1d1 + blackhole + CreateRoute + + + 11.0.0.0/22 + pcx-efc52b86 + blackhole + CreateRoute + + + + + rtbassoc-faad4893 + rtb-f9ad4890 + subnet-15ad487c + + + + +
    +
    + """ + + def test_get_all_route_tables(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.get_all_route_tables( + ['rtb-13ad487a', 'rtb-f9ad4890'], filters=[('route.state', 'active')]) + self.assert_request_parameters({ + 'Action': 'DescribeRouteTables', + 'RouteTableId.1': 'rtb-13ad487a', + 'RouteTableId.2': 'rtb-f9ad4890', + 'Filter.1.Name': 'route.state', + 'Filter.1.Value.1': 'active'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(len(api_response), 2) + self.assertIsInstance(api_response[0], RouteTable) + self.assertEquals(api_response[0].id, 'rtb-13ad487a') + self.assertEquals(len(api_response[0].routes), 1) + self.assertEquals(api_response[0].routes[0].destination_cidr_block, '10.0.0.0/22') + self.assertEquals(api_response[0].routes[0].gateway_id, 'local') + self.assertEquals(api_response[0].routes[0].state, 'active') + self.assertEquals(len(api_response[0].associations), 1) + self.assertEquals(api_response[0].associations[0].id, 'rtbassoc-12ad487b') + self.assertEquals(api_response[0].associations[0].route_table_id, 'rtb-13ad487a') + self.assertIsNone(api_response[0].associations[0].subnet_id) + self.assertEquals(api_response[0].associations[0].main, True) + self.assertEquals(api_response[1].id, 'rtb-f9ad4890') + self.assertEquals(len(api_response[1].routes), 4) + self.assertEquals(api_response[1].routes[0].destination_cidr_block, '10.0.0.0/22') + self.assertEquals(api_response[1].routes[0].gateway_id, 'local') + self.assertEquals(api_response[1].routes[0].state, 'active') + self.assertEquals(api_response[1].routes[1].destination_cidr_block, '0.0.0.0/0') + self.assertEquals(api_response[1].routes[1].gateway_id, 'igw-eaad4883') + self.assertEquals(api_response[1].routes[1].state, 'active') + self.assertEquals(api_response[1].routes[2].destination_cidr_block, '10.0.0.0/21') + self.assertEquals(api_response[1].routes[2].interface_id, 'eni-884ec1d1') + self.assertEquals(api_response[1].routes[2].state, 'blackhole') + self.assertEquals(api_response[1].routes[3].destination_cidr_block, '11.0.0.0/22') + self.assertEquals(api_response[1].routes[3].vpc_peering_connection_id, 'pcx-efc52b86') + self.assertEquals(api_response[1].routes[3].state, 'blackhole') + self.assertEquals(len(api_response[1].associations), 1) + self.assertEquals(api_response[1].associations[0].id, 'rtbassoc-faad4893') + self.assertEquals(api_response[1].associations[0].route_table_id, 'rtb-f9ad4890') + self.assertEquals(api_response[1].associations[0].subnet_id, 'subnet-15ad487c') + self.assertEquals(api_response[1].associations[0].main, False) + + +class TestAssociateRouteTable(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + rtbassoc-f8ad4891 + + """ + + def test_associate_route_table(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.associate_route_table( + 'rtb-e4ad488d', 'subnet-15ad487c') + self.assert_request_parameters({ + 'Action': 'AssociateRouteTable', + 'RouteTableId': 'rtb-e4ad488d', + 'SubnetId': 'subnet-15ad487c'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, 'rtbassoc-f8ad4891') + + +class TestDisassociateRouteTable(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 
59dbff89-35bd-4eac-99ed-be587EXAMPLE + true + + """ + + def test_disassociate_route_table(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.disassociate_route_table('rtbassoc-fdad4894') + self.assert_request_parameters({ + 'Action': 'DisassociateRouteTable', + 'AssociationId': 'rtbassoc-fdad4894'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, True) + + +class TestCreateRouteTable(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + rtb-f9ad4890 + vpc-11ad4878 + + + 10.0.0.0/22 + local + active + + + + + + + """ + + def test_create_route_table(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.create_route_table('vpc-11ad4878') + self.assert_request_parameters({ + 'Action': 'CreateRouteTable', + 'VpcId': 'vpc-11ad4878'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertIsInstance(api_response, RouteTable) + self.assertEquals(api_response.id, 'rtb-f9ad4890') + self.assertEquals(len(api_response.routes), 1) + self.assertEquals(api_response.routes[0].destination_cidr_block, '10.0.0.0/22') + self.assertEquals(api_response.routes[0].gateway_id, 'local') + self.assertEquals(api_response.routes[0].state, 'active') + + +class TestDeleteRouteTable(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + true + + """ + + def test_delete_route_table(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.delete_route_table('rtb-e4ad488d') + self.assert_request_parameters({ + 'Action': 'DeleteRouteTable', + 'RouteTableId': 'rtb-e4ad488d'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, True) + + +class TestReplaceRouteTableAssociation(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + rtbassoc-faad4893 + + """ + + def test_replace_route_table_assocation(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.replace_route_table_assocation( + 'rtbassoc-faad4893', 'rtb-f9ad4890') + self.assert_request_parameters({ + 'Action': 'ReplaceRouteTableAssociation', + 'AssociationId': 'rtbassoc-faad4893', + 'RouteTableId': 'rtb-f9ad4890'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, True) + + def test_replace_route_table_association_with_assoc(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.replace_route_table_association_with_assoc( + 'rtbassoc-faad4893', 'rtb-f9ad4890') + self.assert_request_parameters({ + 'Action': 'ReplaceRouteTableAssociation', + 'AssociationId': 'rtbassoc-faad4893', + 'RouteTableId': 'rtb-f9ad4890'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, 'rtbassoc-faad4893') + + +class TestCreateRoute(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + true + + """ + + def 
test_create_route_gateway(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.create_route( + 'rtb-e4ad488d', '0.0.0.0/0', gateway_id='igw-eaad4883') + self.assert_request_parameters({ + 'Action': 'CreateRoute', + 'RouteTableId': 'rtb-e4ad488d', + 'DestinationCidrBlock': '0.0.0.0/0', + 'GatewayId': 'igw-eaad4883'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, True) + + def test_create_route_instance(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.create_route( + 'rtb-g8ff4ea2', '0.0.0.0/0', instance_id='i-1a2b3c4d') + self.assert_request_parameters({ + 'Action': 'CreateRoute', + 'RouteTableId': 'rtb-g8ff4ea2', + 'DestinationCidrBlock': '0.0.0.0/0', + 'InstanceId': 'i-1a2b3c4d'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, True) + + def test_create_route_interface(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.create_route( + 'rtb-g8ff4ea2', '0.0.0.0/0', interface_id='eni-1a2b3c4d') + self.assert_request_parameters({ + 'Action': 'CreateRoute', + 'RouteTableId': 'rtb-g8ff4ea2', + 'DestinationCidrBlock': '0.0.0.0/0', + 'NetworkInterfaceId': 'eni-1a2b3c4d'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, True) + + def test_create_route_vpc_peering_connection(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.create_route( + 'rtb-g8ff4ea2', '0.0.0.0/0', vpc_peering_connection_id='pcx-1a2b3c4d') + self.assert_request_parameters({ + 'Action': 'CreateRoute', + 'RouteTableId': 'rtb-g8ff4ea2', + 'DestinationCidrBlock': '0.0.0.0/0', + 'VpcPeeringConnectionId': 'pcx-1a2b3c4d'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, True) + + +class TestReplaceRoute(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + true + + """ + + def test_replace_route_gateway(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.replace_route( + 'rtb-e4ad488d', '0.0.0.0/0', gateway_id='igw-eaad4883') + self.assert_request_parameters({ + 'Action': 'ReplaceRoute', + 'RouteTableId': 'rtb-e4ad488d', + 'DestinationCidrBlock': '0.0.0.0/0', + 'GatewayId': 'igw-eaad4883'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, True) + + def test_replace_route_instance(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.replace_route( + 'rtb-g8ff4ea2', '0.0.0.0/0', instance_id='i-1a2b3c4d') + self.assert_request_parameters({ + 'Action': 'ReplaceRoute', + 'RouteTableId': 'rtb-g8ff4ea2', + 'DestinationCidrBlock': '0.0.0.0/0', + 'InstanceId': 'i-1a2b3c4d'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, True) + + def test_replace_route_interface(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.replace_route( + 'rtb-g8ff4ea2', '0.0.0.0/0', interface_id='eni-1a2b3c4d') + self.assert_request_parameters({ 
+ 'Action': 'ReplaceRoute', + 'RouteTableId': 'rtb-g8ff4ea2', + 'DestinationCidrBlock': '0.0.0.0/0', + 'NetworkInterfaceId': 'eni-1a2b3c4d'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, True) + + def test_replace_route_vpc_peering_connection(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.replace_route( + 'rtb-g8ff4ea2', '0.0.0.0/0', vpc_peering_connection_id='pcx-1a2b3c4d') + self.assert_request_parameters({ + 'Action': 'ReplaceRoute', + 'RouteTableId': 'rtb-g8ff4ea2', + 'DestinationCidrBlock': '0.0.0.0/0', + 'VpcPeeringConnectionId': 'pcx-1a2b3c4d'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, True) + + +class TestDeleteRoute(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + true + + """ + + def test_delete_route(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.delete_route('rtb-e4ad488d', '172.16.1.0/24') + self.assert_request_parameters({ + 'Action': 'DeleteRoute', + 'RouteTableId': 'rtb-e4ad488d', + 'DestinationCidrBlock': '172.16.1.0/24'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, True) + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_subnet.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_subnet.py new file mode 100644 index 0000000000000000000000000000000000000000..209d4f0f42ce6e029e092e1b3e47203bf0263be9 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_subnet.py @@ -0,0 +1,133 @@ +from tests.compat import OrderedDict +from tests.unit import unittest +from tests.unit import AWSMockServiceTestCase + +from boto.vpc import VPCConnection, Subnet + + +class TestDescribeSubnets(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + + subnet-9d4a7b6c + available + vpc-1a2b3c4d + 10.0.1.0/24 + 251 + us-east-1a + false + false + + + + subnet-6e7f829e + available + vpc-1a2b3c4d + 10.0.0.0/24 + 251 + us-east-1a + false + false + + + + + """ + + def test_get_all_subnets(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.get_all_subnets( + ['subnet-9d4a7b6c', 'subnet-6e7f829e'], + filters=OrderedDict([('state', 'available'), + ('vpc-id', ['subnet-9d4a7b6c', 'subnet-6e7f829e'])])) + self.assert_request_parameters({ + 'Action': 'DescribeSubnets', + 'SubnetId.1': 'subnet-9d4a7b6c', + 'SubnetId.2': 'subnet-6e7f829e', + 'Filter.1.Name': 'state', + 'Filter.1.Value.1': 'available', + 'Filter.2.Name': 'vpc-id', + 'Filter.2.Value.1': 'subnet-9d4a7b6c', + 'Filter.2.Value.2': 'subnet-6e7f829e'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(len(api_response), 2) + self.assertIsInstance(api_response[0], Subnet) + self.assertEqual(api_response[0].id, 'subnet-9d4a7b6c') + self.assertEqual(api_response[1].id, 'subnet-6e7f829e') + + +class TestCreateSubnet(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + subnet-9d4a7b6c + pending + 
vpc-1a2b3c4d + 10.0.1.0/24 + 251 + us-east-1a + + + + """ + + def test_create_subnet(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.create_subnet( + 'vpc-1a2b3c4d', '10.0.1.0/24', 'us-east-1a') + self.assert_request_parameters({ + 'Action': 'CreateSubnet', + 'VpcId': 'vpc-1a2b3c4d', + 'CidrBlock': '10.0.1.0/24', + 'AvailabilityZone': 'us-east-1a'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertIsInstance(api_response, Subnet) + self.assertEquals(api_response.id, 'subnet-9d4a7b6c') + self.assertEquals(api_response.state, 'pending') + self.assertEquals(api_response.vpc_id, 'vpc-1a2b3c4d') + self.assertEquals(api_response.cidr_block, '10.0.1.0/24') + self.assertEquals(api_response.available_ip_address_count, 251) + self.assertEquals(api_response.availability_zone, 'us-east-1a') + + +class TestDeleteSubnet(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + true + + """ + + def test_delete_subnet(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.delete_subnet('subnet-9d4a7b6c') + self.assert_request_parameters({ + 'Action': 'DeleteSubnet', + 'SubnetId': 'subnet-9d4a7b6c'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, True) + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_vpc.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_vpc.py new file mode 100644 index 0000000000000000000000000000000000000000..f8adaf8aa652342a63e72ed2542fe0ee9e8f6fe1 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_vpc.py @@ -0,0 +1,367 @@ +# -*- coding: UTF-8 -*- +from tests.unit import unittest +from tests.unit import AWSMockServiceTestCase + +from boto.vpc import VPCConnection, VPC +from boto.ec2.securitygroup import SecurityGroup + + +DESCRIBE_VPCS = b''' + + 623040d1-b51c-40bc-8080-93486f38d03d + + + vpc-12345678 + available + 172.16.0.0/16 + dopt-12345678 + default + false + + +''' + + +class TestDescribeVPCs(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return DESCRIBE_VPCS + + def test_get_vpcs(self): + self.set_http_response(status_code=200) + + api_response = self.service_connection.get_all_vpcs() + self.assertEqual(len(api_response), 1) + + vpc = api_response[0] + self.assertFalse(vpc.is_default) + self.assertEqual(vpc.instance_tenancy, 'default') + + +class TestCreateVpc(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + vpc-1a2b3c4d + pending + 10.0.0.0/16 + dopt-1a2b3c4d2 + default + + + + """ + + def test_create_vpc(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.create_vpc('10.0.0.0/16', 'default') + self.assert_request_parameters({ + 'Action': 'CreateVpc', + 'InstanceTenancy': 'default', + 'CidrBlock': '10.0.0.0/16'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertIsInstance(api_response, VPC) + self.assertEquals(api_response.id, 'vpc-1a2b3c4d') + self.assertEquals(api_response.state, 'pending') + self.assertEquals(api_response.cidr_block, '10.0.0.0/16') + self.assertEquals(api_response.dhcp_options_id, 
'dopt-1a2b3c4d2') + self.assertEquals(api_response.instance_tenancy, 'default') + + +class TestDeleteVpc(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + true + + """ + + def test_delete_vpc(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.delete_vpc('vpc-1a2b3c4d') + self.assert_request_parameters({ + 'Action': 'DeleteVpc', + 'VpcId': 'vpc-1a2b3c4d'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, True) + + +class TestModifyVpcAttribute(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + true + + """ + + def test_modify_vpc_attribute_dns_support(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.modify_vpc_attribute( + 'vpc-1a2b3c4d', enable_dns_support=True) + self.assert_request_parameters({ + 'Action': 'ModifyVpcAttribute', + 'VpcId': 'vpc-1a2b3c4d', + 'EnableDnsSupport.Value': 'true'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, True) + + def test_modify_vpc_attribute_dns_hostnames(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.modify_vpc_attribute( + 'vpc-1a2b3c4d', enable_dns_hostnames=True) + self.assert_request_parameters({ + 'Action': 'ModifyVpcAttribute', + 'VpcId': 'vpc-1a2b3c4d', + 'EnableDnsHostnames.Value': 'true'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, True) + + +class TestGetAllClassicLinkVpc(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 2484655d-d669-4950-bf55-7ba559805d36 + + + vpc-6226ab07 + false + + + Name + hello[ + + + + + vpc-9d24f8f8 + true + + + + + """ + + def test_get_all_classic_link_vpcs(self): + self.set_http_response(status_code=200) + response = self.service_connection.get_all_classic_link_vpcs() + self.assertEqual(len(response), 2) + vpc = response[0] + self.assertEqual(vpc.id, 'vpc-6226ab07') + self.assertEqual(vpc.classic_link_enabled, 'false') + self.assertEqual(vpc.tags, {'Name': 'hello'}) + + def test_get_all_classic_link_vpcs_params(self): + self.set_http_response(status_code=200) + self.service_connection.get_all_classic_link_vpcs( + vpc_ids=['id1', 'id2'], + filters={'GroupId': 'sg-9b4343fe'}, + dry_run=True, + ) + self.assert_request_parameters({ + 'Action': 'DescribeVpcClassicLink', + 'VpcId.1': 'id1', + 'VpcId.2': 'id2', + 'Filter.1.Name': 'GroupId', + 'Filter.1.Value.1': 'sg-9b4343fe', + 'DryRun': 'true'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', 'Version']) + + +class TestVpcClassicLink(AWSMockServiceTestCase): + connection_class = VPCConnection + + def setUp(self): + super(TestVpcClassicLink, self).setUp() + self.vpc = VPC(self.service_connection) + self.vpc_id = 'myid' + self.vpc.id = self.vpc_id + + +class TestAttachClassicLinkVpc(TestVpcClassicLink): + def default_body(self): + return b""" + + 88673bdf-cd16-40bf-87a1-6132fec47257 + true + + """ + + def test_attach_classic_link_instance_string_groups(self): + groups = ['sg-foo', 'sg-bar'] + + self.set_http_response(status_code=200) + response = 
self.vpc.attach_classic_instance( + instance_id='my_instance_id', + groups=groups, + dry_run=True + ) + self.assertTrue(response) + self.assert_request_parameters({ + 'Action': 'AttachClassicLinkVpc', + 'VpcId': self.vpc_id, + 'InstanceId': 'my_instance_id', + 'SecurityGroupId.1': 'sg-foo', + 'SecurityGroupId.2': 'sg-bar', + 'DryRun': 'true'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', 'Version']) + + def test_attach_classic_link_instance_object_groups(self): + sec_group_1 = SecurityGroup() + sec_group_1.id = 'sg-foo' + + sec_group_2 = SecurityGroup() + sec_group_2.id = 'sg-bar' + + groups = [sec_group_1, sec_group_2] + + self.set_http_response(status_code=200) + response = self.vpc.attach_classic_instance( + instance_id='my_instance_id', + groups=groups, + dry_run=True + ) + self.assertTrue(response) + self.assert_request_parameters({ + 'Action': 'AttachClassicLinkVpc', + 'VpcId': self.vpc_id, + 'InstanceId': 'my_instance_id', + 'SecurityGroupId.1': 'sg-foo', + 'SecurityGroupId.2': 'sg-bar', + 'DryRun': 'true'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', 'Version']) + + +class TestDetachClassicLinkVpc(TestVpcClassicLink): + def default_body(self): + return b""" + + 5565033d-1321-4eef-b121-6aa46f152ed7 + true + + """ + + def test_detach_classic_link_instance(self): + self.set_http_response(status_code=200) + response = self.vpc.detach_classic_instance( + instance_id='my_instance_id', + dry_run=True + ) + self.assertTrue(response) + self.assert_request_parameters({ + 'Action': 'DetachClassicLinkVpc', + 'VpcId': self.vpc_id, + 'InstanceId': 'my_instance_id', + 'DryRun': 'true'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', 'Version']) + + +class TestEnableClassicLinkVpc(TestVpcClassicLink): + def default_body(self): + return b""" + + 4ab2b2b3-a267-4366-a070-bab853b5927d + true + + """ + + def test_enable_classic_link(self): + self.set_http_response(status_code=200) + response = self.vpc.enable_classic_link( + dry_run=True + ) + self.assertTrue(response) + self.assert_request_parameters({ + 'Action': 'EnableVpcClassicLink', + 'VpcId': self.vpc_id, + 'DryRun': 'true'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', 'Version']) + + +class TestDisableClassicLinkVpc(TestVpcClassicLink): + def default_body(self): + return b""" + + 4ab2b2b3-a267-4366-a070-bab853b5927d + true + + """ + + def test_enable_classic_link(self): + self.set_http_response(status_code=200) + response = self.vpc.disable_classic_link( + dry_run=True + ) + self.assertTrue(response) + self.assert_request_parameters({ + 'Action': 'DisableVpcClassicLink', + 'VpcId': self.vpc_id, + 'DryRun': 'true'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', 'Version']) + + +class TestUpdateClassicLinkVpc(TestVpcClassicLink): + def default_body(self): + return b""" + + 2484655d-d669-4950-bf55-7ba559805d36 + + + myid + true + + + + + """ + + def test_vpc_update_classic_link_enabled(self): + self.vpc.classic_link_enabled = False + self.set_http_response(status_code=200) + self.vpc.update_classic_link_enabled( + dry_run=True, + validate=True + ) + self.assert_request_parameters({ + 'Action': 'DescribeVpcClassicLink', + 'VpcId.1': self.vpc_id, + 'DryRun': 'true'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', 'Version']) + 
self.assertEqual(self.vpc.classic_link_enabled, 'true') + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_vpc_peering_connection.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_vpc_peering_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..503e5606548a5bb9484ccb3840908356e8a1e890 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_vpc_peering_connection.py @@ -0,0 +1,275 @@ +# Copyright (c) 2014 Skytap http://skytap.com/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from tests.unit import mock, unittest +from tests.unit import AWSMockServiceTestCase + +from boto.vpc import VpcPeeringConnection, VPCConnection, Subnet + + +class TestDescribeVpcPeeringConnections(AWSMockServiceTestCase): + DESCRIBE_VPC_PEERING_CONNECTIONS= b""" + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + + pcx-111aaa22 + + 777788889999 + vpc-1a2b3c4d + 172.31.0.0/16 + + + 111122223333 + vpc-aa22cc33 + + + pending-acceptance + Pending Acceptance by 111122223333 + + 2014-02-17T16:00:50.000Z + + + pcx-444bbb88 + + 1237897234 + vpc-2398abcd + 172.30.0.0/16 + + + 98654313 + vpc-0983bcda + + + pending-acceptance + Pending Acceptance by 98654313 + + 2015-02-17T16:00:50.000Z + + +""" + + connection_class = VPCConnection + + def default_body(self): + return self.DESCRIBE_VPC_PEERING_CONNECTIONS + + def test_get_vpc_peering_connections(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.get_all_vpc_peering_connections( + ['pcx-111aaa22', 'pcx-444bbb88'], filters=[('status-code', ['pending-acceptance'])]) + + self.assertEqual(len(api_response), 2) + + for vpc_peering_connection in api_response: + if vpc_peering_connection.id == 'pcx-111aaa22': + self.assertEqual(vpc_peering_connection.id, 'pcx-111aaa22') + self.assertEqual(vpc_peering_connection.status_code, 'pending-acceptance') + self.assertEqual(vpc_peering_connection.status_message, 'Pending Acceptance by 111122223333') + self.assertEqual(vpc_peering_connection.requester_vpc_info.owner_id, '777788889999') + self.assertEqual(vpc_peering_connection.requester_vpc_info.vpc_id, 'vpc-1a2b3c4d') + self.assertEqual(vpc_peering_connection.requester_vpc_info.cidr_block, '172.31.0.0/16') + self.assertEqual(vpc_peering_connection.accepter_vpc_info.owner_id, '111122223333') + self.assertEqual(vpc_peering_connection.accepter_vpc_info.vpc_id, 'vpc-aa22cc33') + self.assertEqual(vpc_peering_connection.expiration_time, 
'2014-02-17T16:00:50.000Z') + else: + self.assertEqual(vpc_peering_connection.id, 'pcx-444bbb88') + self.assertEqual(vpc_peering_connection.status_code, 'pending-acceptance') + self.assertEqual(vpc_peering_connection.status_message, 'Pending Acceptance by 98654313') + self.assertEqual(vpc_peering_connection.requester_vpc_info.owner_id, '1237897234') + self.assertEqual(vpc_peering_connection.requester_vpc_info.vpc_id, 'vpc-2398abcd') + self.assertEqual(vpc_peering_connection.requester_vpc_info.cidr_block, '172.30.0.0/16') + self.assertEqual(vpc_peering_connection.accepter_vpc_info.owner_id, '98654313') + self.assertEqual(vpc_peering_connection.accepter_vpc_info.vpc_id, 'vpc-0983bcda') + self.assertEqual(vpc_peering_connection.expiration_time, '2015-02-17T16:00:50.000Z') + + +class TestCreateVpcPeeringConnection(AWSMockServiceTestCase): + CREATE_VPC_PEERING_CONNECTION= b""" + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + pcx-73a5401a + + 777788889999 + vpc-1a2b3c4d + 10.0.0.0/28 + + + 123456789012 + vpc-a1b2c3d4 + + + initiating-request + Initiating Request to 123456789012 + + 2014-02-18T14:37:25.000Z + + +""" + + connection_class = VPCConnection + + def default_body(self): + return self.CREATE_VPC_PEERING_CONNECTION + + def test_create_vpc_peering_connection(self): + self.set_http_response(status_code=200) + vpc_peering_connection = self.service_connection.create_vpc_peering_connection('vpc-1a2b3c4d', 'vpc-a1b2c3d4', '123456789012') + + self.assertEqual(vpc_peering_connection.id, 'pcx-73a5401a') + self.assertEqual(vpc_peering_connection.status_code, 'initiating-request') + self.assertEqual(vpc_peering_connection.status_message, 'Initiating Request to 123456789012') + self.assertEqual(vpc_peering_connection.requester_vpc_info.owner_id, '777788889999') + self.assertEqual(vpc_peering_connection.requester_vpc_info.vpc_id, 'vpc-1a2b3c4d') + self.assertEqual(vpc_peering_connection.requester_vpc_info.cidr_block, '10.0.0.0/28') + self.assertEqual(vpc_peering_connection.accepter_vpc_info.owner_id, '123456789012') + self.assertEqual(vpc_peering_connection.accepter_vpc_info.vpc_id, 'vpc-a1b2c3d4') + self.assertEqual(vpc_peering_connection.expiration_time, '2014-02-18T14:37:25.000Z') + +class TestDeleteVpcPeeringConnection(AWSMockServiceTestCase): + DELETE_VPC_PEERING_CONNECTION= b""" + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + true +""" + + connection_class = VPCConnection + + def default_body(self): + return self.DELETE_VPC_PEERING_CONNECTION + + def test_delete_vpc_peering_connection(self): + self.set_http_response(status_code=200) + self.assertEquals(self.service_connection.delete_vpc_peering_connection('pcx-12345678'), True) + +class TestDeleteVpcPeeringConnectionShortForm(unittest.TestCase): + DESCRIBE_VPC_PEERING_CONNECTIONS= b""" + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + + pcx-111aaa22 + + 777788889999 + vpc-1a2b3c4d + 172.31.0.0/16 + + + 111122223333 + vpc-aa22cc33 + + + pending-acceptance + Pending Acceptance by 111122223333 + + 2014-02-17T16:00:50.000Z + + +""" + + DELETE_VPC_PEERING_CONNECTION= b""" + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + true +""" + + def test_delete_vpc_peering_connection(self): + vpc_conn = VPCConnection(aws_access_key_id='aws_access_key_id', + aws_secret_access_key='aws_secret_access_key') + + mock_response = mock.Mock() + mock_response.read.return_value = self.DESCRIBE_VPC_PEERING_CONNECTIONS + mock_response.status = 200 + vpc_conn.make_request = mock.Mock(return_value=mock_response) + vpc_peering_connections = vpc_conn.get_all_vpc_peering_connections() + + 
self.assertEquals(1, len(vpc_peering_connections)) + vpc_peering_connection = vpc_peering_connections[0] + + mock_response = mock.Mock() + mock_response.read.return_value = self.DELETE_VPC_PEERING_CONNECTION + mock_response.status = 200 + vpc_conn.make_request = mock.Mock(return_value=mock_response) + self.assertEquals(True, vpc_peering_connection.delete()) + + self.assertIn('DeleteVpcPeeringConnection', vpc_conn.make_request.call_args_list[0][0]) + self.assertNotIn('DeleteVpc', vpc_conn.make_request.call_args_list[0][0]) + +class TestRejectVpcPeeringConnection(AWSMockServiceTestCase): + REJECT_VPC_PEERING_CONNECTION= b""" + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + true +""" + + connection_class = VPCConnection + + def default_body(self): + return self.REJECT_VPC_PEERING_CONNECTION + + def test_reject_vpc_peering_connection(self): + self.set_http_response(status_code=200) + self.assertEquals(self.service_connection.reject_vpc_peering_connection('pcx-12345678'), True) + + +class TestAcceptVpcPeeringConnection(AWSMockServiceTestCase): + ACCEPT_VPC_PEERING_CONNECTION= b""" + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + pcx-1a2b3c4d + + 123456789012 + vpc-1a2b3c4d + 10.0.0.0/28 + + + 777788889999 + vpc-111aaa22 + 10.0.1.0/28 + + + active + Active + + + +""" + + connection_class = VPCConnection + + def default_body(self): + return self.ACCEPT_VPC_PEERING_CONNECTION + + def test_accept_vpc_peering_connection(self): + self.set_http_response(status_code=200) + vpc_peering_connection = self.service_connection.accept_vpc_peering_connection('pcx-1a2b3c4d') + + self.assertEqual(vpc_peering_connection.id, 'pcx-1a2b3c4d') + self.assertEqual(vpc_peering_connection.status_code, 'active') + self.assertEqual(vpc_peering_connection.status_message, 'Active') + self.assertEqual(vpc_peering_connection.requester_vpc_info.owner_id, '123456789012') + self.assertEqual(vpc_peering_connection.requester_vpc_info.vpc_id, 'vpc-1a2b3c4d') + self.assertEqual(vpc_peering_connection.requester_vpc_info.cidr_block, '10.0.0.0/28') + self.assertEqual(vpc_peering_connection.accepter_vpc_info.owner_id, '777788889999') + self.assertEqual(vpc_peering_connection.accepter_vpc_info.vpc_id, 'vpc-111aaa22') + self.assertEqual(vpc_peering_connection.accepter_vpc_info.cidr_block, '10.0.1.0/28') + + + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_vpnconnection.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_vpnconnection.py new file mode 100644 index 0000000000000000000000000000000000000000..c93c653947d164359ee687dcaa629f5d7f616a30 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_vpnconnection.py @@ -0,0 +1,254 @@ +# -*- coding: UTF-8 -*- +from tests.unit import unittest +from tests.unit import AWSMockServiceTestCase + +from boto.vpc import VPCConnection, VpnConnection + +DESCRIBE_VPNCONNECTIONS = b''' + + 12345678-asdf-ghjk-zxcv-0987654321nb + + + vpn-12qw34er56ty + available + + <?xml version="1.0" encoding="UTF-8"?> + + ipsec.1 + cgw-1234qwe9 + vgw-lkjh1234 + + + Name + VPN 1 + + + + + 123.45.67.89 + DOWN + 2013-03-19T19:20:34.000Z + + 0 + + + 123.45.67.90 + UP + 2013-03-20T08:00:14.000Z + + 0 + + + + true + + + + 192.168.0.0/24 + static + available + + + + + vpn-qwerty12 + pending + + <?xml version="1.0" encoding="UTF-8"?> + + ipsec.1 + cgw-01234567 + vgw-asdfghjk + + + 134.56.78.78 + UP + 2013-03-20T01:46:30.000Z + + 0 + + + 134.56.78.79 + UP + 2013-03-19T19:23:59.000Z + + 0 + + + + true + + + + 10.0.0.0/16 + static + pending + + + + +''' + + 
+class TestDescribeVPNConnections(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return DESCRIBE_VPNCONNECTIONS + + def test_get_vpcs(self): + self.set_http_response(status_code=200) + + api_response = self.service_connection.get_all_vpn_connections( + ['vpn-12qw34er56ty', 'vpn-qwerty12'], filters=[('state', ['pending', 'available'])]) + self.assert_request_parameters({ + 'Action': 'DescribeVpnConnections', + 'VpnConnectionId.1': 'vpn-12qw34er56ty', + 'VpnConnectionId.2': 'vpn-qwerty12', + 'Filter.1.Name': 'state', + 'Filter.1.Value.1': 'pending', + 'Filter.1.Value.2': 'available'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(len(api_response), 2) + + vpn0 = api_response[0] + self.assertEqual(vpn0.type, 'ipsec.1') + self.assertEqual(vpn0.customer_gateway_id, 'cgw-1234qwe9') + self.assertEqual(vpn0.vpn_gateway_id, 'vgw-lkjh1234') + self.assertEqual(len(vpn0.tunnels), 2) + self.assertDictEqual(vpn0.tags, {'Name': 'VPN 1'}) + + vpn1 = api_response[1] + self.assertEqual(vpn1.state, 'pending') + self.assertEqual(len(vpn1.static_routes), 1) + self.assertTrue(vpn1.options.static_routes_only) + self.assertEqual(vpn1.tunnels[0].status, 'UP') + self.assertEqual(vpn1.tunnels[1].status, 'UP') + self.assertDictEqual(vpn1.tags, {}) + self.assertEqual(vpn1.static_routes[0].source, 'static') + self.assertEqual(vpn1.static_routes[0].state, 'pending') + + +class TestCreateVPNConnection(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 5cc7891f-1f3b-4fc4-a626-bdea8f63ff5a + + vpn-83ad48ea + pending + + <?xml version="1.0" encoding="UTF-8"?> + + cgw-b4dc3961 + vgw-8db04f81 + + true + + + + + """ + + def test_create_vpn_connection(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.create_vpn_connection( + 'ipsec.1', 'cgw-b4dc3961', 'vgw-8db04f81', static_routes_only=True) + self.assert_request_parameters({ + 'Action': 'CreateVpnConnection', + 'Type': 'ipsec.1', + 'CustomerGatewayId': 'cgw-b4dc3961', + 'VpnGatewayId': 'vgw-8db04f81', + 'Options.StaticRoutesOnly': 'true'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertIsInstance(api_response, VpnConnection) + self.assertEquals(api_response.id, 'vpn-83ad48ea') + self.assertEquals(api_response.customer_gateway_id, 'cgw-b4dc3961') + self.assertEquals(api_response.options.static_routes_only, True) + + +class TestDeleteVPNConnection(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + true + + """ + + def test_delete_vpn_connection(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.delete_vpn_connection('vpn-44a8938f') + self.assert_request_parameters({ + 'Action': 'DeleteVpnConnection', + 'VpnConnectionId': 'vpn-44a8938f'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, True) + + +class TestCreateVPNConnectionRoute(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 4f35a1b2-c2c3-4093-b51f-abb9d7311990 + true + + """ + + def test_create_vpn_connection_route(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.create_vpn_connection_route( + 
'11.12.0.0/16', 'vpn-83ad48ea') + self.assert_request_parameters({ + 'Action': 'CreateVpnConnectionRoute', + 'DestinationCidrBlock': '11.12.0.0/16', + 'VpnConnectionId': 'vpn-83ad48ea'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, True) + + +class TestDeleteVPNConnectionRoute(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 4f35a1b2-c2c3-4093-b51f-abb9d7311990 + true + + """ + + def test_delete_vpn_connection_route(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.delete_vpn_connection_route( + '11.12.0.0/16', 'vpn-83ad48ea') + self.assert_request_parameters({ + 'Action': 'DeleteVpnConnectionRoute', + 'DestinationCidrBlock': '11.12.0.0/16', + 'VpnConnectionId': 'vpn-83ad48ea'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEquals(api_response, True) + +if __name__ == '__main__': + unittest.main() diff --git a/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_vpngateway.py b/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_vpngateway.py new file mode 100644 index 0000000000000000000000000000000000000000..36d9a31255357e0b4b48fe8a7b08cf583be0a675 --- /dev/null +++ b/desktop/core/ext-py/boto-2.38.0/tests/unit/vpc/test_vpngateway.py @@ -0,0 +1,220 @@ +# -*- coding: UTF-8 -*- +from tests.compat import OrderedDict +from tests.unit import unittest +from tests.unit import AWSMockServiceTestCase + +from boto.vpc import VPCConnection, VpnGateway, Attachment + + +class TestDescribeVpnGateways(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + + vgw-8db04f81 + available + ipsec.1 + us-east-1a + + + vpc-1a2b3c4d + attached + + + + + + + """ + + def test_get_all_vpn_gateways(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.get_all_vpn_gateways( + 'vgw-8db04f81', filters=OrderedDict([('state', ['pending', 'available']), + ('availability-zone', 'us-east-1a')])) + self.assert_request_parameters({ + 'Action': 'DescribeVpnGateways', + 'VpnGatewayId.1': 'vgw-8db04f81', + 'Filter.1.Name': 'state', + 'Filter.1.Value.1': 'pending', + 'Filter.1.Value.2': 'available', + 'Filter.2.Name': 'availability-zone', + 'Filter.2.Value.1': 'us-east-1a'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(len(api_response), 1) + self.assertIsInstance(api_response[0], VpnGateway) + self.assertEqual(api_response[0].id, 'vgw-8db04f81') + + +class TestCreateVpnGateway(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + vgw-8db04f81 + pending + ipsec.1 + us-east-1a + + + + + """ + + def test_delete_vpn_gateway(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.create_vpn_gateway('ipsec.1', 'us-east-1a') + self.assert_request_parameters({ + 'Action': 'CreateVpnGateway', + 'AvailabilityZone': 'us-east-1a', + 'Type': 'ipsec.1'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertIsInstance(api_response, VpnGateway) + self.assertEquals(api_response.id, 'vgw-8db04f81') + + +class TestDeleteVpnGateway(AWSMockServiceTestCase): + + connection_class 
= VPCConnection + + def default_body(self): + return b""" + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + true + + """ + + def test_delete_vpn_gateway(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.delete_vpn_gateway('vgw-8db04f81') + self.assert_request_parameters({ + 'Action': 'DeleteVpnGateway', + 'VpnGatewayId': 'vgw-8db04f81'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(api_response, True) + + +class TestAttachVpnGateway(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + vpc-1a2b3c4d + attaching + + + """ + + def test_attach_vpn_gateway(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.attach_vpn_gateway('vgw-8db04f81', 'vpc-1a2b3c4d') + self.assert_request_parameters({ + 'Action': 'AttachVpnGateway', + 'VpnGatewayId': 'vgw-8db04f81', + 'VpcId': 'vpc-1a2b3c4d'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertIsInstance(api_response, Attachment) + self.assertEquals(api_response.vpc_id, 'vpc-1a2b3c4d') + self.assertEquals(api_response.state, 'attaching') + + +class TestDetachVpnGateway(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + true + + """ + + def test_detach_vpn_gateway(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.detach_vpn_gateway('vgw-8db04f81', 'vpc-1a2b3c4d') + self.assert_request_parameters({ + 'Action': 'DetachVpnGateway', + 'VpnGatewayId': 'vgw-8db04f81', + 'VpcId': 'vpc-1a2b3c4d'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(api_response, True) + + +class TestDisableVgwRoutePropagation(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 4f35a1b2-c2c3-4093-b51f-abb9d7311990 + true + + """ + + def test_disable_vgw_route_propagation(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.disable_vgw_route_propagation( + 'rtb-c98a35a0', 'vgw-d8e09e8a') + self.assert_request_parameters({ + 'Action': 'DisableVgwRoutePropagation', + 'GatewayId': 'vgw-d8e09e8a', + 'RouteTableId': 'rtb-c98a35a0'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(api_response, True) + + +class TestEnableVgwRoutePropagation(AWSMockServiceTestCase): + + connection_class = VPCConnection + + def default_body(self): + return b""" + + 4f35a1b2-c2c3-4093-b51f-abb9d7311990 + true + + """ + + def test_enable_vgw_route_propagation(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.enable_vgw_route_propagation( + 'rtb-c98a35a0', 'vgw-d8e09e8a') + self.assert_request_parameters({ + 'Action': 'EnableVgwRoutePropagation', + 'GatewayId': 'vgw-d8e09e8a', + 'RouteTableId': 'rtb-c98a35a0'}, + ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version']) + self.assertEqual(api_response, True) + +if __name__ == '__main__': + unittest.main() diff --git a/ext/thirdparty/README.md b/ext/thirdparty/README.md index 2619c56baec5f2519413ccfcd81cd39298008539..747ef18861235f27c5a9eaf3cee0febbbad8f72d 100644 --- 
a/ext/thirdparty/README.md
+++ b/ext/thirdparty/README.md
@@ -9,6 +9,7 @@ Checked-in third party dependencies
 |--------------|-------|-------|-------|----------|
 |Y|A Midsummer Nights Dream by Shakespeare|?|Public Domain from Gutenberg|http://www.gutenberg.org/dirs/etext98/2ws1710.txt|
 |Y|Avro|1.5.0|ASL2|http://avro.apache.org/|
+|Y|boto|2.38.0|MIT|https://github.com/boto/boto/|
 |Y|CherryPy|3.1.2|BSD|http://www.cherrypy.org/|
 |Y|ConfigObj|4.6.0|BSD|http://www.voidspace.org.uk/python/configobj.html|
 |Y|ctypes|1.0.2|MIT|http://pypi.python.org/pypi/ctypes|
-- 
1.7.9.5
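
For reviewers unfamiliar with the library: the VPC unit tests added above drive boto.vpc.VPCConnection against canned XML responses, so nothing in this patch talks to AWS. As a quick orientation (not part of the patch itself), below is a minimal usage sketch of the same API surface the tests exercise. It is a sketch under assumptions: valid AWS credentials must already be configured (environment variables or ~/.boto), and the region name and CIDR blocks are arbitrary examples, not values required by the patch.

    # Minimal sketch of the boto VPC calls exercised by the tests above.
    # Assumes credentials are configured; region and CIDRs are illustrative only.
    import boto.vpc

    conn = boto.vpc.connect_to_region('us-east-1')

    # Create a VPC with one subnet (cf. TestCreateVpc, TestCreateSubnet).
    vpc = conn.create_vpc('10.0.0.0/16', instance_tenancy='default')
    subnet = conn.create_subnet(vpc.id, '10.0.1.0/24', availability_zone='us-east-1a')

    # Wire up routing: a route table, its subnet association, and a default
    # route through an internet gateway (cf. the route-table and gateway tests).
    route_table = conn.create_route_table(vpc.id)
    assoc_id = conn.associate_route_table(route_table.id, subnet.id)
    igw = conn.create_internet_gateway()
    conn.attach_internet_gateway(igw.id, vpc.id)
    conn.create_route(route_table.id, '0.0.0.0/0', gateway_id=igw.id)

    # Tear everything down in reverse order.
    conn.delete_route(route_table.id, '0.0.0.0/0')
    conn.detach_internet_gateway(igw.id, vpc.id)
    conn.delete_internet_gateway(igw.id)
    conn.disassociate_route_table(assoc_id)
    conn.delete_route_table(route_table.id)
    conn.delete_subnet(subnet.id)
    conn.delete_vpc(vpc.id)

The tests themselves follow one pattern throughout: each AWSMockServiceTestCase subclass supplies a canned response via default_body(), invokes a single VPCConnection method, and then verifies the generated request with assert_request_parameters(), ignoring the signature-related fields.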